comment
stringlengths
1
45k
method_body
stringlengths
23
281k
target_code
stringlengths
0
5.16k
method_body_after
stringlengths
12
281k
context_before
stringlengths
8
543k
context_after
stringlengths
8
543k
Or, should artifactsPath() return the artifacts path?
public VespaCliTestRunner(VespaCliTestRunnerConfig config) { this(config.artifactsPath().resolve("artifacts")); }
this(config.artifactsPath().resolve("artifacts"));
public VespaCliTestRunner(VespaCliTestRunnerConfig config) { this(config.artifactsPath().resolve("artifacts")); }
class VespaCliTestRunner implements TestRunner { private static final Logger logger = Logger.getLogger(VespaCliTestRunner.class.getName()); private final SortedMap<Long, LogRecord> log = new ConcurrentSkipListMap<>(); private final Path artifactsPath; private AtomicReference<Status> status = new AtomicReference<>(Status.NOT_STARTED); @Inject VespaCliTestRunner(Path artifactsPath) { this.artifactsPath = artifactsPath; } @Override public Collection<LogRecord> getLog(long after) { return log.tailMap(after + 1).values(); } @Override public Status getStatus() { return status.get(); } @Override public CompletableFuture<?> test(Suite suite, byte[] config) { if (status.getAndSet(RUNNING) == RUNNING) throw new IllegalStateException("Tests already running, not supposed to be started now"); return CompletableFuture.runAsync(() -> runTests(suite, config)); } @Override public boolean isSupported() { return getChildDirectory(artifactsPath, "tests").isPresent(); } void runTests(Suite suite, byte[] config) { Process process = null; try { process = testRunProcessBuilder(suite, toEndpointsConfig(config)).start(); BufferedReader in = new BufferedReader(new InputStreamReader(process.getInputStream())); in.lines().forEach(line -> { if (line.length() > 1 << 13) line = line.substring(0, 1 << 13) + " ... (this log entry was truncated due to size)"; log(Level.INFO, line, null); }); status.set(process.waitFor() == 0 ? SUCCESS : process.waitFor() == 3 ? 
FAILURE : ERROR); } catch (Exception e) { if (process != null) process.destroyForcibly(); log(Level.SEVERE, "Failed running tests", e); status.set(ERROR); } } ProcessBuilder testRunProcessBuilder(Suite suite, String endpointsConfig) { Path suitePath = getChildDirectory(artifactsPath, "tests") .flatMap(testsPath -> getChildDirectory(testsPath, toSuiteDirectoryName(suite))) .orElseThrow(() -> new IllegalStateException("No tests found, for suite '" + suite + "'")); ProcessBuilder builder = new ProcessBuilder("vespa", "test", "--endpoints", endpointsConfig); builder.redirectErrorStream(true); builder.directory(suitePath.toFile()); return builder; } private static String toSuiteDirectoryName(Suite suite) { switch (suite) { case SYSTEM_TEST: return "system-test"; case STAGING_SETUP_TEST: return "staging-setup"; case STAGING_TEST: return "staging-test"; default: throw new IllegalArgumentException("Unsupported test suite '" + suite + "'"); } } private void log(Level level, String message, Throwable thrown) { LogRecord record = new LogRecord(level, message); record.setThrown(thrown); logger.log(record); log.put(record.getSequenceNumber(), record); } private static Optional<Path> getChildDirectory(Path parent, String name) { try (Stream<Path> children = Files.list(parent)) { return children.filter(Files::isDirectory) .filter(path -> path.endsWith(name)) .findAny(); } catch (IOException e) { throw new UncheckedIOException("Failed to list files under " + parent, e); } } static String toEndpointsConfig(byte[] testConfig) throws IOException { TestConfig config = TestConfig.fromJson(testConfig); Cursor root = new Slime().setObject(); Cursor endpointsArray = root.setArray("endpoints"); config.deployments().get(config.zone()).forEach((cluster, url) -> { Cursor endpointObject = endpointsArray.addObject(); endpointObject.setString("cluster", cluster); endpointObject.setString("url", url.toString()); }); return new String(SlimeUtils.toJsonBytes(root), UTF_8); } }
class VespaCliTestRunner implements TestRunner { private static final Logger logger = Logger.getLogger(VespaCliTestRunner.class.getName()); private final SortedMap<Long, LogRecord> log = new ConcurrentSkipListMap<>(); private final Path artifactsPath; private AtomicReference<Status> status = new AtomicReference<>(Status.NOT_STARTED); @Inject VespaCliTestRunner(Path artifactsPath) { this.artifactsPath = artifactsPath; } @Override public Collection<LogRecord> getLog(long after) { return log.tailMap(after + 1).values(); } @Override public Status getStatus() { return status.get(); } @Override public CompletableFuture<?> test(Suite suite, byte[] config) { if (status.getAndSet(RUNNING) == RUNNING) throw new IllegalStateException("Tests already running, not supposed to be started now"); return CompletableFuture.runAsync(() -> runTests(suite, config)); } @Override public boolean isSupported() { return getChildDirectory(artifactsPath, "tests").isPresent(); } void runTests(Suite suite, byte[] config) { Process process = null; try { TestConfig testConfig = TestConfig.fromJson(config); process = testRunProcessBuilder(suite, testConfig).start(); BufferedReader in = new BufferedReader(new InputStreamReader(process.getInputStream())); in.lines().forEach(line -> { if (line.length() > 1 << 13) line = line.substring(0, 1 << 13) + " ... (this log entry was truncated due to size)"; log(Level.INFO, line, null); }); status.set(process.waitFor() == 0 ? SUCCESS : process.waitFor() == 3 ? 
FAILURE : ERROR); } catch (Exception e) { if (process != null) process.destroyForcibly(); log(Level.SEVERE, "Failed running tests", e); status.set(ERROR); } } ProcessBuilder testRunProcessBuilder(Suite suite, TestConfig config) throws IOException { Path suitePath = getChildDirectory(artifactsPath, "tests") .flatMap(testsPath -> getChildDirectory(testsPath, toSuiteDirectoryName(suite))) .orElseThrow(() -> new IllegalStateException("No tests found, for suite '" + suite + "'")); ProcessBuilder builder = new ProcessBuilder("vespa", "test", suitePath.toAbsolutePath().toString(), "--application", config.application().toFullString(), "--endpoints", toEndpointsConfig(config), "--data-plane-public-cert", artifactsPath.resolve("cert").toAbsolutePath().toString(), "--data-plane-private-key", artifactsPath.resolve("key").toAbsolutePath().toString()); builder.redirectErrorStream(true); return builder; } private static String toSuiteDirectoryName(Suite suite) { switch (suite) { case SYSTEM_TEST: return "system-test"; case STAGING_SETUP_TEST: return "staging-setup"; case STAGING_TEST: return "staging-test"; default: throw new IllegalArgumentException("Unsupported test suite '" + suite + "'"); } } private void log(Level level, String message, Throwable thrown) { LogRecord record = new LogRecord(level, message); record.setThrown(thrown); logger.log(record); log.put(record.getSequenceNumber(), record); } private static Optional<Path> getChildDirectory(Path parent, String name) { try (Stream<Path> children = Files.list(parent)) { return children.filter(Files::isDirectory) .filter(path -> path.endsWith(name)) .findAny(); } catch (IOException e) { throw new UncheckedIOException("Failed to list files under " + parent, e); } } static String toEndpointsConfig(TestConfig config) throws IOException { Cursor root = new Slime().setObject(); Cursor endpointsArray = root.setArray("endpoints"); config.deployments().get(config.zone()).forEach((cluster, url) -> { Cursor endpointObject = 
endpointsArray.addObject(); endpointObject.setString("cluster", cluster); endpointObject.setString("url", url.toString()); }); return new String(SlimeUtils.toJsonBytes(root), UTF_8); } }
We can't change that.
public VespaCliTestRunner(VespaCliTestRunnerConfig config) { this(config.artifactsPath().resolve("artifacts")); }
this(config.artifactsPath().resolve("artifacts"));
public VespaCliTestRunner(VespaCliTestRunnerConfig config) { this(config.artifactsPath().resolve("artifacts")); }
class VespaCliTestRunner implements TestRunner { private static final Logger logger = Logger.getLogger(VespaCliTestRunner.class.getName()); private final SortedMap<Long, LogRecord> log = new ConcurrentSkipListMap<>(); private final Path artifactsPath; private AtomicReference<Status> status = new AtomicReference<>(Status.NOT_STARTED); @Inject VespaCliTestRunner(Path artifactsPath) { this.artifactsPath = artifactsPath; } @Override public Collection<LogRecord> getLog(long after) { return log.tailMap(after + 1).values(); } @Override public Status getStatus() { return status.get(); } @Override public CompletableFuture<?> test(Suite suite, byte[] config) { if (status.getAndSet(RUNNING) == RUNNING) throw new IllegalStateException("Tests already running, not supposed to be started now"); return CompletableFuture.runAsync(() -> runTests(suite, config)); } @Override public boolean isSupported() { return getChildDirectory(artifactsPath, "tests").isPresent(); } void runTests(Suite suite, byte[] config) { Process process = null; try { process = testRunProcessBuilder(suite, toEndpointsConfig(config)).start(); BufferedReader in = new BufferedReader(new InputStreamReader(process.getInputStream())); in.lines().forEach(line -> { if (line.length() > 1 << 13) line = line.substring(0, 1 << 13) + " ... (this log entry was truncated due to size)"; log(Level.INFO, line, null); }); status.set(process.waitFor() == 0 ? SUCCESS : process.waitFor() == 3 ? 
FAILURE : ERROR); } catch (Exception e) { if (process != null) process.destroyForcibly(); log(Level.SEVERE, "Failed running tests", e); status.set(ERROR); } } ProcessBuilder testRunProcessBuilder(Suite suite, String endpointsConfig) { Path suitePath = getChildDirectory(artifactsPath, "tests") .flatMap(testsPath -> getChildDirectory(testsPath, toSuiteDirectoryName(suite))) .orElseThrow(() -> new IllegalStateException("No tests found, for suite '" + suite + "'")); ProcessBuilder builder = new ProcessBuilder("vespa", "test", "--endpoints", endpointsConfig); builder.redirectErrorStream(true); builder.directory(suitePath.toFile()); return builder; } private static String toSuiteDirectoryName(Suite suite) { switch (suite) { case SYSTEM_TEST: return "system-test"; case STAGING_SETUP_TEST: return "staging-setup"; case STAGING_TEST: return "staging-test"; default: throw new IllegalArgumentException("Unsupported test suite '" + suite + "'"); } } private void log(Level level, String message, Throwable thrown) { LogRecord record = new LogRecord(level, message); record.setThrown(thrown); logger.log(record); log.put(record.getSequenceNumber(), record); } private static Optional<Path> getChildDirectory(Path parent, String name) { try (Stream<Path> children = Files.list(parent)) { return children.filter(Files::isDirectory) .filter(path -> path.endsWith(name)) .findAny(); } catch (IOException e) { throw new UncheckedIOException("Failed to list files under " + parent, e); } } static String toEndpointsConfig(byte[] testConfig) throws IOException { TestConfig config = TestConfig.fromJson(testConfig); Cursor root = new Slime().setObject(); Cursor endpointsArray = root.setArray("endpoints"); config.deployments().get(config.zone()).forEach((cluster, url) -> { Cursor endpointObject = endpointsArray.addObject(); endpointObject.setString("cluster", cluster); endpointObject.setString("url", url.toString()); }); return new String(SlimeUtils.toJsonBytes(root), UTF_8); } }
class VespaCliTestRunner implements TestRunner { private static final Logger logger = Logger.getLogger(VespaCliTestRunner.class.getName()); private final SortedMap<Long, LogRecord> log = new ConcurrentSkipListMap<>(); private final Path artifactsPath; private AtomicReference<Status> status = new AtomicReference<>(Status.NOT_STARTED); @Inject VespaCliTestRunner(Path artifactsPath) { this.artifactsPath = artifactsPath; } @Override public Collection<LogRecord> getLog(long after) { return log.tailMap(after + 1).values(); } @Override public Status getStatus() { return status.get(); } @Override public CompletableFuture<?> test(Suite suite, byte[] config) { if (status.getAndSet(RUNNING) == RUNNING) throw new IllegalStateException("Tests already running, not supposed to be started now"); return CompletableFuture.runAsync(() -> runTests(suite, config)); } @Override public boolean isSupported() { return getChildDirectory(artifactsPath, "tests").isPresent(); } void runTests(Suite suite, byte[] config) { Process process = null; try { TestConfig testConfig = TestConfig.fromJson(config); process = testRunProcessBuilder(suite, testConfig).start(); BufferedReader in = new BufferedReader(new InputStreamReader(process.getInputStream())); in.lines().forEach(line -> { if (line.length() > 1 << 13) line = line.substring(0, 1 << 13) + " ... (this log entry was truncated due to size)"; log(Level.INFO, line, null); }); status.set(process.waitFor() == 0 ? SUCCESS : process.waitFor() == 3 ? 
FAILURE : ERROR); } catch (Exception e) { if (process != null) process.destroyForcibly(); log(Level.SEVERE, "Failed running tests", e); status.set(ERROR); } } ProcessBuilder testRunProcessBuilder(Suite suite, TestConfig config) throws IOException { Path suitePath = getChildDirectory(artifactsPath, "tests") .flatMap(testsPath -> getChildDirectory(testsPath, toSuiteDirectoryName(suite))) .orElseThrow(() -> new IllegalStateException("No tests found, for suite '" + suite + "'")); ProcessBuilder builder = new ProcessBuilder("vespa", "test", suitePath.toAbsolutePath().toString(), "--application", config.application().toFullString(), "--endpoints", toEndpointsConfig(config), "--data-plane-public-cert", artifactsPath.resolve("cert").toAbsolutePath().toString(), "--data-plane-private-key", artifactsPath.resolve("key").toAbsolutePath().toString()); builder.redirectErrorStream(true); return builder; } private static String toSuiteDirectoryName(Suite suite) { switch (suite) { case SYSTEM_TEST: return "system-test"; case STAGING_SETUP_TEST: return "staging-setup"; case STAGING_TEST: return "staging-test"; default: throw new IllegalArgumentException("Unsupported test suite '" + suite + "'"); } } private void log(Level level, String message, Throwable thrown) { LogRecord record = new LogRecord(level, message); record.setThrown(thrown); logger.log(record); log.put(record.getSequenceNumber(), record); } private static Optional<Path> getChildDirectory(Path parent, String name) { try (Stream<Path> children = Files.list(parent)) { return children.filter(Files::isDirectory) .filter(path -> path.endsWith(name)) .findAny(); } catch (IOException e) { throw new UncheckedIOException("Failed to list files under " + parent, e); } } static String toEndpointsConfig(TestConfig config) throws IOException { Cursor root = new Slime().setObject(); Cursor endpointsArray = root.setArray("endpoints"); config.deployments().get(config.zone()).forEach((cluster, url) -> { Cursor endpointObject = 
endpointsArray.addObject(); endpointObject.setString("cluster", cluster); endpointObject.setString("url", url.toString()); }); return new String(SlimeUtils.toJsonBytes(root), UTF_8); } }
and scope
static String toEndpointsConfig(TestConfig config) throws IOException { Cursor root = new Slime().setObject(); Cursor endpointsArray = root.setArray("endpoints"); config.deployments().get(config.zone()).forEach((cluster, url) -> { Cursor endpointObject = endpointsArray.addObject(); endpointObject.setString("cluster", cluster); endpointObject.setString("url", url.toString()); }); return new String(SlimeUtils.toJsonBytes(root), UTF_8); }
config.deployments().get(config.zone()).forEach((cluster, url) -> {
static String toEndpointsConfig(TestConfig config) throws IOException { Cursor root = new Slime().setObject(); Cursor endpointsArray = root.setArray("endpoints"); config.deployments().get(config.zone()).forEach((cluster, url) -> { Cursor endpointObject = endpointsArray.addObject(); endpointObject.setString("cluster", cluster); endpointObject.setString("url", url.toString()); }); return new String(SlimeUtils.toJsonBytes(root), UTF_8); }
class VespaCliTestRunner implements TestRunner { private static final Logger logger = Logger.getLogger(VespaCliTestRunner.class.getName()); private final SortedMap<Long, LogRecord> log = new ConcurrentSkipListMap<>(); private final Path artifactsPath; private AtomicReference<Status> status = new AtomicReference<>(Status.NOT_STARTED); @Inject public VespaCliTestRunner(VespaCliTestRunnerConfig config) { this(config.artifactsPath().resolve("artifacts")); } VespaCliTestRunner(Path artifactsPath) { this.artifactsPath = artifactsPath; } @Override public Collection<LogRecord> getLog(long after) { return log.tailMap(after + 1).values(); } @Override public Status getStatus() { return status.get(); } @Override public CompletableFuture<?> test(Suite suite, byte[] config) { if (status.getAndSet(RUNNING) == RUNNING) throw new IllegalStateException("Tests already running, not supposed to be started now"); return CompletableFuture.runAsync(() -> runTests(suite, config)); } @Override public boolean isSupported() { return getChildDirectory(artifactsPath, "tests").isPresent(); } void runTests(Suite suite, byte[] config) { Process process = null; try { TestConfig testConfig = TestConfig.fromJson(config); process = testRunProcessBuilder(suite, testConfig).start(); BufferedReader in = new BufferedReader(new InputStreamReader(process.getInputStream())); in.lines().forEach(line -> { if (line.length() > 1 << 13) line = line.substring(0, 1 << 13) + " ... (this log entry was truncated due to size)"; log(Level.INFO, line, null); }); status.set(process.waitFor() == 0 ? SUCCESS : process.waitFor() == 3 ? 
FAILURE : ERROR); } catch (Exception e) { if (process != null) process.destroyForcibly(); log(Level.SEVERE, "Failed running tests", e); status.set(ERROR); } } ProcessBuilder testRunProcessBuilder(Suite suite, TestConfig config) throws IOException { Path suitePath = getChildDirectory(artifactsPath, "tests") .flatMap(testsPath -> getChildDirectory(testsPath, toSuiteDirectoryName(suite))) .orElseThrow(() -> new IllegalStateException("No tests found, for suite '" + suite + "'")); ProcessBuilder builder = new ProcessBuilder("vespa", "test", suitePath.toAbsolutePath().toString(), "--application", config.application().toFullString(), "--endpoints", toEndpointsConfig(config), "--data-plane-public-cert", artifactsPath.resolve("cert").toAbsolutePath().toString(), "--data-plane-private-key", artifactsPath.resolve("key").toAbsolutePath().toString()); builder.redirectErrorStream(true); return builder; } private static String toSuiteDirectoryName(Suite suite) { switch (suite) { case SYSTEM_TEST: return "system-test"; case STAGING_SETUP_TEST: return "staging-setup"; case STAGING_TEST: return "staging-test"; default: throw new IllegalArgumentException("Unsupported test suite '" + suite + "'"); } } private void log(Level level, String message, Throwable thrown) { LogRecord record = new LogRecord(level, message); record.setThrown(thrown); logger.log(record); log.put(record.getSequenceNumber(), record); } private static Optional<Path> getChildDirectory(Path parent, String name) { try (Stream<Path> children = Files.list(parent)) { return children.filter(Files::isDirectory) .filter(path -> path.endsWith(name)) .findAny(); } catch (IOException e) { throw new UncheckedIOException("Failed to list files under " + parent, e); } } }
class VespaCliTestRunner implements TestRunner { private static final Logger logger = Logger.getLogger(VespaCliTestRunner.class.getName()); private final SortedMap<Long, LogRecord> log = new ConcurrentSkipListMap<>(); private final Path artifactsPath; private AtomicReference<Status> status = new AtomicReference<>(Status.NOT_STARTED); @Inject public VespaCliTestRunner(VespaCliTestRunnerConfig config) { this(config.artifactsPath().resolve("artifacts")); } VespaCliTestRunner(Path artifactsPath) { this.artifactsPath = artifactsPath; } @Override public Collection<LogRecord> getLog(long after) { return log.tailMap(after + 1).values(); } @Override public Status getStatus() { return status.get(); } @Override public CompletableFuture<?> test(Suite suite, byte[] config) { if (status.getAndSet(RUNNING) == RUNNING) throw new IllegalStateException("Tests already running, not supposed to be started now"); return CompletableFuture.runAsync(() -> runTests(suite, config)); } @Override public boolean isSupported() { return getChildDirectory(artifactsPath, "tests").isPresent(); } void runTests(Suite suite, byte[] config) { Process process = null; try { TestConfig testConfig = TestConfig.fromJson(config); process = testRunProcessBuilder(suite, testConfig).start(); BufferedReader in = new BufferedReader(new InputStreamReader(process.getInputStream())); in.lines().forEach(line -> { if (line.length() > 1 << 13) line = line.substring(0, 1 << 13) + " ... (this log entry was truncated due to size)"; log(Level.INFO, line, null); }); status.set(process.waitFor() == 0 ? SUCCESS : process.waitFor() == 3 ? 
FAILURE : ERROR); } catch (Exception e) { if (process != null) process.destroyForcibly(); log(Level.SEVERE, "Failed running tests", e); status.set(ERROR); } } ProcessBuilder testRunProcessBuilder(Suite suite, TestConfig config) throws IOException { Path suitePath = getChildDirectory(artifactsPath, "tests") .flatMap(testsPath -> getChildDirectory(testsPath, toSuiteDirectoryName(suite))) .orElseThrow(() -> new IllegalStateException("No tests found, for suite '" + suite + "'")); ProcessBuilder builder = new ProcessBuilder("vespa", "test", suitePath.toAbsolutePath().toString(), "--application", config.application().toFullString(), "--endpoints", toEndpointsConfig(config), "--data-plane-public-cert", artifactsPath.resolve("cert").toAbsolutePath().toString(), "--data-plane-private-key", artifactsPath.resolve("key").toAbsolutePath().toString()); builder.redirectErrorStream(true); return builder; } private static String toSuiteDirectoryName(Suite suite) { switch (suite) { case SYSTEM_TEST: return "system-test"; case STAGING_SETUP_TEST: return "staging-setup"; case STAGING_TEST: return "staging-test"; default: throw new IllegalArgumentException("Unsupported test suite '" + suite + "'"); } } private void log(Level level, String message, Throwable thrown) { LogRecord record = new LogRecord(level, message); record.setThrown(thrown); logger.log(record); log.put(record.getSequenceNumber(), record); } private static Optional<Path> getChildDirectory(Path parent, String name) { try (Stream<Path> children = Files.list(parent)) { return children.filter(Files::isDirectory) .filter(path -> path.endsWith(name)) .findAny(); } catch (IOException e) { throw new UncheckedIOException("Failed to list files under " + parent, e); } } }
?
static String toEndpointsConfig(TestConfig config) throws IOException { Cursor root = new Slime().setObject(); Cursor endpointsArray = root.setArray("endpoints"); config.deployments().get(config.zone()).forEach((cluster, url) -> { Cursor endpointObject = endpointsArray.addObject(); endpointObject.setString("cluster", cluster); endpointObject.setString("url", url.toString()); }); return new String(SlimeUtils.toJsonBytes(root), UTF_8); }
config.deployments().get(config.zone()).forEach((cluster, url) -> {
static String toEndpointsConfig(TestConfig config) throws IOException { Cursor root = new Slime().setObject(); Cursor endpointsArray = root.setArray("endpoints"); config.deployments().get(config.zone()).forEach((cluster, url) -> { Cursor endpointObject = endpointsArray.addObject(); endpointObject.setString("cluster", cluster); endpointObject.setString("url", url.toString()); }); return new String(SlimeUtils.toJsonBytes(root), UTF_8); }
class VespaCliTestRunner implements TestRunner { private static final Logger logger = Logger.getLogger(VespaCliTestRunner.class.getName()); private final SortedMap<Long, LogRecord> log = new ConcurrentSkipListMap<>(); private final Path artifactsPath; private AtomicReference<Status> status = new AtomicReference<>(Status.NOT_STARTED); @Inject public VespaCliTestRunner(VespaCliTestRunnerConfig config) { this(config.artifactsPath().resolve("artifacts")); } VespaCliTestRunner(Path artifactsPath) { this.artifactsPath = artifactsPath; } @Override public Collection<LogRecord> getLog(long after) { return log.tailMap(after + 1).values(); } @Override public Status getStatus() { return status.get(); } @Override public CompletableFuture<?> test(Suite suite, byte[] config) { if (status.getAndSet(RUNNING) == RUNNING) throw new IllegalStateException("Tests already running, not supposed to be started now"); return CompletableFuture.runAsync(() -> runTests(suite, config)); } @Override public boolean isSupported() { return getChildDirectory(artifactsPath, "tests").isPresent(); } void runTests(Suite suite, byte[] config) { Process process = null; try { TestConfig testConfig = TestConfig.fromJson(config); process = testRunProcessBuilder(suite, testConfig).start(); BufferedReader in = new BufferedReader(new InputStreamReader(process.getInputStream())); in.lines().forEach(line -> { if (line.length() > 1 << 13) line = line.substring(0, 1 << 13) + " ... (this log entry was truncated due to size)"; log(Level.INFO, line, null); }); status.set(process.waitFor() == 0 ? SUCCESS : process.waitFor() == 3 ? 
FAILURE : ERROR); } catch (Exception e) { if (process != null) process.destroyForcibly(); log(Level.SEVERE, "Failed running tests", e); status.set(ERROR); } } ProcessBuilder testRunProcessBuilder(Suite suite, TestConfig config) throws IOException { Path suitePath = getChildDirectory(artifactsPath, "tests") .flatMap(testsPath -> getChildDirectory(testsPath, toSuiteDirectoryName(suite))) .orElseThrow(() -> new IllegalStateException("No tests found, for suite '" + suite + "'")); ProcessBuilder builder = new ProcessBuilder("vespa", "test", suitePath.toAbsolutePath().toString(), "--application", config.application().toFullString(), "--endpoints", toEndpointsConfig(config), "--data-plane-public-cert", artifactsPath.resolve("cert").toAbsolutePath().toString(), "--data-plane-private-key", artifactsPath.resolve("key").toAbsolutePath().toString()); builder.redirectErrorStream(true); return builder; } private static String toSuiteDirectoryName(Suite suite) { switch (suite) { case SYSTEM_TEST: return "system-test"; case STAGING_SETUP_TEST: return "staging-setup"; case STAGING_TEST: return "staging-test"; default: throw new IllegalArgumentException("Unsupported test suite '" + suite + "'"); } } private void log(Level level, String message, Throwable thrown) { LogRecord record = new LogRecord(level, message); record.setThrown(thrown); logger.log(record); log.put(record.getSequenceNumber(), record); } private static Optional<Path> getChildDirectory(Path parent, String name) { try (Stream<Path> children = Files.list(parent)) { return children.filter(Files::isDirectory) .filter(path -> path.endsWith(name)) .findAny(); } catch (IOException e) { throw new UncheckedIOException("Failed to list files under " + parent, e); } } }
class VespaCliTestRunner implements TestRunner { private static final Logger logger = Logger.getLogger(VespaCliTestRunner.class.getName()); private final SortedMap<Long, LogRecord> log = new ConcurrentSkipListMap<>(); private final Path artifactsPath; private AtomicReference<Status> status = new AtomicReference<>(Status.NOT_STARTED); @Inject public VespaCliTestRunner(VespaCliTestRunnerConfig config) { this(config.artifactsPath().resolve("artifacts")); } VespaCliTestRunner(Path artifactsPath) { this.artifactsPath = artifactsPath; } @Override public Collection<LogRecord> getLog(long after) { return log.tailMap(after + 1).values(); } @Override public Status getStatus() { return status.get(); } @Override public CompletableFuture<?> test(Suite suite, byte[] config) { if (status.getAndSet(RUNNING) == RUNNING) throw new IllegalStateException("Tests already running, not supposed to be started now"); return CompletableFuture.runAsync(() -> runTests(suite, config)); } @Override public boolean isSupported() { return getChildDirectory(artifactsPath, "tests").isPresent(); } void runTests(Suite suite, byte[] config) { Process process = null; try { TestConfig testConfig = TestConfig.fromJson(config); process = testRunProcessBuilder(suite, testConfig).start(); BufferedReader in = new BufferedReader(new InputStreamReader(process.getInputStream())); in.lines().forEach(line -> { if (line.length() > 1 << 13) line = line.substring(0, 1 << 13) + " ... (this log entry was truncated due to size)"; log(Level.INFO, line, null); }); status.set(process.waitFor() == 0 ? SUCCESS : process.waitFor() == 3 ? 
FAILURE : ERROR); } catch (Exception e) { if (process != null) process.destroyForcibly(); log(Level.SEVERE, "Failed running tests", e); status.set(ERROR); } } ProcessBuilder testRunProcessBuilder(Suite suite, TestConfig config) throws IOException { Path suitePath = getChildDirectory(artifactsPath, "tests") .flatMap(testsPath -> getChildDirectory(testsPath, toSuiteDirectoryName(suite))) .orElseThrow(() -> new IllegalStateException("No tests found, for suite '" + suite + "'")); ProcessBuilder builder = new ProcessBuilder("vespa", "test", suitePath.toAbsolutePath().toString(), "--application", config.application().toFullString(), "--endpoints", toEndpointsConfig(config), "--data-plane-public-cert", artifactsPath.resolve("cert").toAbsolutePath().toString(), "--data-plane-private-key", artifactsPath.resolve("key").toAbsolutePath().toString()); builder.redirectErrorStream(true); return builder; } private static String toSuiteDirectoryName(Suite suite) { switch (suite) { case SYSTEM_TEST: return "system-test"; case STAGING_SETUP_TEST: return "staging-setup"; case STAGING_TEST: return "staging-test"; default: throw new IllegalArgumentException("Unsupported test suite '" + suite + "'"); } } private void log(Level level, String message, Throwable thrown) { LogRecord record = new LogRecord(level, message); record.setThrown(thrown); logger.log(record); log.put(record.getSequenceNumber(), record); } private static Optional<Path> getChildDirectory(Path parent, String name) { try (Stream<Path> children = Files.list(parent)) { return children.filter(Files::isDirectory) .filter(path -> path.endsWith(name)) .findAny(); } catch (IOException e) { throw new UncheckedIOException("Failed to list files under " + parent, e); } } }
You should add scope "zone" to each endpoint
static String toEndpointsConfig(TestConfig config) throws IOException { Cursor root = new Slime().setObject(); Cursor endpointsArray = root.setArray("endpoints"); config.deployments().get(config.zone()).forEach((cluster, url) -> { Cursor endpointObject = endpointsArray.addObject(); endpointObject.setString("cluster", cluster); endpointObject.setString("url", url.toString()); }); return new String(SlimeUtils.toJsonBytes(root), UTF_8); }
config.deployments().get(config.zone()).forEach((cluster, url) -> {
static String toEndpointsConfig(TestConfig config) throws IOException { Cursor root = new Slime().setObject(); Cursor endpointsArray = root.setArray("endpoints"); config.deployments().get(config.zone()).forEach((cluster, url) -> { Cursor endpointObject = endpointsArray.addObject(); endpointObject.setString("cluster", cluster); endpointObject.setString("url", url.toString()); }); return new String(SlimeUtils.toJsonBytes(root), UTF_8); }
class VespaCliTestRunner implements TestRunner { private static final Logger logger = Logger.getLogger(VespaCliTestRunner.class.getName()); private final SortedMap<Long, LogRecord> log = new ConcurrentSkipListMap<>(); private final Path artifactsPath; private AtomicReference<Status> status = new AtomicReference<>(Status.NOT_STARTED); @Inject public VespaCliTestRunner(VespaCliTestRunnerConfig config) { this(config.artifactsPath().resolve("artifacts")); } VespaCliTestRunner(Path artifactsPath) { this.artifactsPath = artifactsPath; } @Override public Collection<LogRecord> getLog(long after) { return log.tailMap(after + 1).values(); } @Override public Status getStatus() { return status.get(); } @Override public CompletableFuture<?> test(Suite suite, byte[] config) { if (status.getAndSet(RUNNING) == RUNNING) throw new IllegalStateException("Tests already running, not supposed to be started now"); return CompletableFuture.runAsync(() -> runTests(suite, config)); } @Override public boolean isSupported() { return getChildDirectory(artifactsPath, "tests").isPresent(); } void runTests(Suite suite, byte[] config) { Process process = null; try { TestConfig testConfig = TestConfig.fromJson(config); process = testRunProcessBuilder(suite, testConfig).start(); BufferedReader in = new BufferedReader(new InputStreamReader(process.getInputStream())); in.lines().forEach(line -> { if (line.length() > 1 << 13) line = line.substring(0, 1 << 13) + " ... (this log entry was truncated due to size)"; log(Level.INFO, line, null); }); status.set(process.waitFor() == 0 ? SUCCESS : process.waitFor() == 3 ? 
FAILURE : ERROR); } catch (Exception e) { if (process != null) process.destroyForcibly(); log(Level.SEVERE, "Failed running tests", e); status.set(ERROR); } } ProcessBuilder testRunProcessBuilder(Suite suite, TestConfig config) throws IOException { Path suitePath = getChildDirectory(artifactsPath, "tests") .flatMap(testsPath -> getChildDirectory(testsPath, toSuiteDirectoryName(suite))) .orElseThrow(() -> new IllegalStateException("No tests found, for suite '" + suite + "'")); ProcessBuilder builder = new ProcessBuilder("vespa", "test", suitePath.toAbsolutePath().toString(), "--application", config.application().toFullString(), "--endpoints", toEndpointsConfig(config), "--data-plane-public-cert", artifactsPath.resolve("cert").toAbsolutePath().toString(), "--data-plane-private-key", artifactsPath.resolve("key").toAbsolutePath().toString()); builder.redirectErrorStream(true); return builder; } private static String toSuiteDirectoryName(Suite suite) { switch (suite) { case SYSTEM_TEST: return "system-test"; case STAGING_SETUP_TEST: return "staging-setup"; case STAGING_TEST: return "staging-test"; default: throw new IllegalArgumentException("Unsupported test suite '" + suite + "'"); } } private void log(Level level, String message, Throwable thrown) { LogRecord record = new LogRecord(level, message); record.setThrown(thrown); logger.log(record); log.put(record.getSequenceNumber(), record); } private static Optional<Path> getChildDirectory(Path parent, String name) { try (Stream<Path> children = Files.list(parent)) { return children.filter(Files::isDirectory) .filter(path -> path.endsWith(name)) .findAny(); } catch (IOException e) { throw new UncheckedIOException("Failed to list files under " + parent, e); } } }
class VespaCliTestRunner implements TestRunner { private static final Logger logger = Logger.getLogger(VespaCliTestRunner.class.getName()); private final SortedMap<Long, LogRecord> log = new ConcurrentSkipListMap<>(); private final Path artifactsPath; private AtomicReference<Status> status = new AtomicReference<>(Status.NOT_STARTED); @Inject public VespaCliTestRunner(VespaCliTestRunnerConfig config) { this(config.artifactsPath().resolve("artifacts")); } VespaCliTestRunner(Path artifactsPath) { this.artifactsPath = artifactsPath; } @Override public Collection<LogRecord> getLog(long after) { return log.tailMap(after + 1).values(); } @Override public Status getStatus() { return status.get(); } @Override public CompletableFuture<?> test(Suite suite, byte[] config) { if (status.getAndSet(RUNNING) == RUNNING) throw new IllegalStateException("Tests already running, not supposed to be started now"); return CompletableFuture.runAsync(() -> runTests(suite, config)); } @Override public boolean isSupported() { return getChildDirectory(artifactsPath, "tests").isPresent(); } void runTests(Suite suite, byte[] config) { Process process = null; try { TestConfig testConfig = TestConfig.fromJson(config); process = testRunProcessBuilder(suite, testConfig).start(); BufferedReader in = new BufferedReader(new InputStreamReader(process.getInputStream())); in.lines().forEach(line -> { if (line.length() > 1 << 13) line = line.substring(0, 1 << 13) + " ... (this log entry was truncated due to size)"; log(Level.INFO, line, null); }); status.set(process.waitFor() == 0 ? SUCCESS : process.waitFor() == 3 ? 
FAILURE : ERROR); } catch (Exception e) { if (process != null) process.destroyForcibly(); log(Level.SEVERE, "Failed running tests", e); status.set(ERROR); } } ProcessBuilder testRunProcessBuilder(Suite suite, TestConfig config) throws IOException { Path suitePath = getChildDirectory(artifactsPath, "tests") .flatMap(testsPath -> getChildDirectory(testsPath, toSuiteDirectoryName(suite))) .orElseThrow(() -> new IllegalStateException("No tests found, for suite '" + suite + "'")); ProcessBuilder builder = new ProcessBuilder("vespa", "test", suitePath.toAbsolutePath().toString(), "--application", config.application().toFullString(), "--endpoints", toEndpointsConfig(config), "--data-plane-public-cert", artifactsPath.resolve("cert").toAbsolutePath().toString(), "--data-plane-private-key", artifactsPath.resolve("key").toAbsolutePath().toString()); builder.redirectErrorStream(true); return builder; } private static String toSuiteDirectoryName(Suite suite) { switch (suite) { case SYSTEM_TEST: return "system-test"; case STAGING_SETUP_TEST: return "staging-setup"; case STAGING_TEST: return "staging-test"; default: throw new IllegalArgumentException("Unsupported test suite '" + suite + "'"); } } private void log(Level level, String message, Throwable thrown) { LogRecord record = new LogRecord(level, message); record.setThrown(thrown); logger.log(record); log.put(record.getSequenceNumber(), record); } private static Optional<Path> getChildDirectory(Path parent, String name) { try (Stream<Path> children = Files.list(parent)) { return children.filter(Files::isDirectory) .filter(path -> path.endsWith(name)) .findAny(); } catch (IOException e) { throw new UncheckedIOException("Failed to list files under " + parent, e); } } }
Ah. So we probably end up with custom parser code for this anyway, so we don't need those. See following PR.
static String toEndpointsConfig(TestConfig config) throws IOException { Cursor root = new Slime().setObject(); Cursor endpointsArray = root.setArray("endpoints"); config.deployments().get(config.zone()).forEach((cluster, url) -> { Cursor endpointObject = endpointsArray.addObject(); endpointObject.setString("cluster", cluster); endpointObject.setString("url", url.toString()); }); return new String(SlimeUtils.toJsonBytes(root), UTF_8); }
config.deployments().get(config.zone()).forEach((cluster, url) -> {
static String toEndpointsConfig(TestConfig config) throws IOException { Cursor root = new Slime().setObject(); Cursor endpointsArray = root.setArray("endpoints"); config.deployments().get(config.zone()).forEach((cluster, url) -> { Cursor endpointObject = endpointsArray.addObject(); endpointObject.setString("cluster", cluster); endpointObject.setString("url", url.toString()); }); return new String(SlimeUtils.toJsonBytes(root), UTF_8); }
class VespaCliTestRunner implements TestRunner { private static final Logger logger = Logger.getLogger(VespaCliTestRunner.class.getName()); private final SortedMap<Long, LogRecord> log = new ConcurrentSkipListMap<>(); private final Path artifactsPath; private AtomicReference<Status> status = new AtomicReference<>(Status.NOT_STARTED); @Inject public VespaCliTestRunner(VespaCliTestRunnerConfig config) { this(config.artifactsPath().resolve("artifacts")); } VespaCliTestRunner(Path artifactsPath) { this.artifactsPath = artifactsPath; } @Override public Collection<LogRecord> getLog(long after) { return log.tailMap(after + 1).values(); } @Override public Status getStatus() { return status.get(); } @Override public CompletableFuture<?> test(Suite suite, byte[] config) { if (status.getAndSet(RUNNING) == RUNNING) throw new IllegalStateException("Tests already running, not supposed to be started now"); return CompletableFuture.runAsync(() -> runTests(suite, config)); } @Override public boolean isSupported() { return getChildDirectory(artifactsPath, "tests").isPresent(); } void runTests(Suite suite, byte[] config) { Process process = null; try { TestConfig testConfig = TestConfig.fromJson(config); process = testRunProcessBuilder(suite, testConfig).start(); BufferedReader in = new BufferedReader(new InputStreamReader(process.getInputStream())); in.lines().forEach(line -> { if (line.length() > 1 << 13) line = line.substring(0, 1 << 13) + " ... (this log entry was truncated due to size)"; log(Level.INFO, line, null); }); status.set(process.waitFor() == 0 ? SUCCESS : process.waitFor() == 3 ? 
FAILURE : ERROR); } catch (Exception e) { if (process != null) process.destroyForcibly(); log(Level.SEVERE, "Failed running tests", e); status.set(ERROR); } } ProcessBuilder testRunProcessBuilder(Suite suite, TestConfig config) throws IOException { Path suitePath = getChildDirectory(artifactsPath, "tests") .flatMap(testsPath -> getChildDirectory(testsPath, toSuiteDirectoryName(suite))) .orElseThrow(() -> new IllegalStateException("No tests found, for suite '" + suite + "'")); ProcessBuilder builder = new ProcessBuilder("vespa", "test", suitePath.toAbsolutePath().toString(), "--application", config.application().toFullString(), "--endpoints", toEndpointsConfig(config), "--data-plane-public-cert", artifactsPath.resolve("cert").toAbsolutePath().toString(), "--data-plane-private-key", artifactsPath.resolve("key").toAbsolutePath().toString()); builder.redirectErrorStream(true); return builder; } private static String toSuiteDirectoryName(Suite suite) { switch (suite) { case SYSTEM_TEST: return "system-test"; case STAGING_SETUP_TEST: return "staging-setup"; case STAGING_TEST: return "staging-test"; default: throw new IllegalArgumentException("Unsupported test suite '" + suite + "'"); } } private void log(Level level, String message, Throwable thrown) { LogRecord record = new LogRecord(level, message); record.setThrown(thrown); logger.log(record); log.put(record.getSequenceNumber(), record); } private static Optional<Path> getChildDirectory(Path parent, String name) { try (Stream<Path> children = Files.list(parent)) { return children.filter(Files::isDirectory) .filter(path -> path.endsWith(name)) .findAny(); } catch (IOException e) { throw new UncheckedIOException("Failed to list files under " + parent, e); } } }
class VespaCliTestRunner implements TestRunner { private static final Logger logger = Logger.getLogger(VespaCliTestRunner.class.getName()); private final SortedMap<Long, LogRecord> log = new ConcurrentSkipListMap<>(); private final Path artifactsPath; private AtomicReference<Status> status = new AtomicReference<>(Status.NOT_STARTED); @Inject public VespaCliTestRunner(VespaCliTestRunnerConfig config) { this(config.artifactsPath().resolve("artifacts")); } VespaCliTestRunner(Path artifactsPath) { this.artifactsPath = artifactsPath; } @Override public Collection<LogRecord> getLog(long after) { return log.tailMap(after + 1).values(); } @Override public Status getStatus() { return status.get(); } @Override public CompletableFuture<?> test(Suite suite, byte[] config) { if (status.getAndSet(RUNNING) == RUNNING) throw new IllegalStateException("Tests already running, not supposed to be started now"); return CompletableFuture.runAsync(() -> runTests(suite, config)); } @Override public boolean isSupported() { return getChildDirectory(artifactsPath, "tests").isPresent(); } void runTests(Suite suite, byte[] config) { Process process = null; try { TestConfig testConfig = TestConfig.fromJson(config); process = testRunProcessBuilder(suite, testConfig).start(); BufferedReader in = new BufferedReader(new InputStreamReader(process.getInputStream())); in.lines().forEach(line -> { if (line.length() > 1 << 13) line = line.substring(0, 1 << 13) + " ... (this log entry was truncated due to size)"; log(Level.INFO, line, null); }); status.set(process.waitFor() == 0 ? SUCCESS : process.waitFor() == 3 ? 
FAILURE : ERROR); } catch (Exception e) { if (process != null) process.destroyForcibly(); log(Level.SEVERE, "Failed running tests", e); status.set(ERROR); } } ProcessBuilder testRunProcessBuilder(Suite suite, TestConfig config) throws IOException { Path suitePath = getChildDirectory(artifactsPath, "tests") .flatMap(testsPath -> getChildDirectory(testsPath, toSuiteDirectoryName(suite))) .orElseThrow(() -> new IllegalStateException("No tests found, for suite '" + suite + "'")); ProcessBuilder builder = new ProcessBuilder("vespa", "test", suitePath.toAbsolutePath().toString(), "--application", config.application().toFullString(), "--endpoints", toEndpointsConfig(config), "--data-plane-public-cert", artifactsPath.resolve("cert").toAbsolutePath().toString(), "--data-plane-private-key", artifactsPath.resolve("key").toAbsolutePath().toString()); builder.redirectErrorStream(true); return builder; } private static String toSuiteDirectoryName(Suite suite) { switch (suite) { case SYSTEM_TEST: return "system-test"; case STAGING_SETUP_TEST: return "staging-setup"; case STAGING_TEST: return "staging-test"; default: throw new IllegalArgumentException("Unsupported test suite '" + suite + "'"); } } private void log(Level level, String message, Throwable thrown) { LogRecord record = new LogRecord(level, message); record.setThrown(thrown); logger.log(record); log.put(record.getSequenceNumber(), record); } private static Optional<Path> getChildDirectory(Path parent, String name) { try (Stream<Path> children = Files.list(parent)) { return children.filter(Files::isDirectory) .filter(path -> path.endsWith(name)) .findAny(); } catch (IOException e) { throw new UncheckedIOException("Failed to list files under " + parent, e); } } }
I would reverse the order for readability. The most important one first. I would also reverse conditionNotMet to conditionMet to avoid the implicit double negation. if (reply.hasErrors() && conditionMet)
public void handleReply(Reply reply) { Object o = reply.getContext(); if (!(o instanceof ReplyContext)) { return; } ReplyContext context = (ReplyContext) o; final double latencyInSeconds = (System.currentTimeMillis() - context.creationTime) / 1000.0d; metric.set(MetricNames.LATENCY, latencyInSeconds, null); DocumentOperationType type = DocumentOperationType.fromMessage(reply.getMessage()); boolean conditionNotMet = conditionNotMet(reply); if (!conditionNotMet && reply.hasErrors()) { DocumentOperationStatus status = DocumentOperationStatus.fromMessageBusErrorCodes(reply.getErrorCodes()); metricsHelper.reportFailure(type, status); metric.add(MetricNames.FAILED, 1, null); enqueue(context, reply.getError(0).getMessage(), ErrorCode.ERROR, conditionNotMet, reply.getTrace()); } else { metricsHelper.reportSuccessful(type, latencyInSeconds); metric.add(MetricNames.SUCCEEDED, 1, null); if (conditionNotMet) metric.add(MetricNames.TEST_AND_SET_CONDITION_NOT_MET, 1, null); enqueue(context, "Document processed.", ErrorCode.OK, false, reply.getTrace()); } }
if (!conditionNotMet && reply.hasErrors()) {
public void handleReply(Reply reply) { Object o = reply.getContext(); if (!(o instanceof ReplyContext)) { return; } ReplyContext context = (ReplyContext) o; final double latencyInSeconds = (System.currentTimeMillis() - context.creationTime) / 1000.0d; metric.set(MetricNames.LATENCY, latencyInSeconds, null); DocumentOperationType type = DocumentOperationType.fromMessage(reply.getMessage()); boolean conditionMet = conditionMet(reply); if (reply.hasErrors() && conditionMet) { DocumentOperationStatus status = DocumentOperationStatus.fromMessageBusErrorCodes(reply.getErrorCodes()); metricsHelper.reportFailure(type, status); metric.add(MetricNames.FAILED, 1, null); enqueue(context, reply.getError(0).getMessage(), ErrorCode.ERROR, false, reply.getTrace()); } else { metricsHelper.reportSuccessful(type, latencyInSeconds); metric.add(MetricNames.SUCCEEDED, 1, null); if (!conditionMet) metric.add(MetricNames.TEST_AND_SET_CONDITION_NOT_MET, 1, null); enqueue(context, "Document processed.", ErrorCode.OK, !conditionMet, reply.getTrace()); } }
class FeedReplyReader implements ReplyHandler { private static final Logger log = Logger.getLogger(FeedReplyReader.class.getName()); private final Metric metric; private final DocumentApiMetrics metricsHelper; public FeedReplyReader(Metric metric, DocumentApiMetrics metricsHelper) { this.metric = metric; this.metricsHelper = metricsHelper; } @Override private static boolean conditionNotMet(Reply reply) { return reply.hasErrors() && reply.getError(0).getCode() == DocumentProtocol.ERROR_TEST_AND_SET_CONDITION_FAILED; } private void enqueue(ReplyContext context, String message, ErrorCode status, boolean isConditionNotMet, Trace trace) { try { String traceMessage = (trace != null && trace.getLevel() > 0) ? trace.toString() : ""; context.feedReplies.put(new OperationStatus(message, context.docId, status, isConditionNotMet, traceMessage)); } catch (InterruptedException e) { log.log(Level.WARNING, "Interrupted while enqueueing result from putting document with id: " + context.docId); Thread.currentThread().interrupt(); } } }
class FeedReplyReader implements ReplyHandler { private static final Logger log = Logger.getLogger(FeedReplyReader.class.getName()); private final Metric metric; private final DocumentApiMetrics metricsHelper; public FeedReplyReader(Metric metric, DocumentApiMetrics metricsHelper) { this.metric = metric; this.metricsHelper = metricsHelper; } @Override private static boolean conditionMet(Reply reply) { return !reply.hasErrors() || reply.getError(0).getCode() != DocumentProtocol.ERROR_TEST_AND_SET_CONDITION_FAILED; } private void enqueue(ReplyContext context, String message, ErrorCode status, boolean isConditionNotMet, Trace trace) { try { String traceMessage = (trace != null && trace.getLevel() > 0) ? trace.toString() : ""; context.feedReplies.put(new OperationStatus(message, context.docId, status, isConditionNotMet, traceMessage)); } catch (InterruptedException e) { log.log(Level.WARNING, "Interrupted while enqueueing result from putting document with id: " + context.docId); Thread.currentThread().interrupt(); } } }
This is not used until line 59, so consider moving declaration down.
void testSetup() throws IOException { Path temp = Files.createTempDirectory("vespa-cli-test-runner-test-"); temp.toFile().deleteOnExit(); Path tests = Files.createDirectory(temp.resolve("tests")); Path artifacts = Files.createDirectory(temp.resolve("artifacts")); VespaCliTestRunner runner = new VespaCliTestRunner(artifacts, tests); assertFalse(runner.isSupported()); Path systemTests = Files.createDirectory(tests.resolve("system-test")); assertTrue(runner.isSupported()); IllegalStateException ise = assertThrows(IllegalStateException.class, () -> runner.testRunProcessBuilder(TestRunner.Suite.STAGING_TEST, testConfig)); assertEquals("No tests found, for suite 'STAGING_TEST'", ise.getMessage()); ProcessBuilder builder = runner.testRunProcessBuilder(TestRunner.Suite.SYSTEM_TEST, testConfig); assertEquals(List.of("vespa", "test", systemTests.toAbsolutePath().toString(), "--application", "t.a.i", "--zone", "dev.aws-us-east-1c"), builder.command()); assertEquals("{\"endpoints\":[{\"cluster\":\"default\",\"url\":\"https: builder.environment().get("VESPA_CLI_ENDPOINTS")); assertEquals(artifacts.resolve("key").toAbsolutePath().toString(), builder.environment().get("VESPA_CLI_DATA_PLANE_KEY_FILE")); assertEquals(artifacts.resolve("cert").toAbsolutePath().toString(), builder.environment().get("VESPA_CLI_DATA_PLANE_CERT_FILE")); }
Path systemTests = Files.createDirectory(tests.resolve("system-test"));
void testSetup() throws IOException { Path temp = Files.createTempDirectory("vespa-cli-test-runner-test-"); temp.toFile().deleteOnExit(); Path tests = Files.createDirectory(temp.resolve("tests")); Path artifacts = Files.createDirectory(temp.resolve("artifacts")); VespaCliTestRunner runner = new VespaCliTestRunner(artifacts, tests); assertFalse(runner.isSupported()); Path systemTests = Files.createDirectory(tests.resolve("system-test")); assertTrue(runner.isSupported()); IllegalStateException ise = assertThrows(IllegalStateException.class, () -> runner.testRunProcessBuilder(TestRunner.Suite.STAGING_TEST, testConfig)); assertEquals("No tests found, for suite 'STAGING_TEST'", ise.getMessage()); ProcessBuilder builder = runner.testRunProcessBuilder(TestRunner.Suite.SYSTEM_TEST, testConfig); assertEquals(List.of("vespa", "test", systemTests.toAbsolutePath().toString(), "--application", "t.a.i", "--zone", "dev.aws-us-east-1c"), builder.command()); assertEquals("{\"endpoints\":[{\"cluster\":\"default\",\"url\":\"https: builder.environment().get("VESPA_CLI_ENDPOINTS")); assertEquals(artifacts.resolve("key").toAbsolutePath().toString(), builder.environment().get("VESPA_CLI_DATA_PLANE_KEY_FILE")); assertEquals(artifacts.resolve("cert").toAbsolutePath().toString(), builder.environment().get("VESPA_CLI_DATA_PLANE_CERT_FILE")); }
class VespaCliTestRunnerTest { static final TestConfig testConfig = TestConfig.fromJson(("{\n" + " \"application\": \"t:a:i\",\n" + " \"zone\": \"dev.aws-us-east-1c\",\n" + " \"system\": \"publiccd\",\n" + " \"isCI\": true,\n" + " \"zoneEndpoints\": {\n" + " \"dev.aws-us-east-1c\": {\n" + " \"default\": \"https: " },\n" + " \"prod.aws-us-east-1a\": {\n" + " \"default\": \"https: " }\n" + " },\n" + " \"clusters\": {\n" + " \"prod.aws-us-east-1c\": [\n" + " \"documents\"\n" + " ]\n" + " }\n" + "}\n").getBytes(StandardCharsets.UTF_8)); @Test }
class VespaCliTestRunnerTest { static final TestConfig testConfig = TestConfig.fromJson(("{\n" + " \"application\": \"t:a:i\",\n" + " \"zone\": \"dev.aws-us-east-1c\",\n" + " \"system\": \"publiccd\",\n" + " \"isCI\": true,\n" + " \"zoneEndpoints\": {\n" + " \"dev.aws-us-east-1c\": {\n" + " \"default\": \"https: " },\n" + " \"prod.aws-us-east-1a\": {\n" + " \"default\": \"https: " }\n" + " },\n" + " \"clusters\": {\n" + " \"prod.aws-us-east-1c\": [\n" + " \"documents\"\n" + " ]\n" + " }\n" + "}\n").getBytes(StandardCharsets.UTF_8)); @Test }
It's the thing that makes the test "supported", so it needs to stay precisely where it is :)
void testSetup() throws IOException { Path temp = Files.createTempDirectory("vespa-cli-test-runner-test-"); temp.toFile().deleteOnExit(); Path tests = Files.createDirectory(temp.resolve("tests")); Path artifacts = Files.createDirectory(temp.resolve("artifacts")); VespaCliTestRunner runner = new VespaCliTestRunner(artifacts, tests); assertFalse(runner.isSupported()); Path systemTests = Files.createDirectory(tests.resolve("system-test")); assertTrue(runner.isSupported()); IllegalStateException ise = assertThrows(IllegalStateException.class, () -> runner.testRunProcessBuilder(TestRunner.Suite.STAGING_TEST, testConfig)); assertEquals("No tests found, for suite 'STAGING_TEST'", ise.getMessage()); ProcessBuilder builder = runner.testRunProcessBuilder(TestRunner.Suite.SYSTEM_TEST, testConfig); assertEquals(List.of("vespa", "test", systemTests.toAbsolutePath().toString(), "--application", "t.a.i", "--zone", "dev.aws-us-east-1c"), builder.command()); assertEquals("{\"endpoints\":[{\"cluster\":\"default\",\"url\":\"https: builder.environment().get("VESPA_CLI_ENDPOINTS")); assertEquals(artifacts.resolve("key").toAbsolutePath().toString(), builder.environment().get("VESPA_CLI_DATA_PLANE_KEY_FILE")); assertEquals(artifacts.resolve("cert").toAbsolutePath().toString(), builder.environment().get("VESPA_CLI_DATA_PLANE_CERT_FILE")); }
Path systemTests = Files.createDirectory(tests.resolve("system-test"));
void testSetup() throws IOException { Path temp = Files.createTempDirectory("vespa-cli-test-runner-test-"); temp.toFile().deleteOnExit(); Path tests = Files.createDirectory(temp.resolve("tests")); Path artifacts = Files.createDirectory(temp.resolve("artifacts")); VespaCliTestRunner runner = new VespaCliTestRunner(artifacts, tests); assertFalse(runner.isSupported()); Path systemTests = Files.createDirectory(tests.resolve("system-test")); assertTrue(runner.isSupported()); IllegalStateException ise = assertThrows(IllegalStateException.class, () -> runner.testRunProcessBuilder(TestRunner.Suite.STAGING_TEST, testConfig)); assertEquals("No tests found, for suite 'STAGING_TEST'", ise.getMessage()); ProcessBuilder builder = runner.testRunProcessBuilder(TestRunner.Suite.SYSTEM_TEST, testConfig); assertEquals(List.of("vespa", "test", systemTests.toAbsolutePath().toString(), "--application", "t.a.i", "--zone", "dev.aws-us-east-1c"), builder.command()); assertEquals("{\"endpoints\":[{\"cluster\":\"default\",\"url\":\"https: builder.environment().get("VESPA_CLI_ENDPOINTS")); assertEquals(artifacts.resolve("key").toAbsolutePath().toString(), builder.environment().get("VESPA_CLI_DATA_PLANE_KEY_FILE")); assertEquals(artifacts.resolve("cert").toAbsolutePath().toString(), builder.environment().get("VESPA_CLI_DATA_PLANE_CERT_FILE")); }
class VespaCliTestRunnerTest { static final TestConfig testConfig = TestConfig.fromJson(("{\n" + " \"application\": \"t:a:i\",\n" + " \"zone\": \"dev.aws-us-east-1c\",\n" + " \"system\": \"publiccd\",\n" + " \"isCI\": true,\n" + " \"zoneEndpoints\": {\n" + " \"dev.aws-us-east-1c\": {\n" + " \"default\": \"https: " },\n" + " \"prod.aws-us-east-1a\": {\n" + " \"default\": \"https: " }\n" + " },\n" + " \"clusters\": {\n" + " \"prod.aws-us-east-1c\": [\n" + " \"documents\"\n" + " ]\n" + " }\n" + "}\n").getBytes(StandardCharsets.UTF_8)); @Test }
class VespaCliTestRunnerTest { static final TestConfig testConfig = TestConfig.fromJson(("{\n" + " \"application\": \"t:a:i\",\n" + " \"zone\": \"dev.aws-us-east-1c\",\n" + " \"system\": \"publiccd\",\n" + " \"isCI\": true,\n" + " \"zoneEndpoints\": {\n" + " \"dev.aws-us-east-1c\": {\n" + " \"default\": \"https: " },\n" + " \"prod.aws-us-east-1a\": {\n" + " \"default\": \"https: " }\n" + " },\n" + " \"clusters\": {\n" + " \"prod.aws-us-east-1c\": [\n" + " \"documents\"\n" + " ]\n" + " }\n" + "}\n").getBytes(StandardCharsets.UTF_8)); @Test }
Ah, for some reason I didn't see the multiplex declaration (w/ createDirectory()).
void testSetup() throws IOException { Path temp = Files.createTempDirectory("vespa-cli-test-runner-test-"); temp.toFile().deleteOnExit(); Path tests = Files.createDirectory(temp.resolve("tests")); Path artifacts = Files.createDirectory(temp.resolve("artifacts")); VespaCliTestRunner runner = new VespaCliTestRunner(artifacts, tests); assertFalse(runner.isSupported()); Path systemTests = Files.createDirectory(tests.resolve("system-test")); assertTrue(runner.isSupported()); IllegalStateException ise = assertThrows(IllegalStateException.class, () -> runner.testRunProcessBuilder(TestRunner.Suite.STAGING_TEST, testConfig)); assertEquals("No tests found, for suite 'STAGING_TEST'", ise.getMessage()); ProcessBuilder builder = runner.testRunProcessBuilder(TestRunner.Suite.SYSTEM_TEST, testConfig); assertEquals(List.of("vespa", "test", systemTests.toAbsolutePath().toString(), "--application", "t.a.i", "--zone", "dev.aws-us-east-1c"), builder.command()); assertEquals("{\"endpoints\":[{\"cluster\":\"default\",\"url\":\"https: builder.environment().get("VESPA_CLI_ENDPOINTS")); assertEquals(artifacts.resolve("key").toAbsolutePath().toString(), builder.environment().get("VESPA_CLI_DATA_PLANE_KEY_FILE")); assertEquals(artifacts.resolve("cert").toAbsolutePath().toString(), builder.environment().get("VESPA_CLI_DATA_PLANE_CERT_FILE")); }
Path systemTests = Files.createDirectory(tests.resolve("system-test"));
void testSetup() throws IOException { Path temp = Files.createTempDirectory("vespa-cli-test-runner-test-"); temp.toFile().deleteOnExit(); Path tests = Files.createDirectory(temp.resolve("tests")); Path artifacts = Files.createDirectory(temp.resolve("artifacts")); VespaCliTestRunner runner = new VespaCliTestRunner(artifacts, tests); assertFalse(runner.isSupported()); Path systemTests = Files.createDirectory(tests.resolve("system-test")); assertTrue(runner.isSupported()); IllegalStateException ise = assertThrows(IllegalStateException.class, () -> runner.testRunProcessBuilder(TestRunner.Suite.STAGING_TEST, testConfig)); assertEquals("No tests found, for suite 'STAGING_TEST'", ise.getMessage()); ProcessBuilder builder = runner.testRunProcessBuilder(TestRunner.Suite.SYSTEM_TEST, testConfig); assertEquals(List.of("vespa", "test", systemTests.toAbsolutePath().toString(), "--application", "t.a.i", "--zone", "dev.aws-us-east-1c"), builder.command()); assertEquals("{\"endpoints\":[{\"cluster\":\"default\",\"url\":\"https: builder.environment().get("VESPA_CLI_ENDPOINTS")); assertEquals(artifacts.resolve("key").toAbsolutePath().toString(), builder.environment().get("VESPA_CLI_DATA_PLANE_KEY_FILE")); assertEquals(artifacts.resolve("cert").toAbsolutePath().toString(), builder.environment().get("VESPA_CLI_DATA_PLANE_CERT_FILE")); }
class VespaCliTestRunnerTest { static final TestConfig testConfig = TestConfig.fromJson(("{\n" + " \"application\": \"t:a:i\",\n" + " \"zone\": \"dev.aws-us-east-1c\",\n" + " \"system\": \"publiccd\",\n" + " \"isCI\": true,\n" + " \"zoneEndpoints\": {\n" + " \"dev.aws-us-east-1c\": {\n" + " \"default\": \"https: " },\n" + " \"prod.aws-us-east-1a\": {\n" + " \"default\": \"https: " }\n" + " },\n" + " \"clusters\": {\n" + " \"prod.aws-us-east-1c\": [\n" + " \"documents\"\n" + " ]\n" + " }\n" + "}\n").getBytes(StandardCharsets.UTF_8)); @Test }
class VespaCliTestRunnerTest { static final TestConfig testConfig = TestConfig.fromJson(("{\n" + " \"application\": \"t:a:i\",\n" + " \"zone\": \"dev.aws-us-east-1c\",\n" + " \"system\": \"publiccd\",\n" + " \"isCI\": true,\n" + " \"zoneEndpoints\": {\n" + " \"dev.aws-us-east-1c\": {\n" + " \"default\": \"https: " },\n" + " \"prod.aws-us-east-1a\": {\n" + " \"default\": \"https: " }\n" + " },\n" + " \"clusters\": {\n" + " \"prod.aws-us-east-1c\": [\n" + " \"documents\"\n" + " ]\n" + " }\n" + "}\n").getBytes(StandardCharsets.UTF_8)); @Test }
Please add actual and configured item count to the error message.
private static String canonicalize(QueryTree query, Integer maxQueryItems) { ListIterator<Item> rootItemIterator = query.getItemIterator(); CanonicalizationResult result = recursivelyCanonicalize(rootItemIterator.next(), rootItemIterator); if (query.isEmpty() && ! result.isError()) result = CanonicalizationResult.error("No query"); if (query.getTreeSize() > maxQueryItems) result = CanonicalizationResult.error("Query tree exceeds allowed item count"); return result.error().orElse(null); }
if (query.getTreeSize() > maxQueryItems) result = CanonicalizationResult.error("Query tree exceeds allowed item count");
private static String canonicalize(QueryTree query, Integer maxQueryItems) { ListIterator<Item> rootItemIterator = query.getItemIterator(); CanonicalizationResult result = recursivelyCanonicalize(rootItemIterator.next(), rootItemIterator); if (query.isEmpty() && ! result.isError()) result = CanonicalizationResult.error("No query"); int itemCount = query.treeSize(); if (itemCount > maxQueryItems) result = CanonicalizationResult.error(String.format("Query tree exceeds allowed item count. Configured limit: %d - Item count: %d", maxQueryItems, itemCount)); return result.error().orElse(null); }
// Canonicalizes query trees: collapses nested AND/OR/WeakAnd levels, propagates FALSE items,
// removes duplicates inside EQUIV, disables rank calculation for duplicated rank terms, and
// rejects queries consisting only of negative items.
// NOTE(review): these lines were flattened by extraction and break mid-expression at line
// boundaries; additionally, the javadoc "Canonicalizes this query" below is followed by no
// method body — the private canonicalize(QueryTree, Integer) overload appears to have been
// dropped from this copy. Confirm against the original QueryCanonicalizer source.
class QueryCanonicalizer { /** The name of the operation performed by this, for use in search chain ordering */ public static final String queryCanonicalization = "queryCanonicalization"; private static final CompoundName MAX_QUERY_ITEMS = new CompoundName("maxQueryItems"); /** * Validates this query and carries out possible operations on this query * which simplifies it without changing its semantics. * * @return null if the query is valid, an error message if it is invalid */ public static String canonicalize(Query query) { Integer maxQueryItems = query.properties().getInteger(MAX_QUERY_ITEMS, Integer.MAX_VALUE); return canonicalize(query.getModel().getQueryTree(), maxQueryItems); } /** * Canonicalizes this query, allowing any query tree size * * @return null if the query is valid, an error message if it is invalid */ public static String canonicalize(QueryTree queryTree) { return canonicalize(queryTree, Integer.MAX_VALUE); } /** * Canonicalizes this query * * @return null if the query is valid, an error message if it is invalid */ /** * Canonicalize this query * * @param item the item to canonicalize * @param parentIterator iterator for the parent of this item, never null * @return true if the given query is valid, false otherwise */ private static CanonicalizationResult recursivelyCanonicalize(Item item, ListIterator<Item> parentIterator) { if (item instanceof CompositeItem) { CompositeItem composite = (CompositeItem)item; for (ListIterator<Item> i = composite.getItemIterator(); i.hasNext(); ) { CanonicalizationResult childResult = recursivelyCanonicalize(i.next(), i); if (childResult.isError()) return childResult; } } return canonicalizeThis(item, parentIterator); } private static CanonicalizationResult canonicalizeThis(Item item, ListIterator<Item> parentIterator) { if (item instanceof NullItem) parentIterator.remove(); if ( ! 
(item instanceof CompositeItem)) return CanonicalizationResult.success(); CompositeItem composite = (CompositeItem)item; boolean replacedByFalse = collapseFalse(composite, parentIterator); if (replacedByFalse) return CanonicalizationResult.success(); collapseLevels(composite); if (composite instanceof EquivItem) { removeDuplicates((EquivItem) composite); } else if (composite instanceof RankItem) { makeDuplicatesCheap((RankItem)composite); } else if (composite instanceof NotItem) { if (((NotItem) composite).getPositiveItem() == null) return CanonicalizationResult.error("Can not search for only negative items"); } if (composite.getItemCount() == 0) parentIterator.remove(); composite.extractSingleChild().ifPresent(extractedChild -> parentIterator.set(extractedChild)); return CanonicalizationResult.success(); } private static void collapseLevels(CompositeItem composite) { if (composite instanceof RankItem || composite instanceof NotItem) { collapseLevels(composite, composite.getItemIterator()); } else if (composite instanceof AndItem || composite instanceof OrItem || composite instanceof WeakAndItem) { for (ListIterator<Item> i = composite.getItemIterator(); i.hasNext(); ) collapseLevels(composite, i); } } /** Collapse the next item of this iterator into the given parent, if appropriate */ private static void collapseLevels(CompositeItem composite, ListIterator<Item> i) { if ( ! i.hasNext()) return; Item child = i.next(); if (child == null) return; if (child.getClass() != composite.getClass()) return; if (child instanceof WeakAndItem && !equalWeakAndSettings((WeakAndItem)child, (WeakAndItem)composite)) return; i.remove(); moveChildren((CompositeItem) child, i); } private static boolean equalWeakAndSettings(WeakAndItem a, WeakAndItem b) { if ( ! 
a.getIndexName().equals(b.getIndexName())) return false; if (a.getN() != b.getN()) return false; return true; } private static void moveChildren(CompositeItem from, ListIterator<Item> toIterator) { for (ListIterator<Item> i = from.getItemIterator(); i.hasNext(); ) toIterator.add(i.next()); } /** * Handle FALSE items in the immediate children of this * * @return true if this composite was replaced by FALSE */ private static boolean collapseFalse(CompositeItem composite, ListIterator<Item> parentIterator) { if ( ! containsFalse(composite)) return false; if (composite instanceof AndItem) { parentIterator.set(new FalseItem()); return true; } else if (composite instanceof OrItem) { removeFalseIn(composite.getItemIterator()); return false; } else if (composite instanceof NotItem || composite instanceof RankItem) { ListIterator<Item> i = composite.getItemIterator(); if (i.next() instanceof FalseItem) { parentIterator.set(new FalseItem()); return true; } else { removeFalseIn(i); return false; } } else { return false; } } private static boolean containsFalse(CompositeItem composite) { for (ListIterator<Item> i = composite.getItemIterator(); i.hasNext(); ) if (i.next() instanceof FalseItem) return true; return false; } private static void removeFalseIn(ListIterator<Item> iterator) { while (iterator.hasNext()) if (iterator.next() instanceof FalseItem) iterator.remove(); } private static void removeDuplicates(EquivItem composite) { int origSize = composite.getItemCount(); for (int i = origSize - 1; i >= 1; --i) { Item deleteCandidate = composite.getItem(i); for (int j = 0; j < i; ++j) { Item check = composite.getItem(j); if (deleteCandidate.getClass() == check.getClass()) { if (deleteCandidate instanceof PhraseItem) { PhraseItem phraseDeletionCandidate = (PhraseItem) deleteCandidate; PhraseItem phraseToCheck = (PhraseItem) check; if (phraseDeletionCandidate.getIndexedString().equals(phraseToCheck.getIndexedString())) { composite.removeItem(i); break; } } else if 
(deleteCandidate instanceof PhraseSegmentItem) { PhraseSegmentItem phraseSegmentDeletionCandidate = (PhraseSegmentItem) deleteCandidate; PhraseSegmentItem phraseSegmentToCheck = (PhraseSegmentItem) check; if (phraseSegmentDeletionCandidate.getIndexedString().equals(phraseSegmentToCheck.getIndexedString())) { composite.removeItem(i); break; } } else if (deleteCandidate instanceof BlockItem) { BlockItem blockDeletionCandidate = (BlockItem) deleteCandidate; BlockItem blockToCheck = (BlockItem) check; if (blockDeletionCandidate.stringValue().equals(blockToCheck.stringValue())) { composite.removeItem(i); break; } } } } } } /** * If a term is present as both a rank term (i.e not the first child) and in * the match condition (first child), then turn off any rank calculation for * the term during matching, as it will be made available anyway for matches * by the same term in the rank part. * * @param rankItem an item which will be simplified in place */ private static void makeDuplicatesCheap(RankItem rankItem) { Set<TermItem> rankTerms = new HashSet<>(); for (int i = 1; i < rankItem.getItemCount(); i++) { if (rankItem.getItem(i) instanceof TermItem) rankTerms.add((TermItem)rankItem.getItem(i)); } makeDuplicatesCheap(rankItem.getItem(0), rankTerms); } private static void makeDuplicatesCheap(Item item, Set<TermItem> rankTerms) { if (item instanceof CompositeItem) { for (ListIterator<Item> i = ((CompositeItem)item).getItemIterator(); i.hasNext();) makeDuplicatesCheap(i.next(), rankTerms); } else if (rankTerms.contains(item)) { item.setRanked(false); item.setPositionData(false); } } public static class CanonicalizationResult { private final Optional<String> error; private CanonicalizationResult(Optional<String> error) { this.error = error; } /** Returns the error of this query, or empty if it is a valid query */ public Optional<String> error() { return error; } public static CanonicalizationResult error(String error) { return new CanonicalizationResult(Optional.of(error)); } 
// CanonicalizationResult: a lightweight success/error carrier; error() is empty on success.
public static CanonicalizationResult success() { return new CanonicalizationResult(Optional.empty()); } public boolean isError() { return error.isPresent(); } } }
// NOTE(review): this appears to be a byte-identical duplicate copy of the QueryCanonicalizer
// class (presumably a repeated dataset/context field — two identical top-level classes would
// not compile in a real Java file). Confirm and deduplicate. Same extraction caveats apply:
// lines break mid-expression and the private canonicalize(QueryTree, Integer) overload body
// is missing after its javadoc.
class QueryCanonicalizer { /** The name of the operation performed by this, for use in search chain ordering */ public static final String queryCanonicalization = "queryCanonicalization"; private static final CompoundName MAX_QUERY_ITEMS = new CompoundName("maxQueryItems"); /** * Validates this query and carries out possible operations on this query * which simplifies it without changing its semantics. * * @return null if the query is valid, an error message if it is invalid */ public static String canonicalize(Query query) { Integer maxQueryItems = query.properties().getInteger(MAX_QUERY_ITEMS, Integer.MAX_VALUE); return canonicalize(query.getModel().getQueryTree(), maxQueryItems); } /** * Canonicalizes this query, allowing any query tree size * * @return null if the query is valid, an error message if it is invalid */ public static String canonicalize(QueryTree queryTree) { return canonicalize(queryTree, Integer.MAX_VALUE); } /** * Canonicalizes this query * * @return null if the query is valid, an error message if it is invalid */ /** * Canonicalize this query * * @param item the item to canonicalize * @param parentIterator iterator for the parent of this item, never null * @return true if the given query is valid, false otherwise */ private static CanonicalizationResult recursivelyCanonicalize(Item item, ListIterator<Item> parentIterator) { if (item instanceof CompositeItem) { CompositeItem composite = (CompositeItem)item; for (ListIterator<Item> i = composite.getItemIterator(); i.hasNext(); ) { CanonicalizationResult childResult = recursivelyCanonicalize(i.next(), i); if (childResult.isError()) return childResult; } } return canonicalizeThis(item, parentIterator); } private static CanonicalizationResult canonicalizeThis(Item item, ListIterator<Item> parentIterator) { if (item instanceof NullItem) parentIterator.remove(); if ( ! 
(item instanceof CompositeItem)) return CanonicalizationResult.success(); CompositeItem composite = (CompositeItem)item; boolean replacedByFalse = collapseFalse(composite, parentIterator); if (replacedByFalse) return CanonicalizationResult.success(); collapseLevels(composite); if (composite instanceof EquivItem) { removeDuplicates((EquivItem) composite); } else if (composite instanceof RankItem) { makeDuplicatesCheap((RankItem)composite); } else if (composite instanceof NotItem) { if (((NotItem) composite).getPositiveItem() == null) return CanonicalizationResult.error("Can not search for only negative items"); } if (composite.getItemCount() == 0) parentIterator.remove(); composite.extractSingleChild().ifPresent(extractedChild -> parentIterator.set(extractedChild)); return CanonicalizationResult.success(); } private static void collapseLevels(CompositeItem composite) { if (composite instanceof RankItem || composite instanceof NotItem) { collapseLevels(composite, composite.getItemIterator()); } else if (composite instanceof AndItem || composite instanceof OrItem || composite instanceof WeakAndItem) { for (ListIterator<Item> i = composite.getItemIterator(); i.hasNext(); ) collapseLevels(composite, i); } } /** Collapse the next item of this iterator into the given parent, if appropriate */ private static void collapseLevels(CompositeItem composite, ListIterator<Item> i) { if ( ! i.hasNext()) return; Item child = i.next(); if (child == null) return; if (child.getClass() != composite.getClass()) return; if (child instanceof WeakAndItem && !equalWeakAndSettings((WeakAndItem)child, (WeakAndItem)composite)) return; i.remove(); moveChildren((CompositeItem) child, i); } private static boolean equalWeakAndSettings(WeakAndItem a, WeakAndItem b) { if ( ! 
a.getIndexName().equals(b.getIndexName())) return false; if (a.getN() != b.getN()) return false; return true; } private static void moveChildren(CompositeItem from, ListIterator<Item> toIterator) { for (ListIterator<Item> i = from.getItemIterator(); i.hasNext(); ) toIterator.add(i.next()); } /** * Handle FALSE items in the immediate children of this * * @return true if this composite was replaced by FALSE */ private static boolean collapseFalse(CompositeItem composite, ListIterator<Item> parentIterator) { if ( ! containsFalse(composite)) return false; if (composite instanceof AndItem) { parentIterator.set(new FalseItem()); return true; } else if (composite instanceof OrItem) { removeFalseIn(composite.getItemIterator()); return false; } else if (composite instanceof NotItem || composite instanceof RankItem) { ListIterator<Item> i = composite.getItemIterator(); if (i.next() instanceof FalseItem) { parentIterator.set(new FalseItem()); return true; } else { removeFalseIn(i); return false; } } else { return false; } } private static boolean containsFalse(CompositeItem composite) { for (ListIterator<Item> i = composite.getItemIterator(); i.hasNext(); ) if (i.next() instanceof FalseItem) return true; return false; } private static void removeFalseIn(ListIterator<Item> iterator) { while (iterator.hasNext()) if (iterator.next() instanceof FalseItem) iterator.remove(); } private static void removeDuplicates(EquivItem composite) { int origSize = composite.getItemCount(); for (int i = origSize - 1; i >= 1; --i) { Item deleteCandidate = composite.getItem(i); for (int j = 0; j < i; ++j) { Item check = composite.getItem(j); if (deleteCandidate.getClass() == check.getClass()) { if (deleteCandidate instanceof PhraseItem) { PhraseItem phraseDeletionCandidate = (PhraseItem) deleteCandidate; PhraseItem phraseToCheck = (PhraseItem) check; if (phraseDeletionCandidate.getIndexedString().equals(phraseToCheck.getIndexedString())) { composite.removeItem(i); break; } } else if 
(deleteCandidate instanceof PhraseSegmentItem) { PhraseSegmentItem phraseSegmentDeletionCandidate = (PhraseSegmentItem) deleteCandidate; PhraseSegmentItem phraseSegmentToCheck = (PhraseSegmentItem) check; if (phraseSegmentDeletionCandidate.getIndexedString().equals(phraseSegmentToCheck.getIndexedString())) { composite.removeItem(i); break; } } else if (deleteCandidate instanceof BlockItem) { BlockItem blockDeletionCandidate = (BlockItem) deleteCandidate; BlockItem blockToCheck = (BlockItem) check; if (blockDeletionCandidate.stringValue().equals(blockToCheck.stringValue())) { composite.removeItem(i); break; } } } } } } /** * If a term is present as both a rank term (i.e not the first child) and in * the match condition (first child), then turn off any rank calculation for * the term during matching, as it will be made available anyway for matches * by the same term in the rank part. * * @param rankItem an item which will be simplified in place */ private static void makeDuplicatesCheap(RankItem rankItem) { Set<TermItem> rankTerms = new HashSet<>(); for (int i = 1; i < rankItem.getItemCount(); i++) { if (rankItem.getItem(i) instanceof TermItem) rankTerms.add((TermItem)rankItem.getItem(i)); } makeDuplicatesCheap(rankItem.getItem(0), rankTerms); } private static void makeDuplicatesCheap(Item item, Set<TermItem> rankTerms) { if (item instanceof CompositeItem) { for (ListIterator<Item> i = ((CompositeItem)item).getItemIterator(); i.hasNext();) makeDuplicatesCheap(i.next(), rankTerms); } else if (rankTerms.contains(item)) { item.setRanked(false); item.setPositionData(false); } } public static class CanonicalizationResult { private final Optional<String> error; private CanonicalizationResult(Optional<String> error) { this.error = error; } /** Returns the error of this query, or empty if it is a valid query */ public Optional<String> error() { return error; } public static CanonicalizationResult error(String error) { return new CanonicalizationResult(Optional.of(error)); } 
public static CanonicalizationResult success() { return new CanonicalizationResult(Optional.empty()); } public boolean isError() { return error.isPresent(); } } }
Yes, I'm not sure how expensive it is, or whether there is a more efficient way to do this; otherwise I guess we'll have to cache this?
public boolean canAllocateTenantNodeTo(Node host, boolean dynamicProvisioning) { if ( ! host.type().canRun(NodeType.tenant)) return false; if (host.status().wantToRetire()) return false; if (host.allocation().map(alloc -> alloc.membership().retired()).orElse(false)) return false; if (suspended(host)) return false; if (dynamicProvisioning) return EnumSet.of(Node.State.active, Node.State.ready, Node.State.provisioned).contains(host.state()); else return host.state() == Node.State.active; }
if (suspended(host)) return false;
public boolean canAllocateTenantNodeTo(Node host, boolean dynamicProvisioning) { if ( ! host.type().canRun(NodeType.tenant)) return false; if (host.status().wantToRetire()) return false; if (host.allocation().map(alloc -> alloc.membership().retired()).orElse(false)) return false; if (suspended(host)) return false; if (dynamicProvisioning) return EnumSet.of(Node.State.active, Node.State.ready, Node.State.provisioned).contains(host.state()); else return host.state() == Node.State.active; }
class Nodes { private static final Logger log = Logger.getLogger(Nodes.class.getName()); private final CuratorDatabaseClient db; private final Zone zone; private final Clock clock; private final Orchestrator orchestrator; public Nodes(CuratorDatabaseClient db, Zone zone, Clock clock, Orchestrator orchestrator) { this.zone = zone; this.clock = clock; this.db = db; this.orchestrator = orchestrator; } /** Read and write all nodes to make sure they are stored in the latest version of the serialized format */ public void rewrite() { Instant start = clock.instant(); int nodesWritten = 0; for (Node.State state : Node.State.values()) { List<Node> nodes = db.readNodes(state); db.writeTo(state, nodes, Agent.system, Optional.empty()); nodesWritten += nodes.size(); } Instant end = clock.instant(); log.log(Level.INFO, String.format("Rewrote %d nodes in %s", nodesWritten, Duration.between(start, end))); } /** * Finds and returns the node with the hostname in any of the given states, or empty if not found * * @param hostname the full host name of the node * @param inState the states the node may be in. If no states are given, it will be returned from any state * @return the node, or empty if it was not found in any of the given states */ public Optional<Node> node(String hostname, Node.State... inState) { return db.readNode(hostname, inState); } /** * Returns a list of nodes in this repository in any of the given states * * @param inState the states to return nodes from. If no states are given, all nodes of the given type are returned */ public NodeList list(Node.State... inState) { return NodeList.copyOf(db.readNodes(inState)); } /** Returns a locked list of all nodes in this repository */ public LockedNodeList list(Mutex lock) { return new LockedNodeList(list().asList(), lock); } /** * Returns whether the zone managed by this node repository seems to be working. 
* If too many nodes are not responding, there is probably some zone-wide issue * and we should probably refrain from making changes to it. */ public boolean isWorking() { NodeList activeNodes = list(Node.State.active); if (activeNodes.size() <= 5) return true; NodeList downNodes = activeNodes.down(); return ! ( (double)downNodes.size() / (double)activeNodes.size() > 0.2 ); } /** Adds a list of newly created reserved nodes to the node repository */ public List<Node> addReservedNodes(LockedNodeList nodes) { for (Node node : nodes) { if ( node.flavor().getType() != Flavor.Type.DOCKER_CONTAINER) illegal("Cannot add " + node + ": This is not a child node"); if (node.allocation().isEmpty()) illegal("Cannot add " + node + ": Child nodes need to be allocated"); Optional<Node> existing = node(node.hostname()); if (existing.isPresent()) illegal("Cannot add " + node + ": A node with this name already exists (" + existing.get() + ", " + existing.get().history() + "). Node to be added: " + node + ", " + node.history()); } return db.addNodesInState(nodes.asList(), Node.State.reserved, Agent.system); } /** * Adds a list of (newly created) nodes to the node repository as provisioned nodes. * If any of the nodes already exists in the deprovisioned state, the new node will be merged * with the history of that node. 
*/ public List<Node> addNodes(List<Node> nodes, Agent agent) { try (Mutex lock = lockUnallocated()) { List<Node> nodesToAdd = new ArrayList<>(); List<Node> nodesToRemove = new ArrayList<>(); for (int i = 0; i < nodes.size(); i++) { var node = nodes.get(i); for (int j = 0; j < i; j++) { if (node.equals(nodes.get(j))) illegal("Cannot add nodes: " + node + " is duplicated in the argument list"); } Optional<Node> existing = node(node.hostname()); if (existing.isPresent()) { if (existing.get().state() != Node.State.deprovisioned) illegal("Cannot add " + node + ": A node with this name already exists"); node = node.with(existing.get().history()); node = node.with(existing.get().reports()); node = node.with(node.status().withFailCount(existing.get().status().failCount())); if (existing.get().status().firmwareVerifiedAt().isPresent()) node = node.with(node.status().withFirmwareVerifiedAt(existing.get().status().firmwareVerifiedAt().get())); boolean rebuilding = existing.get().status().wantToRebuild(); if (rebuilding) { node = node.with(node.status().withWantToRetire(existing.get().status().wantToRetire(), false, rebuilding)); } nodesToRemove.add(existing.get()); } nodesToAdd.add(node); } NestedTransaction transaction = new NestedTransaction(); List<Node> resultingNodes = db.addNodesInState(IP.Config.verify(nodesToAdd, list(lock)), Node.State.provisioned, agent, transaction); db.removeNodes(nodesToRemove, transaction); transaction.commit(); return resultingNodes; } } /** Sets a list of nodes ready and returns the nodes in the ready state */ public List<Node> setReady(List<Node> nodes, Agent agent, String reason) { try (Mutex lock = lockUnallocated()) { List<Node> nodesWithResetFields = nodes.stream() .map(node -> { if (node.state() != Node.State.provisioned && node.state() != Node.State.dirty) illegal("Can not set " + node + " ready. 
It is not provisioned or dirty."); return node.withWantToRetire(false, false, false, Agent.system, clock.instant()); }) .collect(Collectors.toList()); return db.writeTo(Node.State.ready, nodesWithResetFields, agent, Optional.of(reason)); } } public Node setReady(String hostname, Agent agent, String reason) { Node nodeToReady = requireNode(hostname); if (nodeToReady.state() == Node.State.ready) return nodeToReady; return setReady(List.of(nodeToReady), agent, reason).get(0); } /** Reserve nodes. This method does <b>not</b> lock the node repository */ public List<Node> reserve(List<Node> nodes) { return db.writeTo(Node.State.reserved, nodes, Agent.application, Optional.empty()); } /** Activate nodes. This method does <b>not</b> lock the node repository */ public List<Node> activate(List<Node> nodes, NestedTransaction transaction) { return db.writeTo(Node.State.active, nodes, Agent.application, Optional.empty(), transaction); } /** * Sets a list of nodes to have their allocation removable (active to inactive) in the node repository. * * @param application the application the nodes belong to * @param nodes the nodes to make removable. These nodes MUST be in the active state. */ public void setRemovable(ApplicationId application, List<Node> nodes) { try (Mutex lock = lock(application)) { List<Node> removableNodes = nodes.stream() .map(node -> node.with(node.allocation().get().removable(true))) .collect(Collectors.toList()); write(removableNodes, lock); } } /** * Deactivates these nodes in a transaction and returns the nodes in the new state which will hold if the * transaction commits. */ public List<Node> deactivate(List<Node> nodes, ApplicationTransaction transaction) { if ( ! 
zone.environment().isProduction() || zone.system().isCd()) return deallocate(nodes, Agent.application, "Deactivated by application", transaction.nested()); var stateless = NodeList.copyOf(nodes).stateless(); var stateful = NodeList.copyOf(nodes).stateful(); List<Node> written = new ArrayList<>(); written.addAll(deallocate(stateless.asList(), Agent.application, "Deactivated by application", transaction.nested())); written.addAll(db.writeTo(Node.State.inactive, stateful.asList(), Agent.application, Optional.empty(), transaction.nested())); return written; } /** * Fails these nodes in a transaction and returns the nodes in the new state which will hold if the * transaction commits. */ public List<Node> fail(List<Node> nodes, ApplicationTransaction transaction) { return fail(nodes, Agent.application, "Failed by application", transaction.nested()); } public List<Node> fail(List<Node> nodes, Agent agent, String reason) { NestedTransaction transaction = new NestedTransaction(); nodes = fail(nodes, agent, reason, transaction); transaction.commit(); return nodes; } private List<Node> fail(List<Node> nodes, Agent agent, String reason, NestedTransaction transaction) { nodes = nodes.stream() .map(n -> n.withWantToFail(false, agent, clock.instant())) .collect(Collectors.toList()); return db.writeTo(Node.State.failed, nodes, agent, Optional.of(reason), transaction); } /** Move nodes to the dirty state */ public List<Node> deallocate(List<Node> nodes, Agent agent, String reason) { return performOn(NodeList.copyOf(nodes), (node, lock) -> deallocate(node, agent, reason)); } public List<Node> deallocateRecursively(String hostname, Agent agent, String reason) { Node nodeToDirty = node(hostname).orElseThrow(() -> new IllegalArgumentException("Could not deallocate " + hostname + ": Node not found")); List<Node> nodesToDirty = (nodeToDirty.type().isHost() ? 
Stream.concat(list().childrenOf(hostname).asList().stream(), Stream.of(nodeToDirty)) : Stream.of(nodeToDirty)) .filter(node -> node.state() != Node.State.dirty) .collect(Collectors.toList()); List<String> hostnamesNotAllowedToDirty = nodesToDirty.stream() .filter(node -> node.state() != Node.State.provisioned) .filter(node -> node.state() != Node.State.failed) .filter(node -> node.state() != Node.State.parked) .filter(node -> node.state() != Node.State.breakfixed) .map(Node::hostname) .collect(Collectors.toList()); if ( ! hostnamesNotAllowedToDirty.isEmpty()) illegal("Could not deallocate " + nodeToDirty + ": " + hostnamesNotAllowedToDirty + " are not in states [provisioned, failed, parked, breakfixed]"); return nodesToDirty.stream().map(node -> deallocate(node, agent, reason)).collect(Collectors.toList()); } /** * Set a node dirty or parked, allowed if it is in the provisioned, inactive, failed or parked state. * Use this to clean newly provisioned nodes or to recycle failed nodes which have been repaired or put on hold. */ public Node deallocate(Node node, Agent agent, String reason) { NestedTransaction transaction = new NestedTransaction(); Node deallocated = deallocate(node, agent, reason, transaction); transaction.commit(); return deallocated; } public List<Node> deallocate(List<Node> nodes, Agent agent, String reason, NestedTransaction transaction) { return nodes.stream().map(node -> deallocate(node, agent, reason, transaction)).collect(Collectors.toList()); } public Node deallocate(Node node, Agent agent, String reason, NestedTransaction transaction) { if (parkOnDeallocationOf(node, agent)) { return park(node.hostname(), false, agent, reason, transaction); } else { return db.writeTo(Node.State.dirty, List.of(node), agent, Optional.of(reason), transaction).get(0); } } /** * Fails this node and returns it in its new state. 
* * @return the node in its new state * @throws NoSuchNodeException if the node is not found */ public Node fail(String hostname, Agent agent, String reason) { return fail(hostname, true, agent, reason); } public Node fail(String hostname, boolean keepAllocation, Agent agent, String reason) { return move(hostname, Node.State.failed, agent, keepAllocation, Optional.of(reason)); } /** * Fails all the nodes that are children of hostname before finally failing the hostname itself. * Non-active nodes are failed immediately, while active nodes are marked as wantToFail. * The host is failed if it has no active nodes and marked wantToFail if it has. * * @return all the nodes that were changed by this request */ public List<Node> failOrMarkRecursively(String hostname, Agent agent, String reason) { NodeList children = list().childrenOf(hostname); List<Node> changed = performOn(children, (node, lock) -> failOrMark(node, agent, reason, lock)); if (children.state(Node.State.active).isEmpty()) changed.add(move(hostname, Node.State.failed, agent, true, Optional.of(reason))); else changed.addAll(performOn(NodeList.of(node(hostname).orElseThrow()), (node, lock) -> failOrMark(node, agent, reason, lock))); return changed; } private Node failOrMark(Node node, Agent agent, String reason, Mutex lock) { if (node.state() == Node.State.active) { node = node.withWantToFail(true, agent, clock.instant()); write(node, lock); return node; } else { return move(node.hostname(), Node.State.failed, agent, true, Optional.of(reason)); } } /** * Parks this node and returns it in its new state. 
     *
     * @return the node in its new state
     * @throws NoSuchNodeException if the node is not found
     */
    public Node park(String hostname, boolean keepAllocation, Agent agent, String reason) {
        NestedTransaction transaction = new NestedTransaction();
        Node parked = park(hostname, keepAllocation, agent, reason, transaction);
        transaction.commit();
        return parked;
    }

    private Node park(String hostname, boolean keepAllocation, Agent agent, String reason, NestedTransaction transaction) {
        return move(hostname, Node.State.parked, agent, keepAllocation, Optional.of(reason), transaction);
    }

    /**
     * Parks all the nodes that are children of hostname before finally parking the hostname itself.
     *
     * @return List of all the parked nodes in their new state
     */
    public List<Node> parkRecursively(String hostname, Agent agent, String reason) {
        return moveRecursively(hostname, Node.State.parked, agent, Optional.of(reason));
    }

    /**
     * Moves a previously failed or parked node back to the active state.
     *
     * @return the node in its new state
     * @throws NoSuchNodeException if the node is not found
     */
    public Node reactivate(String hostname, Agent agent, String reason) {
        return move(hostname, Node.State.active, agent, true, Optional.of(reason));
    }

    /**
     * Moves a host to breakfixed state, removing any children.
     */
    public List<Node> breakfixRecursively(String hostname, Agent agent, String reason) {
        Node node = requireNode(hostname);
        try (Mutex lock = lockUnallocated()) {
            requireBreakfixable(node);
            // Children are removed and the host moved to breakfixed in one transaction
            NestedTransaction transaction = new NestedTransaction();
            List<Node> removed = removeChildren(node, false, transaction);
            removed.add(move(node.hostname(), Node.State.breakfixed, agent, true, Optional.of(reason), transaction));
            transaction.commit();
            return removed;
        }
    }

    /** Moves all children of hostname, then hostname itself, to the given state in a single transaction */
    private List<Node> moveRecursively(String hostname, Node.State toState, Agent agent, Optional<String> reason) {
        NestedTransaction transaction = new NestedTransaction();
        List<Node> moved = list().childrenOf(hostname).asList().stream()
                .map(child -> move(child.hostname(), toState, agent, true, reason, transaction))
                .collect(Collectors.toList());
        moved.add(move(hostname, toState, agent, true, reason, transaction));
        transaction.commit();
        return moved;
    }

    /** Move a node to given state */
    private Node move(String hostname, Node.State toState, Agent agent, boolean keepAllocation, Optional<String> reason) {
        NestedTransaction transaction = new NestedTransaction();
        Node moved = move(hostname, toState, agent, keepAllocation, reason, transaction);
        transaction.commit();
        return moved;
    }

    /** Move a node to given state as part of a transaction */
    private Node move(String hostname, Node.State toState, Agent agent, boolean keepAllocation, Optional<String> reason,
                      NestedTransaction transaction) {
        try (NodeMutex lock = lockAndGetRequired(hostname)) {
            Node node = lock.node();
            if (toState == Node.State.active) {
                // Activation requires an allocation, and no other active node may occupy the same cluster+index
                if (node.allocation().isEmpty()) illegal("Could not set " + node + " active: It has no allocation");
                if (!keepAllocation) illegal("Could not set " + node + " active: Requested to discard allocation");
                for (Node currentActive : list(Node.State.active).owner(node.allocation().get().owner())) {
                    if (node.allocation().get().membership().cluster().equals(currentActive.allocation().get().membership().cluster())
                        && node.allocation().get().membership().index() == currentActive.allocation().get().membership().index())
                        illegal("Could not set " + node + " active: Same cluster and index as " + currentActive);
                }
            }
            if (!keepAllocation && node.allocation().isPresent()) {
                node = node.withoutAllocation();
            }
            if (toState == Node.State.deprovisioned) {
                // Deprovisioned nodes give up their IP config
                node = node.with(IP.Config.EMPTY);
            }
            return db.writeTo(toState, List.of(node), agent, reason, transaction).get(0);
        }
    }

    /*
     * This method is used by the REST API to handle readying nodes for new allocations. For Linux
     * containers this will remove the node from node repository, otherwise the node will be moved to state ready.
     */
    public Node markNodeAvailableForNewAllocation(String hostname, Agent agent, String reason) {
        Node node = requireNode(hostname);
        if (node.flavor().getType() == Flavor.Type.DOCKER_CONTAINER && node.type() == NodeType.tenant) {
            // Container nodes are not recycled: they must be dirty, and are then removed entirely
            if (node.state() != Node.State.dirty)
                illegal("Cannot make " + node + " available for new allocation as it is not in state [dirty]");
            return removeRecursively(node, true).get(0);
        }

        if (node.state() == Node.State.ready) return node;

        // Refuse to ready a node whose (parent) host has hard failures
        Node parentHost = node.parentHostname().flatMap(this::node).orElse(node);
        List<String> failureReasons = NodeFailer.reasonsToFailHost(parentHost);
        if ( ! failureReasons.isEmpty())
            illegal(node + " cannot be readied because it has hard failures: " + failureReasons);

        return setReady(List.of(node), agent, reason).get(0);
    }

    /**
     * Removes all the nodes that are children of hostname before finally removing the hostname itself.
     *
     * @return a List of all the nodes that have been removed or (for hosts) deprovisioned
     */
    public List<Node> removeRecursively(String hostname) {
        Node node = requireNode(hostname);
        return removeRecursively(node, false);
    }

    public List<Node> removeRecursively(Node node, boolean force) {
        try (Mutex lock = lockUnallocated()) {
            requireRemovable(node, false, force);
            NestedTransaction transaction = new NestedTransaction();
            final List<Node> removed;
            if (!node.type().isHost()) {
                removed = List.of(node);
                db.removeNodes(removed, transaction);
            } else {
                removed = removeChildren(node, force, transaction);
                if (zone.getCloud().dynamicProvisioning()) {
                    // Dynamically provisioned hosts are deleted outright ...
                    db.removeNodes(List.of(node), transaction);
                } else {
                    // ... while statically provisioned hosts are kept as deprovisioned, preserving history
                    move(node.hostname(), Node.State.deprovisioned, Agent.system, false, Optional.empty(), transaction);
                }
                removed.add(node);
            }
            transaction.commit();
            return removed;
        }
    }

    /** Forgets a deprovisioned node. This removes all traces of the node in the node repository. */
    public void forget(Node node) {
        if (node.state() != Node.State.deprovisioned)
            throw new IllegalArgumentException(node + " must be deprovisioned before it can be forgotten");
        if (node.status().wantToRebuild())
            throw new IllegalArgumentException(node + " is rebuilding and cannot be forgotten");
        NestedTransaction transaction = new NestedTransaction();
        db.removeNodes(List.of(node), transaction);
        transaction.commit();
    }

    /** Removes all children of the given node as part of the given transaction, after checking they are removable */
    private List<Node> removeChildren(Node node, boolean force, NestedTransaction transaction) {
        List<Node> children = list().childrenOf(node).asList();
        children.forEach(child -> requireRemovable(child, true, force));
        db.removeNodes(children, transaction);
        return new ArrayList<>(children);
    }

    /**
     * Throws if the given node cannot be removed.
Removal is allowed if: * - Tenant node: * - non-recursively: node is unallocated * - recursively: node is unallocated or node is in failed|parked * - Host node: iff in state provisioned|failed|parked * - Child node: * - non-recursively: node in state ready * - recursively: child is in state provisioned|failed|parked|dirty|ready */ private void requireRemovable(Node node, boolean removingRecursively, boolean force) { if (force) return; if (node.type() == NodeType.tenant && node.allocation().isPresent()) { EnumSet<Node.State> removableStates = EnumSet.of(Node.State.failed, Node.State.parked); if (!removingRecursively || !removableStates.contains(node.state())) illegal(node + " is currently allocated and cannot be removed while in " + node.state()); } final Set<Node.State> removableStates; if (node.type().isHost()) { removableStates = EnumSet.of(Node.State.provisioned, Node.State.failed, Node.State.parked); } else { removableStates = removingRecursively ? EnumSet.of(Node.State.provisioned, Node.State.failed, Node.State.parked, Node.State.dirty, Node.State.ready) : EnumSet.of(Node.State.ready); } if (!removableStates.contains(node.state())) illegal(node + " can not be removed while in " + node.state()); } /** * Throws if given node cannot be breakfixed. * Breakfix is allowed if the following is true: * - Node is tenant host * - Node is in zone without dynamic provisioning * - Node is in parked or failed state */ private void requireBreakfixable(Node node) { if (zone.getCloud().dynamicProvisioning()) { illegal("Can not breakfix in zone: " + zone); } if (node.type() != NodeType.host) { illegal(node + " can not be breakfixed as it is not a tenant host"); } Set<Node.State> legalStates = EnumSet.of(Node.State.failed, Node.State.parked); if (! legalStates.contains(node.state())) { illegal(node + " can not be removed as it is not in the states " + legalStates); } } /** * Increases the restart generation of the active nodes matching given filter. 
     *
     * @return the nodes in their new state
     */
    public List<Node> restartActive(Predicate<Node> filter) {
        return restart(NodeFilter.in(Set.of(Node.State.active)).and(filter));
    }

    /**
     * Increases the restart generation of the any nodes matching given filter.
     *
     * @return the nodes in their new state
     */
    public List<Node> restart(Predicate<Node> filter) {
        return performOn(filter, (node, lock) -> write(node.withRestart(node.allocation().get().restartGeneration().withIncreasedWanted()), lock));
    }

    /**
     * Increases the reboot generation of the nodes matching the filter.
     *
     * @return the nodes in their new state
     */
    public List<Node> reboot(Predicate<Node> filter) {
        return performOn(filter, (node, lock) -> write(node.withReboot(node.status().reboot().withIncreasedWanted()), lock));
    }

    /**
     * Set target OS version of all nodes matching given filter.
     *
     * @return the nodes in their new state
     */
    public List<Node> upgradeOs(Predicate<Node> filter, Optional<Version> version) {
        return performOn(filter, (node, lock) -> {
            var newStatus = node.status().withOsVersion(node.status().osVersion().withWanted(version));
            return write(node.with(newStatus), lock);
        });
    }

    /** Retire nodes matching given filter */
    public List<Node> retire(Predicate<Node> filter, Agent agent, Instant instant) {
        return performOn(filter, (node, lock) -> write(node.withWantToRetire(true, agent, instant), lock));
    }

    /** Retire and deprovision given host and all of its children */
    public List<Node> deprovision(String hostname, Agent agent, Instant instant) {
        return decommission(hostname, DecommissionOperation.deprovision, agent, instant);
    }

    /** Retire and rebuild given host and all of its children */
    public List<Node> rebuild(String hostname, Agent agent, Instant instant) {
        return decommission(hostname, DecommissionOperation.rebuild, agent, instant);
    }

    /** Marks a host and all its children as wanting to retire, with the deprovision/rebuild flag set per operation */
    private List<Node> decommission(String hostname, DecommissionOperation op, Agent agent, Instant instant) {
        Optional<NodeMutex> nodeMutex = lockAndGet(hostname);
        if (nodeMutex.isEmpty()) return List.of();
        Node host = nodeMutex.get().node();
        if (!host.type().isHost()) throw new IllegalArgumentException("Cannot " + op + " non-host " + host);

        List<Node> result;
        boolean wantToDeprovision = op == DecommissionOperation.deprovision;
        boolean wantToRebuild = op == DecommissionOperation.rebuild;
        try (NodeMutex lock = nodeMutex.get(); Mutex allocationLock = lockUnallocated()) {
            // Re-read the host under the lock before writing
            host = lock.node();
            result = performOn(list(allocationLock).childrenOf(host), (node, nodeLock) -> {
                Node newNode = node.withWantToRetire(true, wantToDeprovision, wantToRebuild, agent, instant);
                return write(newNode, nodeLock);
            });
            Node newHost = host.withWantToRetire(true, wantToDeprovision, wantToRebuild, agent, instant);
            result.add(write(newHost, lock));
        }
        return result;
    }

    /**
     * Writes this node after it has changed some internal state but NOT changed its state field.
     * This does NOT lock the node repository implicitly, but callers are expected to already hold the lock.
     *
     * @param lock already acquired lock
     * @return the written node for convenience
     */
    public Node write(Node node, Mutex lock) { return write(List.of(node), lock).get(0); }

    /**
     * Writes these nodes after they have changed some internal state but NOT changed their state field.
     * This does NOT lock the node repository implicitly, but callers are expected to already hold the lock.
     *
     * @param lock already acquired lock
     * @return the written nodes for convenience
     */
    public List<Node> write(List<Node> nodes, @SuppressWarnings("unused") Mutex lock) {
        return db.writeTo(nodes, Agent.system, Optional.empty());
    }

    private List<Node> performOn(Predicate<Node> filter, BiFunction<Node, Mutex, Node> action) {
        return performOn(list().matching(filter), action);
    }

    /**
     * Performs an operation requiring locking on all nodes matching some filter.
     *
     * @param action the action to perform
     * @return the set of nodes on which the action was performed, as they became as a result of the operation
     */
    private List<Node> performOn(NodeList nodes, BiFunction<Node, Mutex, Node> action) {
        List<Node> unallocatedNodes = new ArrayList<>();
        ListMap<ApplicationId, Node> allocatedNodes = new ListMap<>();

        // Group nodes by the lock that guards them: the unallocated lock, or their owning application's lock
        for (Node node : nodes) {
            if (node.allocation().isPresent())
                allocatedNodes.put(node.allocation().get().owner(), node);
            else
                unallocatedNodes.add(node);
        }

        // perform operation while holding appropriate lock
        List<Node> resultingNodes = new ArrayList<>();
        try (Mutex lock = lockUnallocated()) {
            for (Node node : unallocatedNodes) {
                // Re-read under lock; the node may have been removed since it was listed
                Optional<Node> currentNode = db.readNode(node.hostname());
                if (currentNode.isEmpty()) continue;
                resultingNodes.add(action.apply(currentNode.get(), lock));
            }
        }
        for (Map.Entry<ApplicationId, List<Node>> applicationNodes : allocatedNodes.entrySet()) {
            try (Mutex lock = lock(applicationNodes.getKey())) {
                for (Node node : applicationNodes.getValue()) {
                    Optional<Node> currentNode = db.readNode(node.hostname());
                    if (currentNode.isEmpty()) continue;
                    resultingNodes.add(action.apply(currentNode.get(), lock));
                }
            }
        }
        return resultingNodes;
    }

    public boolean canAllocateTenantNodeTo(Node host) {
        return canAllocateTenantNodeTo(host, zone.getCloud().dynamicProvisioning());
    }

    /** Returns whether the orchestrator reports this node as suspended; false if the orchestrator does not know it */
    public boolean suspended(Node node) {
        try {
            return orchestrator.getNodeStatus(new HostName(node.hostname())).isSuspended();
        } catch (HostNameNotFoundException e) {
            // Treat it as not suspended
            return false;
        }
    }

    /** Create a lock which provides exclusive rights to making changes to the given application */
    public Mutex lock(ApplicationId application) { return db.lock(application); }

    /** Create a lock with a timeout which provides exclusive rights to making changes to the given application */
    public Mutex lock(ApplicationId application, Duration timeout) { return db.lock(application, timeout); }

    /** Create a lock which provides exclusive rights to modifying unallocated nodes */
    public Mutex lockUnallocated() { return db.lockInactive(); }

    /** Returns the unallocated/application lock, and the node acquired under that lock. */
    public Optional<NodeMutex> lockAndGet(Node node) {
        Node staleNode = node;

        final int maxRetries = 4;
        for (int i = 0; i < maxRetries; ++i) {
            Mutex lockToClose = lock(staleNode);
            try {
                // As an optimization, first try finding the node in the same state
                Optional<Node> freshNode = node(staleNode.hostname(), staleNode.state());
                if (freshNode.isEmpty()) {
                    freshNode = node(staleNode.hostname());
                    if (freshNode.isEmpty()) {
                        return Optional.empty();
                    }
                }

                // The lock taken depends on the node's owner; retry if the owner changed between the
                // read and the lock acquisition, since we then hold the wrong lock
                if (Objects.equals(freshNode.get().allocation().map(Allocation::owner),
                                   staleNode.allocation().map(Allocation::owner))) {
                    NodeMutex nodeMutex = new NodeMutex(freshNode.get(), lockToClose);
                    lockToClose = null; // ownership transferred to the returned NodeMutex
                    return Optional.of(nodeMutex);
                }

                // Refresh node and try again
                staleNode = freshNode.get();
            } finally {
                if (lockToClose != null) lockToClose.close();
            }
        }

        throw new IllegalStateException("Giving up (after " + maxRetries + " attempts) " +
                                        "fetching an up to date node under lock: " + node.hostname());
    }

    /** Returns the unallocated/application lock, and the node acquired under that lock. */
    public Optional<NodeMutex> lockAndGet(String hostname) { return node(hostname).flatMap(this::lockAndGet); }

    /** Returns the unallocated/application lock, and the node acquired under that lock. */
    public NodeMutex lockAndGetRequired(Node node) {
        return lockAndGet(node).orElseThrow(() -> new NoSuchNodeException("No node with hostname '" + node.hostname() + "'"));
    }

    /** Returns the unallocated/application lock, and the node acquired under that lock. */
    public NodeMutex lockAndGetRequired(String hostname) {
        return lockAndGet(hostname).orElseThrow(() -> new NoSuchNodeException("No node with hostname '" + hostname + "'"));
    }

    private Mutex lock(Node node) {
        return node.allocation().isPresent() ? lock(node.allocation().get().owner()) : lockUnallocated();
    }

    private Node requireNode(String hostname) {
        return node(hostname).orElseThrow(() -> new NoSuchNodeException("No node with hostname '" + hostname + "'"));
    }

    private void illegal(String message) {
        throw new IllegalArgumentException(message);
    }

    /** Returns whether node should be parked when deallocated by given agent */
    private static boolean parkOnDeallocationOf(Node node, Agent agent) {
        if (node.state() == Node.State.parked) return false;
        if (agent == Agent.operator) return false;
        if (!node.type().isHost() && node.status().wantToDeprovision()) return false;
        // Retirement counts only if it was requested by an operator
        boolean retirementRequestedByOperator = node.status().wantToRetire() &&
                                                node.history().event(History.Event.Type.wantToRetire)
                                                    .map(History.Event::agent)
                                                    .map(a -> a == Agent.operator)
                                                    .orElse(false);
        return node.status().wantToDeprovision() || node.status().wantToRebuild() || retirementRequestedByOperator;
    }

    /** The different ways a host can be decommissioned */
    private enum DecommissionOperation {
        deprovision,
        rebuild,
    }

}
class Nodes {

    private static final Logger log = Logger.getLogger(Nodes.class.getName());

    private final CuratorDatabaseClient db;
    private final Zone zone;
    private final Clock clock;
    private final Orchestrator orchestrator;

    public Nodes(CuratorDatabaseClient db, Zone zone, Clock clock, Orchestrator orchestrator) {
        this.zone = zone;
        this.clock = clock;
        this.db = db;
        this.orchestrator = orchestrator;
    }

    /** Read and write all nodes to make sure they are stored in the latest version of the serialized format */
    public void rewrite() {
        Instant start = clock.instant();
        int nodesWritten = 0;
        for (Node.State state : Node.State.values()) {
            List<Node> nodes = db.readNodes(state);
            // Writing the nodes back re-serializes them in the current format
            db.writeTo(state, nodes, Agent.system, Optional.empty());
            nodesWritten += nodes.size();
        }
        Instant end = clock.instant();
        log.log(Level.INFO, String.format("Rewrote %d nodes in %s", nodesWritten, Duration.between(start, end)));
    }

    /**
     * Finds and returns the node with the hostname in any of the given states, or empty if not found
     *
     * @param hostname the full host name of the node
     * @param inState the states the node may be in. If no states are given, it will be returned from any state
     * @return the node, or empty if it was not found in any of the given states
     */
    public Optional<Node> node(String hostname, Node.State... inState) {
        return db.readNode(hostname, inState);
    }

    /**
     * Returns a list of nodes in this repository in any of the given states
     *
     * @param inState the states to return nodes from. If no states are given, all nodes of the given type are returned
     */
    public NodeList list(Node.State... inState) {
        return NodeList.copyOf(db.readNodes(inState));
    }

    /** Returns a locked list of all nodes in this repository */
    public LockedNodeList list(Mutex lock) {
        return new LockedNodeList(list().asList(), lock);
    }

    /**
     * Returns whether the zone managed by this node repository seems to be working.
     * If too many nodes are not responding, there is probably some zone-wide issue
     * and we should probably refrain from making changes to it.
     */
    public boolean isWorking() {
        NodeList activeNodes = list(Node.State.active);
        if (activeNodes.size() <= 5) return true; // Too few nodes to say anything about zone health
        NodeList downNodes = activeNodes.down();
        // Working iff at most 20% of active nodes are down
        return ! ( (double)downNodes.size() / (double)activeNodes.size() > 0.2 );
    }

    /** Adds a list of newly created reserved nodes to the node repository */
    public List<Node> addReservedNodes(LockedNodeList nodes) {
        for (Node node : nodes) {
            // Reserved nodes must be allocated child (container) nodes with unique hostnames
            if ( node.flavor().getType() != Flavor.Type.DOCKER_CONTAINER)
                illegal("Cannot add " + node + ": This is not a child node");
            if (node.allocation().isEmpty())
                illegal("Cannot add " + node + ": Child nodes need to be allocated");
            Optional<Node> existing = node(node.hostname());
            if (existing.isPresent())
                illegal("Cannot add " + node + ": A node with this name already exists (" + existing.get() + ", " +
                        existing.get().history() + "). Node to be added: " + node + ", " + node.history());
        }
        return db.addNodesInState(nodes.asList(), Node.State.reserved, Agent.system);
    }

    /**
     * Adds a list of (newly created) nodes to the node repository as provisioned nodes.
     * If any of the nodes already exists in the deprovisioned state, the new node will be merged
     * with the history of that node.
     */
    public List<Node> addNodes(List<Node> nodes, Agent agent) {
        try (Mutex lock = lockUnallocated()) {
            List<Node> nodesToAdd =  new ArrayList<>();
            List<Node> nodesToRemove = new ArrayList<>();
            for (int i = 0; i < nodes.size(); i++) {
                var node = nodes.get(i);

                // Check for duplicates
                for (int j = 0; j < i; j++) {
                    if (node.equals(nodes.get(j)))
                        illegal("Cannot add nodes: " + node + " is duplicated in the argument list");
                }

                Optional<Node> existing = node(node.hostname());
                if (existing.isPresent()) {
                    // Only a deprovisioned predecessor may be replaced; its history, reports, fail count,
                    // firmware-verification timestamp and rebuild flag carry over to the new node
                    if (existing.get().state() != Node.State.deprovisioned)
                        illegal("Cannot add " + node + ": A node with this name already exists");
                    node = node.with(existing.get().history());
                    node = node.with(existing.get().reports());
                    node = node.with(node.status().withFailCount(existing.get().status().failCount()));
                    if (existing.get().status().firmwareVerifiedAt().isPresent())
                        node = node.with(node.status().withFirmwareVerifiedAt(existing.get().status().firmwareVerifiedAt().get()));
                    boolean rebuilding = existing.get().status().wantToRebuild();
                    if (rebuilding) {
                        node = node.with(node.status().withWantToRetire(existing.get().status().wantToRetire(),
                                                                        false,
                                                                        rebuilding));
                    }
                    nodesToRemove.add(existing.get());
                }

                nodesToAdd.add(node);
            }
            // Add the new nodes and remove any replaced deprovisioned predecessors atomically
            NestedTransaction transaction = new NestedTransaction();
            List<Node> resultingNodes = db.addNodesInState(IP.Config.verify(nodesToAdd, list(lock)), Node.State.provisioned, agent, transaction);
            db.removeNodes(nodesToRemove, transaction);
            transaction.commit();
            return resultingNodes;
        }
    }

    /** Sets a list of nodes ready and returns the nodes in the ready state */
    public List<Node> setReady(List<Node> nodes, Agent agent, String reason) {
        try (Mutex lock = lockUnallocated()) {
            // Readying a node also clears any retire/deprovision/rebuild request on it
            List<Node> nodesWithResetFields = nodes.stream()
                    .map(node -> {
                        if (node.state() != Node.State.provisioned && node.state() != Node.State.dirty)
                            illegal("Can not set " + node + " ready. It is not provisioned or dirty.");
                        return node.withWantToRetire(false, false, false, Agent.system, clock.instant());
                    })
                    .collect(Collectors.toList());

            return db.writeTo(Node.State.ready, nodesWithResetFields, agent, Optional.of(reason));
        }
    }

    public Node setReady(String hostname, Agent agent, String reason) {
        Node nodeToReady = requireNode(hostname);
        if (nodeToReady.state() == Node.State.ready) return nodeToReady;
        return setReady(List.of(nodeToReady), agent, reason).get(0);
    }

    /** Reserve nodes. This method does <b>not</b> lock the node repository */
    public List<Node> reserve(List<Node> nodes) {
        return db.writeTo(Node.State.reserved, nodes, Agent.application, Optional.empty());
    }

    /** Activate nodes. This method does <b>not</b> lock the node repository */
    public List<Node> activate(List<Node> nodes, NestedTransaction transaction) {
        return db.writeTo(Node.State.active, nodes, Agent.application, Optional.empty(), transaction);
    }

    /**
     * Sets a list of nodes to have their allocation removable (active to inactive) in the node repository.
     *
     * @param application the application the nodes belong to
     * @param nodes the nodes to make removable. These nodes MUST be in the active state.
     */
    public void setRemovable(ApplicationId application, List<Node> nodes) {
        try (Mutex lock = lock(application)) {
            List<Node> removableNodes = nodes.stream()
                    .map(node -> node.with(node.allocation().get().removable(true)))
                    .collect(Collectors.toList());
            write(removableNodes, lock);
        }
    }

    /**
     * Deactivates these nodes in a transaction and returns the nodes in the new state which will hold if the
     * transaction commits.
     */
    public List<Node> deactivate(List<Node> nodes, ApplicationTransaction transaction) {
        // Outside production (and in CD systems) all nodes are deallocated directly
        if ( ! zone.environment().isProduction() || zone.system().isCd())
            return deallocate(nodes, Agent.application, "Deactivated by application", transaction.nested());

        // In production, stateless nodes are deallocated while stateful nodes are kept inactive (data retained)
        var stateless = NodeList.copyOf(nodes).stateless();
        var stateful  = NodeList.copyOf(nodes).stateful();
        List<Node> written = new ArrayList<>();
        written.addAll(deallocate(stateless.asList(), Agent.application, "Deactivated by application", transaction.nested()));
        written.addAll(db.writeTo(Node.State.inactive, stateful.asList(), Agent.application, Optional.empty(), transaction.nested()));
        return written;
    }

    /**
     * Fails these nodes in a transaction and returns the nodes in the new state which will hold if the
     * transaction commits.
     */
    public List<Node> fail(List<Node> nodes, ApplicationTransaction transaction) {
        return fail(nodes, Agent.application, "Failed by application", transaction.nested());
    }

    public List<Node> fail(List<Node> nodes, Agent agent, String reason) {
        NestedTransaction transaction = new NestedTransaction();
        nodes = fail(nodes, agent, reason, transaction);
        transaction.commit();
        return nodes;
    }

    private List<Node> fail(List<Node> nodes, Agent agent, String reason, NestedTransaction transaction) {
        // Clear the wantToFail flag now that the nodes are actually moved to failed
        nodes = nodes.stream()
                     .map(n -> n.withWantToFail(false, agent, clock.instant()))
                     .collect(Collectors.toList());
        return db.writeTo(Node.State.failed, nodes, agent, Optional.of(reason), transaction);
    }

    /** Move nodes to the dirty state */
    public List<Node> deallocate(List<Node> nodes, Agent agent, String reason) {
        return performOn(NodeList.copyOf(nodes), (node, lock) -> deallocate(node, agent, reason));
    }

    public List<Node> deallocateRecursively(String hostname, Agent agent, String reason) {
        Node nodeToDirty = node(hostname).orElseThrow(() ->
                new IllegalArgumentException("Could not deallocate " + hostname + ": Node not found"));

        // For a host, include its children; skip nodes already dirty
        List<Node> nodesToDirty =
                (nodeToDirty.type().isHost() ?
                        Stream.concat(list().childrenOf(hostname).asList().stream(), Stream.of(nodeToDirty)) :
                        Stream.of(nodeToDirty))
                .filter(node -> node.state() != Node.State.dirty)
                .collect(Collectors.toList());

        List<String> hostnamesNotAllowedToDirty = nodesToDirty.stream()
                .filter(node -> node.state() != Node.State.provisioned)
                .filter(node -> node.state() != Node.State.failed)
                .filter(node -> node.state() != Node.State.parked)
                .filter(node -> node.state() != Node.State.breakfixed)
                .map(Node::hostname)
                .collect(Collectors.toList());
        if ( ! hostnamesNotAllowedToDirty.isEmpty())
            illegal("Could not deallocate " + nodeToDirty + ": " +
                    hostnamesNotAllowedToDirty + " are not in states [provisioned, failed, parked, breakfixed]");

        return nodesToDirty.stream().map(node -> deallocate(node, agent, reason)).collect(Collectors.toList());
    }

    /**
     * Set a node dirty or parked, allowed if it is in the provisioned, inactive, failed or parked state.
     * Use this to clean newly provisioned nodes or to recycle failed nodes which have been repaired or put on hold.
     */
    public Node deallocate(Node node, Agent agent, String reason) {
        NestedTransaction transaction = new NestedTransaction();
        Node deallocated = deallocate(node, agent, reason, transaction);
        transaction.commit();
        return deallocated;
    }

    public List<Node> deallocate(List<Node> nodes, Agent agent, String reason, NestedTransaction transaction) {
        return nodes.stream().map(node -> deallocate(node, agent, reason, transaction)).collect(Collectors.toList());
    }

    public Node deallocate(Node node, Agent agent, String reason, NestedTransaction transaction) {
        // Nodes flagged for deprovision/rebuild (or operator-requested retirement) go to parked instead of dirty
        if (parkOnDeallocationOf(node, agent)) {
            return park(node.hostname(), false, agent, reason, transaction);
        } else {
            return db.writeTo(Node.State.dirty, List.of(node), agent, Optional.of(reason), transaction).get(0);
        }
    }

    /**
     * Fails this node and returns it in its new state.
     *
     * @return the node in its new state
     * @throws NoSuchNodeException if the node is not found
     */
    public Node fail(String hostname, Agent agent, String reason) {
        return fail(hostname, true, agent, reason);
    }

    public Node fail(String hostname, boolean keepAllocation, Agent agent, String reason) {
        return move(hostname, Node.State.failed, agent, keepAllocation, Optional.of(reason));
    }

    /**
     * Fails all the nodes that are children of hostname before finally failing the hostname itself.
     * Non-active nodes are failed immediately, while active nodes are marked as wantToFail.
     * The host is failed if it has no active nodes and marked wantToFail if it has.
     *
     * @return all the nodes that were changed by this request
     */
    public List<Node> failOrMarkRecursively(String hostname, Agent agent, String reason) {
        NodeList children = list().childrenOf(hostname);
        List<Node> changed = performOn(children, (node, lock) -> failOrMark(node, agent, reason, lock));

        if (children.state(Node.State.active).isEmpty())
            changed.add(move(hostname, Node.State.failed, agent, true, Optional.of(reason)));
        else
            changed.addAll(performOn(NodeList.of(node(hostname).orElseThrow()),
                                     (node, lock) -> failOrMark(node, agent, reason, lock)));

        return changed;
    }

    /** Marks an active node as wanting to fail; fails any other node immediately */
    private Node failOrMark(Node node, Agent agent, String reason, Mutex lock) {
        if (node.state() == Node.State.active) {
            node = node.withWantToFail(true, agent, clock.instant());
            write(node, lock);
            return node;
        } else {
            return move(node.hostname(), Node.State.failed, agent, true, Optional.of(reason));
        }
    }

    /**
     * Parks this node and returns it in its new state.
     *
     * @return the node in its new state
     * @throws NoSuchNodeException if the node is not found
     */
    public Node park(String hostname, boolean keepAllocation, Agent agent, String reason) {
        NestedTransaction transaction = new NestedTransaction();
        Node parked = park(hostname, keepAllocation, agent, reason, transaction);
        transaction.commit();
        return parked;
    }

    private Node park(String hostname, boolean keepAllocation, Agent agent, String reason, NestedTransaction transaction) {
        return move(hostname, Node.State.parked, agent, keepAllocation, Optional.of(reason), transaction);
    }

    /**
     * Parks all the nodes that are children of hostname before finally parking the hostname itself.
     *
     * @return List of all the parked nodes in their new state
     */
    public List<Node> parkRecursively(String hostname, Agent agent, String reason) {
        return moveRecursively(hostname, Node.State.parked, agent, Optional.of(reason));
    }

    /**
     * Moves a previously failed or parked node back to the active state.
     *
     * @return the node in its new state
     * @throws NoSuchNodeException if the node is not found
     */
    public Node reactivate(String hostname, Agent agent, String reason) {
        return move(hostname, Node.State.active, agent, true, Optional.of(reason));
    }

    /**
     * Moves a host to breakfixed state, removing any children.
     */
    public List<Node> breakfixRecursively(String hostname, Agent agent, String reason) {
        Node node = requireNode(hostname);
        try (Mutex lock = lockUnallocated()) {
            requireBreakfixable(node);
            // Children are removed and the host moved to breakfixed in one transaction
            NestedTransaction transaction = new NestedTransaction();
            List<Node> removed = removeChildren(node, false, transaction);
            removed.add(move(node.hostname(), Node.State.breakfixed, agent, true, Optional.of(reason), transaction));
            transaction.commit();
            return removed;
        }
    }

    /** Moves all children of hostname, then hostname itself, to the given state in a single transaction */
    private List<Node> moveRecursively(String hostname, Node.State toState, Agent agent, Optional<String> reason) {
        NestedTransaction transaction = new NestedTransaction();
        List<Node> moved = list().childrenOf(hostname).asList().stream()
                .map(child -> move(child.hostname(), toState, agent, true, reason, transaction))
                .collect(Collectors.toList());
        moved.add(move(hostname, toState, agent, true, reason, transaction));
        transaction.commit();
        return moved;
    }

    /** Move a node to given state */
    private Node move(String hostname, Node.State toState, Agent agent, boolean keepAllocation, Optional<String> reason) {
        NestedTransaction transaction = new NestedTransaction();
        Node moved = move(hostname, toState, agent, keepAllocation, reason, transaction);
        transaction.commit();
        return moved;
    }

    /** Move a node to given state as part of a transaction */
    private Node move(String hostname, Node.State toState, Agent agent, boolean keepAllocation, Optional<String> reason,
                      NestedTransaction transaction) {
        try (NodeMutex lock = lockAndGetRequired(hostname)) {
            Node node = lock.node();
            if (toState == Node.State.active) {
                // Activation requires an allocation, and no other active node may occupy the same cluster+index
                if (node.allocation().isEmpty()) illegal("Could not set " + node + " active: It has no allocation");
                if (!keepAllocation) illegal("Could not set " + node + " active: Requested to discard allocation");
                for (Node currentActive : list(Node.State.active).owner(node.allocation().get().owner())) {
                    if (node.allocation().get().membership().cluster().equals(currentActive.allocation().get().membership().cluster())
                        && node.allocation().get().membership().index() == currentActive.allocation().get().membership().index())
                        illegal("Could not set " + node + " active: Same cluster and index as " + currentActive);
                }
            }
            if (!keepAllocation && node.allocation().isPresent()) {
                node = node.withoutAllocation();
            }
            if (toState == Node.State.deprovisioned) {
                // Deprovisioned nodes give up their IP config
                node = node.with(IP.Config.EMPTY);
            }
            return db.writeTo(toState, List.of(node), agent, reason, transaction).get(0);
        }
    }

    /*
     * This method is used by the REST API to handle readying nodes for new allocations. For Linux
     * containers this will remove the node from node repository, otherwise the node will be moved to state ready.
     */
    public Node markNodeAvailableForNewAllocation(String hostname, Agent agent, String reason) {
        Node node = requireNode(hostname);
        if (node.flavor().getType() == Flavor.Type.DOCKER_CONTAINER && node.type() == NodeType.tenant) {
            // Container nodes are not recycled: they must be dirty, and are then removed entirely
            if (node.state() != Node.State.dirty)
                illegal("Cannot make " + node + " available for new allocation as it is not in state [dirty]");
            return removeRecursively(node, true).get(0);
        }

        if (node.state() == Node.State.ready) return node;

        // Refuse to ready a node whose (parent) host has hard failures
        Node parentHost = node.parentHostname().flatMap(this::node).orElse(node);
        List<String> failureReasons = NodeFailer.reasonsToFailHost(parentHost);
        if ( ! failureReasons.isEmpty())
            illegal(node + " cannot be readied because it has hard failures: " + failureReasons);

        return setReady(List.of(node), agent, reason).get(0);
    }

    /**
     * Removes all the nodes that are children of hostname before finally removing the hostname itself.
     *
     * @return a List of all the nodes that have been removed or (for hosts) deprovisioned
     */
    public List<Node> removeRecursively(String hostname) {
        Node node = requireNode(hostname);
        return removeRecursively(node, false);
    }

    public List<Node> removeRecursively(Node node, boolean force) {
        try (Mutex lock = lockUnallocated()) {
            requireRemovable(node, false, force);
            NestedTransaction transaction = new NestedTransaction();
            final List<Node> removed;
            if (!node.type().isHost()) {
                removed = List.of(node);
                db.removeNodes(removed, transaction);
            } else {
                removed = removeChildren(node, force, transaction);
                if (zone.getCloud().dynamicProvisioning()) {
                    // Dynamically provisioned hosts are deleted outright ...
                    db.removeNodes(List.of(node), transaction);
                } else {
                    // ... while statically provisioned hosts are kept as deprovisioned, preserving history
                    move(node.hostname(), Node.State.deprovisioned, Agent.system, false, Optional.empty(), transaction);
                }
                removed.add(node);
            }
            transaction.commit();
            return removed;
        }
    }

    /** Forgets a deprovisioned node. This removes all traces of the node in the node repository. */
    public void forget(Node node) {
        if (node.state() != Node.State.deprovisioned)
            throw new IllegalArgumentException(node + " must be deprovisioned before it can be forgotten");
        if (node.status().wantToRebuild())
            throw new IllegalArgumentException(node + " is rebuilding and cannot be forgotten");
        NestedTransaction transaction = new NestedTransaction();
        db.removeNodes(List.of(node), transaction);
        transaction.commit();
    }

    /** Removes all children of the given node as part of the given transaction, after checking they are removable */
    private List<Node> removeChildren(Node node, boolean force, NestedTransaction transaction) {
        List<Node> children = list().childrenOf(node).asList();
        children.forEach(child -> requireRemovable(child, true, force));
        db.removeNodes(children, transaction);
        return new ArrayList<>(children);
    }

    /**
     * Throws if the given node cannot be removed.
Removal is allowed if: * - Tenant node: * - non-recursively: node is unallocated * - recursively: node is unallocated or node is in failed|parked * - Host node: iff in state provisioned|failed|parked * - Child node: * - non-recursively: node in state ready * - recursively: child is in state provisioned|failed|parked|dirty|ready */ private void requireRemovable(Node node, boolean removingRecursively, boolean force) { if (force) return; if (node.type() == NodeType.tenant && node.allocation().isPresent()) { EnumSet<Node.State> removableStates = EnumSet.of(Node.State.failed, Node.State.parked); if (!removingRecursively || !removableStates.contains(node.state())) illegal(node + " is currently allocated and cannot be removed while in " + node.state()); } final Set<Node.State> removableStates; if (node.type().isHost()) { removableStates = EnumSet.of(Node.State.provisioned, Node.State.failed, Node.State.parked); } else { removableStates = removingRecursively ? EnumSet.of(Node.State.provisioned, Node.State.failed, Node.State.parked, Node.State.dirty, Node.State.ready) : EnumSet.of(Node.State.ready); } if (!removableStates.contains(node.state())) illegal(node + " can not be removed while in " + node.state()); } /** * Throws if given node cannot be breakfixed. * Breakfix is allowed if the following is true: * - Node is tenant host * - Node is in zone without dynamic provisioning * - Node is in parked or failed state */ private void requireBreakfixable(Node node) { if (zone.getCloud().dynamicProvisioning()) { illegal("Can not breakfix in zone: " + zone); } if (node.type() != NodeType.host) { illegal(node + " can not be breakfixed as it is not a tenant host"); } Set<Node.State> legalStates = EnumSet.of(Node.State.failed, Node.State.parked); if (! legalStates.contains(node.state())) { illegal(node + " can not be removed as it is not in the states " + legalStates); } } /** * Increases the restart generation of the active nodes matching given filter. 
* * @return the nodes in their new state */ public List<Node> restartActive(Predicate<Node> filter) { return restart(NodeFilter.in(Set.of(Node.State.active)).and(filter)); } /** * Increases the restart generation of the any nodes matching given filter. * * @return the nodes in their new state */ public List<Node> restart(Predicate<Node> filter) { return performOn(filter, (node, lock) -> write(node.withRestart(node.allocation().get().restartGeneration().withIncreasedWanted()), lock)); } /** * Increases the reboot generation of the nodes matching the filter. * * @return the nodes in their new state */ public List<Node> reboot(Predicate<Node> filter) { return performOn(filter, (node, lock) -> write(node.withReboot(node.status().reboot().withIncreasedWanted()), lock)); } /** * Set target OS version of all nodes matching given filter. * * @return the nodes in their new state */ public List<Node> upgradeOs(Predicate<Node> filter, Optional<Version> version) { return performOn(filter, (node, lock) -> { var newStatus = node.status().withOsVersion(node.status().osVersion().withWanted(version)); return write(node.with(newStatus), lock); }); } /** Retire nodes matching given filter */ public List<Node> retire(Predicate<Node> filter, Agent agent, Instant instant) { return performOn(filter, (node, lock) -> write(node.withWantToRetire(true, agent, instant), lock)); } /** Retire and deprovision given host and all of its children */ public List<Node> deprovision(String hostname, Agent agent, Instant instant) { return decommission(hostname, DecommissionOperation.deprovision, agent, instant); } /** Retire and rebuild given host and all of its children */ public List<Node> rebuild(String hostname, Agent agent, Instant instant) { return decommission(hostname, DecommissionOperation.rebuild, agent, instant); } private List<Node> decommission(String hostname, DecommissionOperation op, Agent agent, Instant instant) { Optional<NodeMutex> nodeMutex = lockAndGet(hostname); if 
(nodeMutex.isEmpty()) return List.of(); Node host = nodeMutex.get().node(); if (!host.type().isHost()) throw new IllegalArgumentException("Cannot " + op + " non-host " + host); List<Node> result; boolean wantToDeprovision = op == DecommissionOperation.deprovision; boolean wantToRebuild = op == DecommissionOperation.rebuild; try (NodeMutex lock = nodeMutex.get(); Mutex allocationLock = lockUnallocated()) { host = lock.node(); result = performOn(list(allocationLock).childrenOf(host), (node, nodeLock) -> { Node newNode = node.withWantToRetire(true, wantToDeprovision, wantToRebuild, agent, instant); return write(newNode, nodeLock); }); Node newHost = host.withWantToRetire(true, wantToDeprovision, wantToRebuild, agent, instant); result.add(write(newHost, lock)); } return result; } /** * Writes this node after it has changed some internal state but NOT changed its state field. * This does NOT lock the node repository implicitly, but callers are expected to already hold the lock. * * @param lock already acquired lock * @return the written node for convenience */ public Node write(Node node, Mutex lock) { return write(List.of(node), lock).get(0); } /** * Writes these nodes after they have changed some internal state but NOT changed their state field. * This does NOT lock the node repository implicitly, but callers are expected to already hold the lock. * * @param lock already acquired lock * @return the written nodes for convenience */ public List<Node> write(List<Node> nodes, @SuppressWarnings("unused") Mutex lock) { return db.writeTo(nodes, Agent.system, Optional.empty()); } private List<Node> performOn(Predicate<Node> filter, BiFunction<Node, Mutex, Node> action) { return performOn(list().matching(filter), action); } /** * Performs an operation requiring locking on all nodes matching some filter. 
* * @param action the action to perform * @return the set of nodes on which the action was performed, as they became as a result of the operation */ private List<Node> performOn(NodeList nodes, BiFunction<Node, Mutex, Node> action) { List<Node> unallocatedNodes = new ArrayList<>(); ListMap<ApplicationId, Node> allocatedNodes = new ListMap<>(); for (Node node : nodes) { if (node.allocation().isPresent()) allocatedNodes.put(node.allocation().get().owner(), node); else unallocatedNodes.add(node); } List<Node> resultingNodes = new ArrayList<>(); try (Mutex lock = lockUnallocated()) { for (Node node : unallocatedNodes) { Optional<Node> currentNode = db.readNode(node.hostname()); if (currentNode.isEmpty()) continue; resultingNodes.add(action.apply(currentNode.get(), lock)); } } for (Map.Entry<ApplicationId, List<Node>> applicationNodes : allocatedNodes.entrySet()) { try (Mutex lock = lock(applicationNodes.getKey())) { for (Node node : applicationNodes.getValue()) { Optional<Node> currentNode = db.readNode(node.hostname()); if (currentNode.isEmpty()) continue; resultingNodes.add(action.apply(currentNode.get(), lock)); } } } return resultingNodes; } public boolean canAllocateTenantNodeTo(Node host) { return canAllocateTenantNodeTo(host, zone.getCloud().dynamicProvisioning()); } public boolean suspended(Node node) { try { return orchestrator.getNodeStatus(new HostName(node.hostname())).isSuspended(); } catch (HostNameNotFoundException e) { return false; } } /** Create a lock which provides exclusive rights to making changes to the given application */ public Mutex lock(ApplicationId application) { return db.lock(application); } /** Create a lock with a timeout which provides exclusive rights to making changes to the given application */ public Mutex lock(ApplicationId application, Duration timeout) { return db.lock(application, timeout); } /** Create a lock which provides exclusive rights to modifying unallocated nodes */ public Mutex lockUnallocated() { return 
db.lockInactive(); } /** Returns the unallocated/application lock, and the node acquired under that lock. */ public Optional<NodeMutex> lockAndGet(Node node) { Node staleNode = node; final int maxRetries = 4; for (int i = 0; i < maxRetries; ++i) { Mutex lockToClose = lock(staleNode); try { Optional<Node> freshNode = node(staleNode.hostname(), staleNode.state()); if (freshNode.isEmpty()) { freshNode = node(staleNode.hostname()); if (freshNode.isEmpty()) { return Optional.empty(); } } if (Objects.equals(freshNode.get().allocation().map(Allocation::owner), staleNode.allocation().map(Allocation::owner))) { NodeMutex nodeMutex = new NodeMutex(freshNode.get(), lockToClose); lockToClose = null; return Optional.of(nodeMutex); } staleNode = freshNode.get(); } finally { if (lockToClose != null) lockToClose.close(); } } throw new IllegalStateException("Giving up (after " + maxRetries + " attempts) " + "fetching an up to date node under lock: " + node.hostname()); } /** Returns the unallocated/application lock, and the node acquired under that lock. */ public Optional<NodeMutex> lockAndGet(String hostname) { return node(hostname).flatMap(this::lockAndGet); } /** Returns the unallocated/application lock, and the node acquired under that lock. */ public NodeMutex lockAndGetRequired(Node node) { return lockAndGet(node).orElseThrow(() -> new NoSuchNodeException("No node with hostname '" + node.hostname() + "'")); } /** Returns the unallocated/application lock, and the node acquired under that lock. */ public NodeMutex lockAndGetRequired(String hostname) { return lockAndGet(hostname).orElseThrow(() -> new NoSuchNodeException("No node with hostname '" + hostname + "'")); } private Mutex lock(Node node) { return node.allocation().isPresent() ? 
lock(node.allocation().get().owner()) : lockUnallocated(); } private Node requireNode(String hostname) { return node(hostname).orElseThrow(() -> new NoSuchNodeException("No node with hostname '" + hostname + "'")); } private void illegal(String message) { throw new IllegalArgumentException(message); } /** Returns whether node should be parked when deallocated by given agent */ private static boolean parkOnDeallocationOf(Node node, Agent agent) { if (node.state() == Node.State.parked) return false; if (agent == Agent.operator) return false; if (!node.type().isHost() && node.status().wantToDeprovision()) return false; boolean retirementRequestedByOperator = node.status().wantToRetire() && node.history().event(History.Event.Type.wantToRetire) .map(History.Event::agent) .map(a -> a == Agent.operator) .orElse(false); return node.status().wantToDeprovision() || node.status().wantToRebuild() || retirementRequestedByOperator; } /** The different ways a host can be decommissioned */ private enum DecommissionOperation { deprovision, rebuild, } }
Consider saying how many bytes were omitted
public LogRecord toLog(String line) { if (line.length() > 1 << 13) line = line.substring(0, 1 << 13) + " ... (this log entry was truncated due to size)"; buffer.reset(); try (PrintStream formatter = new PrintStream(new HtmlAnsiOutputStream(buffer))) { formatter.print(line); } return new LogRecord(HTML, buffer.toString(UTF_8)); }
line = line.substring(0, 1 << 13) + " ... (this log entry was truncated due to size)";
public LogRecord toLog(String line) { if (line.length() > 1 << 13) line = line.substring(0, 1 << 13) + " ... (" + (line.length() - (1 << 13)) + " bytes truncated due to size)"; buffer.reset(); try (PrintStream formatter = new PrintStream(new HtmlAnsiOutputStream(buffer))) { formatter.print(line); } return new LogRecord(HTML, buffer.toString(UTF_8)); }
class HtmlLogger { public static final Level HTML = new Level("html", 1) { }; private final ByteArrayOutputStream buffer = new ByteArrayOutputStream(); }
class HtmlLogger { public static final Level HTML = new Level("html", 1) { }; private final ByteArrayOutputStream buffer = new ByteArrayOutputStream(); }
What will 2 spaces be represented as?
void testConversionToHtml() { String splitMessage = Ansi.ansi().fg(Ansi.Color.RED).a("</body>Hello!\ncontinued").reset().toString(); List<String> messages = List.of(splitMessage.split("\n")); LogRecord html0 = new HtmlLogger().toLog(messages.get(0)); assertEquals("html", html0.getLevel().getName()); assertEquals("<span style=\"color: red;\">&lt;/body&gt;Hello!</span>", html0.getMessage()); LogRecord html1 = new HtmlLogger().toLog(messages.get(1)); assertEquals("html", html1.getLevel().getName()); assertEquals("continued", html1.getMessage()); }
String splitMessage = Ansi.ansi().fg(Ansi.Color.RED).a("</body>Hello!\ncontinued").reset().toString();
void testConversionToHtml() { String splitMessage = Ansi.ansi().fg(Ansi.Color.RED).a("</body>Hello!\ncontinued").reset().toString(); List<String> messages = List.of(splitMessage.split("\n")); LogRecord html0 = new HtmlLogger().toLog(messages.get(0)); assertEquals("html", html0.getLevel().getName()); assertEquals("<span style=\"color: red;\">&lt;/body&gt;Hello!</span>", html0.getMessage()); LogRecord html1 = new HtmlLogger().toLog(messages.get(1)); assertEquals("html", html1.getLevel().getName()); assertEquals("continued", html1.getMessage()); }
class HtmlLoggerTest { @Test }
class HtmlLoggerTest { @Test }
2 space.
void testConversionToHtml() { String splitMessage = Ansi.ansi().fg(Ansi.Color.RED).a("</body>Hello!\ncontinued").reset().toString(); List<String> messages = List.of(splitMessage.split("\n")); LogRecord html0 = new HtmlLogger().toLog(messages.get(0)); assertEquals("html", html0.getLevel().getName()); assertEquals("<span style=\"color: red;\">&lt;/body&gt;Hello!</span>", html0.getMessage()); LogRecord html1 = new HtmlLogger().toLog(messages.get(1)); assertEquals("html", html1.getLevel().getName()); assertEquals("continued", html1.getMessage()); }
String splitMessage = Ansi.ansi().fg(Ansi.Color.RED).a("</body>Hello!\ncontinued").reset().toString();
void testConversionToHtml() { String splitMessage = Ansi.ansi().fg(Ansi.Color.RED).a("</body>Hello!\ncontinued").reset().toString(); List<String> messages = List.of(splitMessage.split("\n")); LogRecord html0 = new HtmlLogger().toLog(messages.get(0)); assertEquals("html", html0.getLevel().getName()); assertEquals("<span style=\"color: red;\">&lt;/body&gt;Hello!</span>", html0.getMessage()); LogRecord html1 = new HtmlLogger().toLog(messages.get(1)); assertEquals("html", html1.getLevel().getName()); assertEquals("continued", html1.getMessage()); }
class HtmlLoggerTest { @Test }
class HtmlLoggerTest { @Test }
Good idea.
public LogRecord toLog(String line) { if (line.length() > 1 << 13) line = line.substring(0, 1 << 13) + " ... (this log entry was truncated due to size)"; buffer.reset(); try (PrintStream formatter = new PrintStream(new HtmlAnsiOutputStream(buffer))) { formatter.print(line); } return new LogRecord(HTML, buffer.toString(UTF_8)); }
line = line.substring(0, 1 << 13) + " ... (this log entry was truncated due to size)";
public LogRecord toLog(String line) { if (line.length() > 1 << 13) line = line.substring(0, 1 << 13) + " ... (" + (line.length() - (1 << 13)) + " bytes truncated due to size)"; buffer.reset(); try (PrintStream formatter = new PrintStream(new HtmlAnsiOutputStream(buffer))) { formatter.print(line); } return new LogRecord(HTML, buffer.toString(UTF_8)); }
class HtmlLogger { public static final Level HTML = new Level("html", 1) { }; private final ByteArrayOutputStream buffer = new ByteArrayOutputStream(); }
class HtmlLogger { public static final Level HTML = new Level("html", 1) { }; private final ByteArrayOutputStream buffer = new ByteArrayOutputStream(); }
so that will be collapsed to 1 space by HTML rendering then?
void testConversionToHtml() { String splitMessage = Ansi.ansi().fg(Ansi.Color.RED).a("</body>Hello!\ncontinued").reset().toString(); List<String> messages = List.of(splitMessage.split("\n")); LogRecord html0 = new HtmlLogger().toLog(messages.get(0)); assertEquals("html", html0.getLevel().getName()); assertEquals("<span style=\"color: red;\">&lt;/body&gt;Hello!</span>", html0.getMessage()); LogRecord html1 = new HtmlLogger().toLog(messages.get(1)); assertEquals("html", html1.getLevel().getName()); assertEquals("continued", html1.getMessage()); }
String splitMessage = Ansi.ansi().fg(Ansi.Color.RED).a("</body>Hello!\ncontinued").reset().toString();
void testConversionToHtml() { String splitMessage = Ansi.ansi().fg(Ansi.Color.RED).a("</body>Hello!\ncontinued").reset().toString(); List<String> messages = List.of(splitMessage.split("\n")); LogRecord html0 = new HtmlLogger().toLog(messages.get(0)); assertEquals("html", html0.getLevel().getName()); assertEquals("<span style=\"color: red;\">&lt;/body&gt;Hello!</span>", html0.getMessage()); LogRecord html1 = new HtmlLogger().toLog(messages.get(1)); assertEquals("html", html1.getLevel().getName()); assertEquals("continued", html1.getMessage()); }
class HtmlLoggerTest { @Test }
class HtmlLoggerTest { @Test }
Not any longer!
void testConversionToHtml() { String splitMessage = Ansi.ansi().fg(Ansi.Color.RED).a("</body>Hello!\ncontinued").reset().toString(); List<String> messages = List.of(splitMessage.split("\n")); LogRecord html0 = new HtmlLogger().toLog(messages.get(0)); assertEquals("html", html0.getLevel().getName()); assertEquals("<span style=\"color: red;\">&lt;/body&gt;Hello!</span>", html0.getMessage()); LogRecord html1 = new HtmlLogger().toLog(messages.get(1)); assertEquals("html", html1.getLevel().getName()); assertEquals("continued", html1.getMessage()); }
String splitMessage = Ansi.ansi().fg(Ansi.Color.RED).a("</body>Hello!\ncontinued").reset().toString();
void testConversionToHtml() { String splitMessage = Ansi.ansi().fg(Ansi.Color.RED).a("</body>Hello!\ncontinued").reset().toString(); List<String> messages = List.of(splitMessage.split("\n")); LogRecord html0 = new HtmlLogger().toLog(messages.get(0)); assertEquals("html", html0.getLevel().getName()); assertEquals("<span style=\"color: red;\">&lt;/body&gt;Hello!</span>", html0.getMessage()); LogRecord html1 = new HtmlLogger().toLog(messages.get(1)); assertEquals("html", html1.getLevel().getName()); assertEquals("continued", html1.getMessage()); }
class HtmlLoggerTest { @Test }
class HtmlLoggerTest { @Test }
Was able to fix that in the console code, using `pre-wrap` for whitespace.
void testConversionToHtml() { String splitMessage = Ansi.ansi().fg(Ansi.Color.RED).a("</body>Hello!\ncontinued").reset().toString(); List<String> messages = List.of(splitMessage.split("\n")); LogRecord html0 = new HtmlLogger().toLog(messages.get(0)); assertEquals("html", html0.getLevel().getName()); assertEquals("<span style=\"color: red;\">&lt;/body&gt;Hello!</span>", html0.getMessage()); LogRecord html1 = new HtmlLogger().toLog(messages.get(1)); assertEquals("html", html1.getLevel().getName()); assertEquals("continued", html1.getMessage()); }
String splitMessage = Ansi.ansi().fg(Ansi.Color.RED).a("</body>Hello!\ncontinued").reset().toString();
void testConversionToHtml() { String splitMessage = Ansi.ansi().fg(Ansi.Color.RED).a("</body>Hello!\ncontinued").reset().toString(); List<String> messages = List.of(splitMessage.split("\n")); LogRecord html0 = new HtmlLogger().toLog(messages.get(0)); assertEquals("html", html0.getLevel().getName()); assertEquals("<span style=\"color: red;\">&lt;/body&gt;Hello!</span>", html0.getMessage()); LogRecord html1 = new HtmlLogger().toLog(messages.get(1)); assertEquals("html", html1.getLevel().getName()); assertEquals("continued", html1.getMessage()); }
class HtmlLoggerTest { @Test }
class HtmlLoggerTest { @Test }
Should we add an explicit check that the `test` method itself is _not_ invoked when the filter fails? (and vice versa in the case where the RPC passes the authz filter)
public void testFilterFailsRequest() { Request r = new Request("test"); r.parameters().add(new Int32Value(42)); r.parameters().add(new Int32Value(0)); r.parameters().add(new Int8Value((byte)0)); filter.allowed = false; assertFalse(filter.invoked); target.invokeSync(r, timeout); assertTrue(r.isError()); assertTrue(filter.invoked); assertEquals(ErrorCode.PERMISSION_DENIED, r.errorCode()); assertEquals("Permission denied", r.errorMessage()); }
assertTrue(filter.invoked);
public void testFilterFailsRequest() { Request r = new Request("test"); r.parameters().add(new Int32Value(42)); r.parameters().add(new Int32Value(0)); r.parameters().add(new Int8Value((byte)0)); filter.allowed = false; assertFalse(filter.invoked); target.invokeSync(r, timeout); assertTrue(r.isError()); assertTrue(filter.invoked); assertFalse(testMethod.invoked); assertEquals(ErrorCode.PERMISSION_DENIED, r.errorCode()); assertEquals("Permission denied", r.errorMessage()); }
class InvokeErrorTest { final double timeout=60.0; Supervisor server; Acceptor acceptor; Supervisor client; Target target; Test.Barrier barrier; SimpleRequestAccessFilter filter; @Before public void setUp() throws ListenFailedException { server = new Supervisor(new Transport()); client = new Supervisor(new Transport()); acceptor = server.listen(new Spec(0)); target = client.connect(new Spec("localhost", acceptor.port())); filter = new SimpleRequestAccessFilter(); server.addMethod(new Method("test", "iib", "i", this::rpc_test).requestAccessFilter(filter)); server.addMethod(new Method("test_barrier", "iib", "i", this::rpc_test_barrier)); barrier = new Test.Barrier(); } @After public void tearDown() { target.close(); acceptor.shutdown().join(); client.transport().shutdown().join(); server.transport().shutdown().join(); } private void rpc_test(Request req) { int value = req.parameters().get(0).asInt32(); int error = req.parameters().get(1).asInt32(); int extra = req.parameters().get(2).asInt8(); req.returnValues().add(new Int32Value(value)); if (extra != 0) { req.returnValues().add(new Int32Value(value)); } if (error != 0) { req.setError(error, "Custom error"); } } private void rpc_test_barrier(Request req) { rpc_test(req); barrier.waitFor(); } @org.junit.Test public void testNoError() { Request req1 = new Request("test"); req1.parameters().add(new Int32Value(42)); req1.parameters().add(new Int32Value(0)); req1.parameters().add(new Int8Value((byte)0)); target.invokeSync(req1, timeout); assertTrue(!req1.isError()); assertEquals(1, req1.returnValues().size()); assertEquals(42, req1.returnValues().get(0).asInt32()); } @org.junit.Test public void testNoSuchMethod() { Request req1 = new Request("bogus"); target.invokeSync(req1, timeout); assertTrue(req1.isError()); assertEquals(0, req1.returnValues().size()); assertEquals(ErrorCode.NO_SUCH_METHOD, req1.errorCode()); } @org.junit.Test public void testWrongParameters() { Request req1 = new Request("test"); 
req1.parameters().add(new Int32Value(42)); req1.parameters().add(new Int32Value(0)); req1.parameters().add(new Int32Value(0)); target.invokeSync(req1, timeout); assertTrue(req1.isError()); assertEquals(0, req1.returnValues().size()); assertEquals(ErrorCode.WRONG_PARAMS, req1.errorCode()); Request req2 = new Request("test"); req2.parameters().add(new Int32Value(42)); req2.parameters().add(new Int32Value(0)); target.invokeSync(req2, timeout); assertTrue(req2.isError()); assertEquals(0, req2.returnValues().size()); assertEquals(ErrorCode.WRONG_PARAMS, req2.errorCode()); Request req3 = new Request("test"); req3.parameters().add(new Int32Value(42)); req3.parameters().add(new Int32Value(0)); req3.parameters().add(new Int8Value((byte)0)); req3.parameters().add(new Int8Value((byte)0)); target.invokeSync(req3, timeout); assertTrue(req3.isError()); assertEquals(0, req3.returnValues().size()); assertEquals(ErrorCode.WRONG_PARAMS, req3.errorCode()); } @org.junit.Test public void testWrongReturnValues() { Request req1 = new Request("test"); req1.parameters().add(new Int32Value(42)); req1.parameters().add(new Int32Value(0)); req1.parameters().add(new Int8Value((byte)1)); target.invokeSync(req1, timeout); assertTrue(req1.isError()); assertEquals(0, req1.returnValues().size()); assertEquals(ErrorCode.WRONG_RETURN, req1.errorCode()); } @org.junit.Test public void testMethodFailed() { Request req1 = new Request("test"); req1.parameters().add(new Int32Value(42)); req1.parameters().add(new Int32Value(75000)); req1.parameters().add(new Int8Value((byte)0)); target.invokeSync(req1, timeout); assertTrue(req1.isError()); assertEquals(0, req1.returnValues().size()); assertEquals(75000, req1.errorCode()); Request req2 = new Request("test"); req2.parameters().add(new Int32Value(42)); req2.parameters().add(new Int32Value(75000)); req2.parameters().add(new Int8Value((byte)1)); target.invokeSync(req2, timeout); assertTrue(req2.isError()); assertEquals(0, req2.returnValues().size()); 
assertEquals(75000, req2.errorCode()); } @org.junit.Test public void testConnectionError() { Test.Waiter w = new Test.Waiter(); Request req1 = new Request("test_barrier"); req1.parameters().add(new Int32Value(42)); req1.parameters().add(new Int32Value(0)); req1.parameters().add(new Int8Value((byte)0)); target.invokeAsync(req1, timeout, w); target.close(); client.transport().sync(); barrier.breakIt(); w.waitDone(); assertTrue(!target.isValid()); assertTrue(req1.isError()); assertEquals(0, req1.returnValues().size()); assertEquals(ErrorCode.CONNECTION, req1.errorCode()); } @org.junit.Test public void testFilterIsInvoked() { Request r = new Request("test"); r.parameters().add(new Int32Value(42)); r.parameters().add(new Int32Value(0)); r.parameters().add(new Int8Value((byte)0)); assertFalse(filter.invoked); target.invokeSync(r, timeout); assertFalse(r.isError()); assertTrue(filter.invoked); } @org.junit.Test private static class SimpleRequestAccessFilter implements RequestAccessFilter { boolean invoked = false, allowed = true; @Override public boolean allow(Request r) { invoked = true; return allowed; } } }
class InvokeErrorTest { final double timeout=60.0; Supervisor server; Acceptor acceptor; Supervisor client; Target target; Test.Barrier barrier; SimpleRequestAccessFilter filter; RpcTestMethod testMethod; @Before public void setUp() throws ListenFailedException { server = new Supervisor(new Transport()); client = new Supervisor(new Transport()); acceptor = server.listen(new Spec(0)); target = client.connect(new Spec("localhost", acceptor.port())); filter = new SimpleRequestAccessFilter(); testMethod = new RpcTestMethod(); server.addMethod(new Method("test", "iib", "i", testMethod).requestAccessFilter(filter)); server.addMethod(new Method("test_barrier", "iib", "i", this::rpc_test_barrier)); barrier = new Test.Barrier(); } @After public void tearDown() { target.close(); acceptor.shutdown().join(); client.transport().shutdown().join(); server.transport().shutdown().join(); } private void rpc_test_barrier(Request req) { testMethod.invoke(req); barrier.waitFor(); } @org.junit.Test public void testNoError() { Request req1 = new Request("test"); req1.parameters().add(new Int32Value(42)); req1.parameters().add(new Int32Value(0)); req1.parameters().add(new Int8Value((byte)0)); target.invokeSync(req1, timeout); assertTrue(!req1.isError()); assertEquals(1, req1.returnValues().size()); assertEquals(42, req1.returnValues().get(0).asInt32()); } @org.junit.Test public void testNoSuchMethod() { Request req1 = new Request("bogus"); target.invokeSync(req1, timeout); assertTrue(req1.isError()); assertEquals(0, req1.returnValues().size()); assertEquals(ErrorCode.NO_SUCH_METHOD, req1.errorCode()); } @org.junit.Test public void testWrongParameters() { Request req1 = new Request("test"); req1.parameters().add(new Int32Value(42)); req1.parameters().add(new Int32Value(0)); req1.parameters().add(new Int32Value(0)); target.invokeSync(req1, timeout); assertTrue(req1.isError()); assertEquals(0, req1.returnValues().size()); assertEquals(ErrorCode.WRONG_PARAMS, req1.errorCode()); Request req2 = 
new Request("test"); req2.parameters().add(new Int32Value(42)); req2.parameters().add(new Int32Value(0)); target.invokeSync(req2, timeout); assertTrue(req2.isError()); assertEquals(0, req2.returnValues().size()); assertEquals(ErrorCode.WRONG_PARAMS, req2.errorCode()); Request req3 = new Request("test"); req3.parameters().add(new Int32Value(42)); req3.parameters().add(new Int32Value(0)); req3.parameters().add(new Int8Value((byte)0)); req3.parameters().add(new Int8Value((byte)0)); target.invokeSync(req3, timeout); assertTrue(req3.isError()); assertEquals(0, req3.returnValues().size()); assertEquals(ErrorCode.WRONG_PARAMS, req3.errorCode()); } @org.junit.Test public void testWrongReturnValues() { Request req1 = new Request("test"); req1.parameters().add(new Int32Value(42)); req1.parameters().add(new Int32Value(0)); req1.parameters().add(new Int8Value((byte)1)); target.invokeSync(req1, timeout); assertTrue(req1.isError()); assertEquals(0, req1.returnValues().size()); assertEquals(ErrorCode.WRONG_RETURN, req1.errorCode()); } @org.junit.Test public void testMethodFailed() { Request req1 = new Request("test"); req1.parameters().add(new Int32Value(42)); req1.parameters().add(new Int32Value(75000)); req1.parameters().add(new Int8Value((byte)0)); target.invokeSync(req1, timeout); assertTrue(req1.isError()); assertEquals(0, req1.returnValues().size()); assertEquals(75000, req1.errorCode()); Request req2 = new Request("test"); req2.parameters().add(new Int32Value(42)); req2.parameters().add(new Int32Value(75000)); req2.parameters().add(new Int8Value((byte)1)); target.invokeSync(req2, timeout); assertTrue(req2.isError()); assertEquals(0, req2.returnValues().size()); assertEquals(75000, req2.errorCode()); } @org.junit.Test public void testConnectionError() { Test.Waiter w = new Test.Waiter(); Request req1 = new Request("test_barrier"); req1.parameters().add(new Int32Value(42)); req1.parameters().add(new Int32Value(0)); req1.parameters().add(new Int8Value((byte)0)); 
target.invokeAsync(req1, timeout, w); target.close(); client.transport().sync(); barrier.breakIt(); w.waitDone(); assertTrue(!target.isValid()); assertTrue(req1.isError()); assertEquals(0, req1.returnValues().size()); assertEquals(ErrorCode.CONNECTION, req1.errorCode()); } @org.junit.Test private static class RpcTestMethod implements MethodHandler { boolean invoked = false; @Override public void invoke(Request req) { invoked = true; rpc_test(req); } void rpc_test(Request req) { int value = req.parameters().get(0).asInt32(); int error = req.parameters().get(1).asInt32(); int extra = req.parameters().get(2).asInt8(); req.returnValues().add(new Int32Value(value)); if (extra != 0) { req.returnValues().add(new Int32Value(value)); } if (error != 0) { req.setError(error, "Custom error"); } } } }
Stray if?
public boolean isWithin(ClusterResources min, ClusterResources max) { if (this.smallerThan(min)) return false; if (max.smallerThan(this)) return false; if (min.nodeResources().isUnspecified()) if ( ! min.nodeResources().isUnspecified() && ! this.nodeResources.justNonNumbers().compatibleWith(min.nodeResources.justNonNumbers())) return false; if ( ! max.nodeResources().isUnspecified() && ! this.nodeResources.justNonNumbers().compatibleWith(max.nodeResources.justNonNumbers())) return false; return true; }
if (min.nodeResources().isUnspecified())
public boolean isWithin(ClusterResources min, ClusterResources max) { if (this.smallerThan(min)) return false; if (max.smallerThan(this)) return false; if (min.nodeResources().isUnspecified()) if ( ! min.nodeResources().isUnspecified() && ! this.nodeResources.justNonNumbers().compatibleWith(min.nodeResources.justNonNumbers())) return false; if ( ! max.nodeResources().isUnspecified() && ! this.nodeResources.justNonNumbers().compatibleWith(max.nodeResources.justNonNumbers())) return false; return true; }
/**
 * The resources of a cluster: the node count, the number of node groups, and the
 * resources of each node. Immutable value object; the "with" methods return copies.
 */
class ClusterResources {

    /** The node count in the cluster */
    private final int nodes;

    /** The number of node groups in the cluster */
    private final int groups;

    /** The resources of each node in the cluster */
    private final NodeResources nodeResources;

    public ClusterResources(int nodes, int groups, NodeResources nodeResources) {
        this.nodes = nodes;
        this.groups = groups;
        this.nodeResources = Objects.requireNonNull(nodeResources);
    }

    /** Returns the total number of allocated nodes (over all groups) */
    public int nodes() { return nodes; }

    /** Returns the number of node groups */
    public int groups() { return groups; }

    /** Returns the resources of each node */
    public NodeResources nodeResources() { return nodeResources; }

    /** Returns a copy of this with the given node resources */
    public ClusterResources with(NodeResources resources) { return new ClusterResources(nodes, groups, resources); }

    /** Returns a copy of this with the given node count */
    public ClusterResources withNodes(int nodes) { return new ClusterResources(nodes, groups, nodeResources); }

    /** Returns a copy of this with the given group count */
    public ClusterResources withGroups(int groups) { return new ClusterResources(nodes, groups, nodeResources); }

    /** Returns true if this is smaller than the given resources in any dimension */
    public boolean smallerThan(ClusterResources other) {
        if (this.nodes < other.nodes) return true;
        if (this.groups < other.groups) return true;
        // Numeric node-resource comparison is only meaningful when both sides are specified
        if (this.nodeResources.isUnspecified() || other.nodeResources.isUnspecified()) return false;
        if ( ! this.nodeResources.justNumbers().satisfies(other.nodeResources.justNumbers())) return true;
        return false;
    }

    /** Returns true if this is within the given limits (inclusive) and is compatible with them */
    // NOTE(review): the isWithin method this javadoc belongs to is elided from this excerpt

    /** Returns the total resources of this, that is the number of nodes times the node resources */
    public NodeResources totalResources() { return nodeResources.withVcpu(nodeResources.vcpu() * nodes)
                                                                .withMemoryGb(nodeResources.memoryGb() * nodes)
                                                                .withDiskGb(nodeResources.diskGb() * nodes)
                                                                .withBandwidthGbps(nodeResources.bandwidthGbps() * nodes); }

    /** Returns a copy of this where the node resources retain only their numeric parts */
    public ClusterResources justNumbers() { return new ClusterResources(nodes, groups, nodeResources.justNumbers()); }

    /** Returns the standard cost of these resources, in dollars per hour */
    public double cost() { return nodes * nodeResources.cost(); }

    @Override
    public boolean equals(Object o) {
        if (o == this) return true;
        if ( ! (o instanceof ClusterResources)) return false;
        ClusterResources other = (ClusterResources)o;
        if (other.nodes != this.nodes) return false;
        if (other.groups != this.groups) return false;
        if ( ! other.nodeResources.equals(this.nodeResources)) return false;
        return true;
    }

    @Override
    public int hashCode() { return Objects.hash(nodes, groups, nodeResources); }

    @Override
    public String toString() {
        return nodes + " nodes" +
               (groups > 1 ? " (in " + groups + " groups)" : "") +
               " with " + nodeResources;
    }

}
class ClusterResources { /** The node count in the cluster */ private final int nodes; /** The number of node groups in the cluster */ private final int groups; /** The resources of each node in the cluster */ private final NodeResources nodeResources; public ClusterResources(int nodes, int groups, NodeResources nodeResources) { this.nodes = nodes; this.groups = groups; this.nodeResources = Objects.requireNonNull(nodeResources); } /** Returns the total number of allocated nodes (over all groups) */ public int nodes() { return nodes; } public int groups() { return groups; } public NodeResources nodeResources() { return nodeResources; } public ClusterResources with(NodeResources resources) { return new ClusterResources(nodes, groups, resources); } public ClusterResources withNodes(int nodes) { return new ClusterResources(nodes, groups, nodeResources); } public ClusterResources withGroups(int groups) { return new ClusterResources(nodes, groups, nodeResources); } /** Returns true if this is smaller than the given resources in any dimension */ public boolean smallerThan(ClusterResources other) { if (this.nodes < other.nodes) return true; if (this.groups < other.groups) return true; if (this.nodeResources.isUnspecified() || other.nodeResources.isUnspecified()) return false; if ( ! 
this.nodeResources.justNumbers().satisfies(other.nodeResources.justNumbers())) return true; return false; } /** Returns true if this is within the given limits (inclusive) and is compatible with them */ /** Returns the total resources of this, that is the number of nodes times the node resources */ public NodeResources totalResources() { return nodeResources.withVcpu(nodeResources.vcpu() * nodes) .withMemoryGb(nodeResources.memoryGb() * nodes) .withDiskGb(nodeResources.diskGb() * nodes) .withBandwidthGbps(nodeResources.bandwidthGbps() * nodes); } public ClusterResources justNumbers() { return new ClusterResources(nodes, groups, nodeResources.justNumbers()); } /** Returns the standard cost of these resources, in dollars per hour */ public double cost() { return nodes * nodeResources.cost(); } @Override public boolean equals(Object o) { if (o == this) return true; if ( ! (o instanceof ClusterResources)) return false; ClusterResources other = (ClusterResources)o; if (other.nodes != this.nodes) return false; if (other.groups != this.groups) return false; if ( ! other.nodeResources.equals(this.nodeResources)) return false; return true; } @Override public int hashCode() { return Objects.hash(nodes, groups, nodeResources); } @Override public String toString() { return nodes + " nodes" + (groups > 1 ? " (in " + groups + " groups)" : "") + " with " + nodeResources; } }
Yes - thanks!
/**
 * Returns true if this is within the given limits (inclusive) and is compatible with them.
 *
 * @param min the lower limit (inclusive)
 * @param max the upper limit (inclusive)
 * @return true if this is no smaller than min, no larger than max, and the non-numeric
 *         parts of its node resources are compatible with both limits (where specified)
 */
public boolean isWithin(ClusterResources min, ClusterResources max) {
    if (this.smallerThan(min)) return false;
    if (max.smallerThan(this)) return false;
    // Fix: a stray 'if (min.nodeResources().isUnspecified())' previously guarded the min
    // compatibility check below; since that condition contradicted the check's own guard,
    // the min check could never fail. The stray 'if' is removed.
    if ( ! min.nodeResources().isUnspecified()
         && ! this.nodeResources.justNonNumbers().compatibleWith(min.nodeResources.justNonNumbers()))
        return false;
    if ( ! max.nodeResources().isUnspecified()
         && ! this.nodeResources.justNonNumbers().compatibleWith(max.nodeResources.justNonNumbers()))
        return false;
    return true;
}
if (min.nodeResources().isUnspecified())
/**
 * Returns true if this is within the given limits (inclusive) and is compatible with them.
 *
 * @param min the lower limit (inclusive)
 * @param max the upper limit (inclusive)
 * @return true if this is no smaller than min, no larger than max, and the non-numeric
 *         parts of its node resources are compatible with both limits (where specified)
 */
public boolean isWithin(ClusterResources min, ClusterResources max) {
    if (this.smallerThan(min)) return false;
    if (max.smallerThan(this)) return false;
    // Fix: a stray 'if (min.nodeResources().isUnspecified())' previously guarded the min
    // compatibility check below; since that condition contradicted the check's own guard,
    // the min check could never fail. The stray 'if' is removed.
    if ( ! min.nodeResources().isUnspecified()
         && ! this.nodeResources.justNonNumbers().compatibleWith(min.nodeResources.justNonNumbers()))
        return false;
    if ( ! max.nodeResources().isUnspecified()
         && ! this.nodeResources.justNonNumbers().compatibleWith(max.nodeResources.justNonNumbers()))
        return false;
    return true;
}
class ClusterResources { /** The node count in the cluster */ private final int nodes; /** The number of node groups in the cluster */ private final int groups; /** The resources of each node in the cluster */ private final NodeResources nodeResources; public ClusterResources(int nodes, int groups, NodeResources nodeResources) { this.nodes = nodes; this.groups = groups; this.nodeResources = Objects.requireNonNull(nodeResources); } /** Returns the total number of allocated nodes (over all groups) */ public int nodes() { return nodes; } public int groups() { return groups; } public NodeResources nodeResources() { return nodeResources; } public ClusterResources with(NodeResources resources) { return new ClusterResources(nodes, groups, resources); } public ClusterResources withNodes(int nodes) { return new ClusterResources(nodes, groups, nodeResources); } public ClusterResources withGroups(int groups) { return new ClusterResources(nodes, groups, nodeResources); } /** Returns true if this is smaller than the given resources in any dimension */ public boolean smallerThan(ClusterResources other) { if (this.nodes < other.nodes) return true; if (this.groups < other.groups) return true; if (this.nodeResources.isUnspecified() || other.nodeResources.isUnspecified()) return false; if ( ! 
this.nodeResources.justNumbers().satisfies(other.nodeResources.justNumbers())) return true; return false; } /** Returns true if this is within the given limits (inclusive) and is compatible with them */ /** Returns the total resources of this, that is the number of nodes times the node resources */ public NodeResources totalResources() { return nodeResources.withVcpu(nodeResources.vcpu() * nodes) .withMemoryGb(nodeResources.memoryGb() * nodes) .withDiskGb(nodeResources.diskGb() * nodes) .withBandwidthGbps(nodeResources.bandwidthGbps() * nodes); } public ClusterResources justNumbers() { return new ClusterResources(nodes, groups, nodeResources.justNumbers()); } /** Returns the standard cost of these resources, in dollars per hour */ public double cost() { return nodes * nodeResources.cost(); } @Override public boolean equals(Object o) { if (o == this) return true; if ( ! (o instanceof ClusterResources)) return false; ClusterResources other = (ClusterResources)o; if (other.nodes != this.nodes) return false; if (other.groups != this.groups) return false; if ( ! other.nodeResources.equals(this.nodeResources)) return false; return true; } @Override public int hashCode() { return Objects.hash(nodes, groups, nodeResources); } @Override public String toString() { return nodes + " nodes" + (groups > 1 ? " (in " + groups + " groups)" : "") + " with " + nodeResources; } }
class ClusterResources { /** The node count in the cluster */ private final int nodes; /** The number of node groups in the cluster */ private final int groups; /** The resources of each node in the cluster */ private final NodeResources nodeResources; public ClusterResources(int nodes, int groups, NodeResources nodeResources) { this.nodes = nodes; this.groups = groups; this.nodeResources = Objects.requireNonNull(nodeResources); } /** Returns the total number of allocated nodes (over all groups) */ public int nodes() { return nodes; } public int groups() { return groups; } public NodeResources nodeResources() { return nodeResources; } public ClusterResources with(NodeResources resources) { return new ClusterResources(nodes, groups, resources); } public ClusterResources withNodes(int nodes) { return new ClusterResources(nodes, groups, nodeResources); } public ClusterResources withGroups(int groups) { return new ClusterResources(nodes, groups, nodeResources); } /** Returns true if this is smaller than the given resources in any dimension */ public boolean smallerThan(ClusterResources other) { if (this.nodes < other.nodes) return true; if (this.groups < other.groups) return true; if (this.nodeResources.isUnspecified() || other.nodeResources.isUnspecified()) return false; if ( ! 
this.nodeResources.justNumbers().satisfies(other.nodeResources.justNumbers())) return true; return false; } /** Returns true if this is within the given limits (inclusive) and is compatible with them */ /** Returns the total resources of this, that is the number of nodes times the node resources */ public NodeResources totalResources() { return nodeResources.withVcpu(nodeResources.vcpu() * nodes) .withMemoryGb(nodeResources.memoryGb() * nodes) .withDiskGb(nodeResources.diskGb() * nodes) .withBandwidthGbps(nodeResources.bandwidthGbps() * nodes); } public ClusterResources justNumbers() { return new ClusterResources(nodes, groups, nodeResources.justNumbers()); } /** Returns the standard cost of these resources, in dollars per hour */ public double cost() { return nodes * nodeResources.cost(); } @Override public boolean equals(Object o) { if (o == this) return true; if ( ! (o instanceof ClusterResources)) return false; ClusterResources other = (ClusterResources)o; if (other.nodes != this.nodes) return false; if (other.groups != this.groups) return false; if ( ! other.nodeResources.equals(this.nodeResources)) return false; return true; } @Override public int hashCode() { return Objects.hash(nodes, groups, nodeResources); } @Override public String toString() { return nodes + " nodes" + (groups > 1 ? " (in " + groups + " groups)" : "") + " with " + nodeResources; } }
Throw? Printing to stderr and silently continuing produces incomplete config — should this throw an IllegalArgumentException instead?
/**
 * Builds config for all data types in the given collection which have not already been built.
 * Types are processed in a stable (name, then id) order so the generated config is deterministic.
 *
 * @param type                  the collection of data types to generate config for
 * @param documentConfigBuilder the builder the generated config is added to
 * @param built                 the set of types already built; updated with the types built here
 * @throws IllegalArgumentException if a temporary structured data type is encountered —
 *         temporary types must be resolved before config generation
 */
private void buildConfig(DataTypeCollection type, DocumentmanagerConfig.Builder documentConfigBuilder, Set<DataType> built) {
    List<DataType> todo = new ArrayList<>(type.getTypes());
    // Sort by name, then by id, to make the config output deterministic
    Collections.sort(todo, (a, b) -> (a.getName().equals(b.getName()) ? a.getId() - b.getId()
                                                                      : a.getName().compareTo(b.getName())));
    for (DataType dataType : todo) {
        if (built.contains(dataType)) continue;
        built.add(dataType);
        // Fix: was 'System.err.println(...); continue;' — fail fast instead of silently
        // emitting config with the temporary type missing
        if (dataType instanceof TemporaryStructuredDataType)
            throw new IllegalArgumentException("Can not create config for temporary data type: " + dataType.getName());
        // Only types outside the predefined id range get their own config entry
        if ((dataType.getId() < 0) || (dataType.getId() > DataType.lastPredefinedDataTypeId())) {
            Datatype.Builder dataTypeBuilder = new Datatype.Builder();
            documentConfigBuilder.datatype(dataTypeBuilder);
            buildConfig(dataType, dataTypeBuilder);
        }
    }
}
System.err.println("still temporary [1]: "+dataType);
/**
 * Builds config for each not-yet-built data type in the given collection,
 * visiting the types in a stable (name, then id) order.
 *
 * @param type                  the collection of data types to generate config for
 * @param documentConfigBuilder the builder the generated config is added to
 * @param built                 the set of already-built types; updated as types are handled
 * @throws IllegalArgumentException if a temporary structured data type is encountered
 */
private void buildConfig(DataTypeCollection type, DocumentmanagerConfig.Builder documentConfigBuilder, Set<DataType> built) {
    List<DataType> remaining = new ArrayList<>(type.getTypes());
    remaining.sort((left, right) -> {
        int byName = left.getName().compareTo(right.getName());
        return (byName != 0) ? byName : left.getId() - right.getId();
    });
    for (DataType dataType : remaining) {
        if ( ! built.add(dataType)) continue; // add() returns false when already handled
        if (dataType instanceof TemporaryStructuredDataType)
            throw new IllegalArgumentException("Can not create config for temporary data type: " + dataType.getName());
        boolean predefined = (dataType.getId() >= 0) && (dataType.getId() <= DataType.lastPredefinedDataTypeId());
        if (predefined) continue; // predefined types need no config entry of their own
        Datatype.Builder dataTypeBuilder = new Datatype.Builder();
        documentConfigBuilder.datatype(dataTypeBuilder);
        buildConfig(dataType, dataTypeBuilder);
    }
}
class DocumentManager { private boolean useV8GeoPositions = false; public DocumentManager useV8GeoPositions(boolean value) { this.useV8GeoPositions = value; return this; } public DocumentmanagerConfig.Builder produce(DocumentModel model, DocumentmanagerConfig.Builder documentConfigBuilder) { documentConfigBuilder.enablecompression(false); documentConfigBuilder.usev8geopositions(this.useV8GeoPositions); Set<DataType> handled = new HashSet<>(); for(NewDocumentType documentType : model.getDocumentManager().getTypes()) { buildConfig(documentType, documentConfigBuilder, handled); buildConfig(documentType.getAnnotations(), documentConfigBuilder); if (documentType != VespaDocumentType.INSTANCE && ! handled.contains(documentType)) { handled.add(documentType); DocumentmanagerConfig.Datatype.Builder dataTypeBuilder = new DocumentmanagerConfig.Datatype.Builder(); documentConfigBuilder.datatype(dataTypeBuilder); buildConfig(documentType, dataTypeBuilder); } } return documentConfigBuilder; } @SuppressWarnings("deprecation") private void buildConfig(AnnotationType type, DocumentmanagerConfig.Annotationtype.Builder atb) { atb. id(type.getId()). name(type.getName()); if (type.getDataType() != null) { atb.datatype(type.getDataType().getId()); } if ( ! 
type.getInheritedTypes().isEmpty()) { for (AnnotationType inherited : type.getInheritedTypes()) { atb.inherits(new DocumentmanagerConfig.Annotationtype.Inherits.Builder().id(inherited.getId())); } } } private void buildConfig(Collection<AnnotationType> types, DocumentmanagerConfig.Builder builder) { for (AnnotationType type : types) { DocumentmanagerConfig.Annotationtype.Builder atb = new DocumentmanagerConfig.Annotationtype.Builder(); buildConfig(type, atb); builder.annotationtype(atb); } } @SuppressWarnings("deprecation") private void buildConfig(DataType type, Datatype.Builder builder) { builder.id(type.getId()); if (type instanceof ArrayDataType) { CollectionDataType dt = (CollectionDataType) type; builder.arraytype(new Datatype.Arraytype.Builder().datatype(dt.getNestedType().getId())); } else if (type instanceof WeightedSetDataType) { WeightedSetDataType dt = (WeightedSetDataType) type; builder.weightedsettype(new Datatype.Weightedsettype.Builder(). datatype(dt.getNestedType().getId()). createifnonexistant(dt.createIfNonExistent()). removeifzero(dt.removeIfZero())); } else if (type instanceof MapDataType) { MapDataType mtype = (MapDataType) type; builder.maptype(new Datatype.Maptype.Builder(). keytype(mtype.getKeyType().getId()). valtype(mtype.getValueType().getId())); } else if (type instanceof DocumentType) { System.err.println("still a DocumentType: "+type); DocumentType dt = (DocumentType) type; Datatype.Documenttype.Builder doc = new Datatype.Documenttype.Builder(); builder.documenttype(doc); doc. name(dt.getName()). headerstruct(dt.contentStruct().getId()); for (DocumentType inherited : dt.getInheritedTypes()) { doc.inherits(new Datatype.Documenttype.Inherits.Builder().name(inherited.getName())); } } else if (type instanceof NewDocumentType) { NewDocumentType dt = (NewDocumentType) type; Datatype.Documenttype.Builder doc = new Datatype.Documenttype.Builder(); builder.documenttype(doc); doc. name(dt.getName()). 
headerstruct(dt.getHeader().getId()); for (NewDocumentType inherited : dt.getInherited()) { doc.inherits(new Datatype.Documenttype.Inherits.Builder().name(inherited.getName())); } buildConfig(dt.getFieldSets(), doc); buildImportedFieldsConfig(dt.getImportedFieldNames(), doc); } else if (type instanceof TemporaryStructuredDataType) { System.err.println("still temporary [2]: "+type); } else if (type instanceof StructDataType) { StructDataType structType = (StructDataType) type; Datatype.Structtype.Builder structBuilder = new Datatype.Structtype.Builder(); builder.structtype(structBuilder); structBuilder.name(structType.getName()); for (com.yahoo.document.Field field : structType.getFieldsThisTypeOnly()) { Datatype.Structtype.Field.Builder fieldBuilder = new Datatype.Structtype.Field.Builder(); structBuilder.field(fieldBuilder); fieldBuilder.name(field.getName()); if (field.hasForcedId()) { fieldBuilder.id(new Datatype.Structtype.Field.Id.Builder().id(field.getId())); } fieldBuilder.datatype(field.getDataType().getId()); if (field.getDataType() instanceof TensorDataType) fieldBuilder.detailedtype(((TensorDataType)field.getDataType()).getTensorType().toString()); } for (StructDataType inherited : structType.getInheritedTypes()) { structBuilder.inherits(new Datatype.Structtype.Inherits.Builder().name(inherited.getName())); } } else if (type instanceof AnnotationReferenceDataType) { AnnotationReferenceDataType annotationRef = (AnnotationReferenceDataType) type; builder.annotationreftype(new Datatype.Annotationreftype.Builder().annotation(annotationRef.getAnnotationType().getName())); } else if (type instanceof TensorDataType) { } else if (type instanceof ReferenceDataType) { ReferenceDataType refType = (ReferenceDataType) type; builder.referencetype(new Datatype.Referencetype.Builder().target_type_id(refType.getTargetType().getId())); } else { throw new IllegalArgumentException("Can not create config for data type '" + type.getName()); } } private void 
buildConfig(Set<FieldSet> fieldSets, Datatype.Documenttype.Builder doc) { for (FieldSet builtinFs : fieldSets) { buildConfig(builtinFs, doc); } } private void buildConfig(FieldSet fs, Datatype.Documenttype.Builder doc) { doc.fieldsets(fs.getName(), new Datatype.Documenttype.Fieldsets.Builder().fields(fs.getFieldNames())); } private void buildImportedFieldsConfig(Collection<String> fieldNames, Datatype.Documenttype.Builder builder) { for (String fieldName : fieldNames) { var ib = new DocumentmanagerConfig.Datatype.Documenttype.Importedfield.Builder(); ib.name(fieldName); builder.importedfield(ib); } } }
class DocumentManager { private boolean useV8GeoPositions = false; public DocumentManager useV8GeoPositions(boolean value) { this.useV8GeoPositions = value; return this; } public DocumentmanagerConfig.Builder produce(DocumentModel model, DocumentmanagerConfig.Builder documentConfigBuilder) { documentConfigBuilder.enablecompression(false); documentConfigBuilder.usev8geopositions(this.useV8GeoPositions); Set<DataType> handled = new HashSet<>(); for(NewDocumentType documentType : model.getDocumentManager().getTypes()) { buildConfig(documentType, documentConfigBuilder, handled); buildConfig(documentType.getAnnotations(), documentConfigBuilder); if (documentType != VespaDocumentType.INSTANCE && ! handled.contains(documentType)) { handled.add(documentType); DocumentmanagerConfig.Datatype.Builder dataTypeBuilder = new DocumentmanagerConfig.Datatype.Builder(); documentConfigBuilder.datatype(dataTypeBuilder); buildConfig(documentType, dataTypeBuilder); } } return documentConfigBuilder; } @SuppressWarnings("deprecation") private void buildConfig(AnnotationType type, DocumentmanagerConfig.Annotationtype.Builder atb) { atb. id(type.getId()). name(type.getName()); if (type.getDataType() != null) { atb.datatype(type.getDataType().getId()); } if ( ! 
type.getInheritedTypes().isEmpty()) { for (AnnotationType inherited : type.getInheritedTypes()) { atb.inherits(new DocumentmanagerConfig.Annotationtype.Inherits.Builder().id(inherited.getId())); } } } private void buildConfig(Collection<AnnotationType> types, DocumentmanagerConfig.Builder builder) { for (AnnotationType type : types) { DocumentmanagerConfig.Annotationtype.Builder atb = new DocumentmanagerConfig.Annotationtype.Builder(); buildConfig(type, atb); builder.annotationtype(atb); } } @SuppressWarnings("deprecation") private void buildConfig(DataType type, Datatype.Builder builder) { builder.id(type.getId()); if (type instanceof ArrayDataType) { CollectionDataType dt = (CollectionDataType) type; builder.arraytype(new Datatype.Arraytype.Builder().datatype(dt.getNestedType().getId())); } else if (type instanceof WeightedSetDataType) { WeightedSetDataType dt = (WeightedSetDataType) type; builder.weightedsettype(new Datatype.Weightedsettype.Builder(). datatype(dt.getNestedType().getId()). createifnonexistant(dt.createIfNonExistent()). removeifzero(dt.removeIfZero())); } else if (type instanceof MapDataType) { MapDataType mtype = (MapDataType) type; builder.maptype(new Datatype.Maptype.Builder(). keytype(mtype.getKeyType().getId()). valtype(mtype.getValueType().getId())); } else if (type instanceof DocumentType) { throw new IllegalArgumentException("Can not create config for unadorned document type: " + type.getName()); } else if (type instanceof NewDocumentType) { NewDocumentType dt = (NewDocumentType) type; Datatype.Documenttype.Builder doc = new Datatype.Documenttype.Builder(); builder.documenttype(doc); doc. name(dt.getName()). 
headerstruct(dt.getHeader().getId()); for (NewDocumentType inherited : dt.getInherited()) { doc.inherits(new Datatype.Documenttype.Inherits.Builder().name(inherited.getName())); } buildConfig(dt.getFieldSets(), doc); buildImportedFieldsConfig(dt.getImportedFieldNames(), doc); } else if (type instanceof TemporaryStructuredDataType) { throw new IllegalArgumentException("Can not create config for temporary data type: " + type.getName()); } else if (type instanceof StructDataType) { StructDataType structType = (StructDataType) type; Datatype.Structtype.Builder structBuilder = new Datatype.Structtype.Builder(); builder.structtype(structBuilder); structBuilder.name(structType.getName()); for (com.yahoo.document.Field field : structType.getFieldsThisTypeOnly()) { Datatype.Structtype.Field.Builder fieldBuilder = new Datatype.Structtype.Field.Builder(); structBuilder.field(fieldBuilder); fieldBuilder.name(field.getName()); if (field.hasForcedId()) { fieldBuilder.id(new Datatype.Structtype.Field.Id.Builder().id(field.getId())); } fieldBuilder.datatype(field.getDataType().getId()); if (field.getDataType() instanceof TensorDataType) fieldBuilder.detailedtype(((TensorDataType)field.getDataType()).getTensorType().toString()); } for (StructDataType inherited : structType.getInheritedTypes()) { structBuilder.inherits(new Datatype.Structtype.Inherits.Builder().name(inherited.getName())); } } else if (type instanceof AnnotationReferenceDataType) { AnnotationReferenceDataType annotationRef = (AnnotationReferenceDataType) type; builder.annotationreftype(new Datatype.Annotationreftype.Builder().annotation(annotationRef.getAnnotationType().getName())); } else if (type instanceof TensorDataType) { } else if (type instanceof ReferenceDataType) { ReferenceDataType refType = (ReferenceDataType) type; builder.referencetype(new Datatype.Referencetype.Builder().target_type_id(refType.getTargetType().getId())); } else { throw new IllegalArgumentException("Can not create config for data type '" 
+ type.getName()); } } private void buildConfig(Set<FieldSet> fieldSets, Datatype.Documenttype.Builder doc) { for (FieldSet builtinFs : fieldSets) { buildConfig(builtinFs, doc); } } private void buildConfig(FieldSet fs, Datatype.Documenttype.Builder doc) { doc.fieldsets(fs.getName(), new Datatype.Documenttype.Fieldsets.Builder().fields(fs.getFieldNames())); } private void buildImportedFieldsConfig(Collection<String> fieldNames, Datatype.Documenttype.Builder builder) { for (String fieldName : fieldNames) { var ib = new DocumentmanagerConfig.Datatype.Documenttype.Importedfield.Builder(); ib.name(fieldName); builder.importedfield(ib); } } }
Throw? Should the stderr prints here (for unadorned DocumentType and temporary types) be replaced by throwing an IllegalArgumentException?
/**
 * Builds the config entry for a single data type into the given builder.
 * Exactly one branch applies, keyed on the concrete DataType subclass.
 *
 * @param type    the data type to generate config for
 * @param builder the builder the config entry is added to
 * @throws IllegalArgumentException for unadorned DocumentType instances, temporary
 *         structured types, and any unrecognized data type
 */
private void buildConfig(DataType type, Datatype.Builder builder) {
    builder.id(type.getId());
    if (type instanceof ArrayDataType) {
        CollectionDataType dt = (CollectionDataType) type;
        builder.arraytype(new Datatype.Arraytype.Builder().datatype(dt.getNestedType().getId()));
    } else if (type instanceof WeightedSetDataType) {
        WeightedSetDataType dt = (WeightedSetDataType) type;
        builder.weightedsettype(new Datatype.Weightedsettype.Builder().
                datatype(dt.getNestedType().getId()).
                createifnonexistant(dt.createIfNonExistent()).
                removeifzero(dt.removeIfZero()));
    } else if (type instanceof MapDataType) {
        MapDataType mtype = (MapDataType) type;
        builder.maptype(new Datatype.Maptype.Builder().
                keytype(mtype.getKeyType().getId()).
                valtype(mtype.getValueType().getId()));
    } else if (type instanceof DocumentType) {
        // Fix: was 'System.err.println(...)' followed by building config from the plain
        // DocumentType. Only NewDocumentType is supported here; a plain DocumentType
        // indicates an earlier conversion step was missed, so fail fast.
        throw new IllegalArgumentException("Can not create config for unadorned document type: " + type.getName());
    } else if (type instanceof NewDocumentType) {
        NewDocumentType dt = (NewDocumentType) type;
        Datatype.Documenttype.Builder doc = new Datatype.Documenttype.Builder();
        builder.documenttype(doc);
        doc.
            name(dt.getName()).
            headerstruct(dt.getHeader().getId());
        for (NewDocumentType inherited : dt.getInherited()) {
            doc.inherits(new Datatype.Documenttype.Inherits.Builder().name(inherited.getName()));
        }
        buildConfig(dt.getFieldSets(), doc);
        buildImportedFieldsConfig(dt.getImportedFieldNames(), doc);
    } else if (type instanceof TemporaryStructuredDataType) {
        // Fix: was a silent 'System.err.println(...)' — temporary types must have been
        // resolved to real types before config generation, so fail fast.
        throw new IllegalArgumentException("Can not create config for temporary data type: " + type.getName());
    } else if (type instanceof StructDataType) {
        StructDataType structType = (StructDataType) type;
        Datatype.Structtype.Builder structBuilder = new Datatype.Structtype.Builder();
        builder.structtype(structBuilder);
        structBuilder.name(structType.getName());
        for (com.yahoo.document.Field field : structType.getFieldsThisTypeOnly()) {
            Datatype.Structtype.Field.Builder fieldBuilder = new Datatype.Structtype.Field.Builder();
            structBuilder.field(fieldBuilder);
            fieldBuilder.name(field.getName());
            if (field.hasForcedId()) {
                fieldBuilder.id(new Datatype.Structtype.Field.Id.Builder().id(field.getId()));
            }
            fieldBuilder.datatype(field.getDataType().getId());
            // Tensor fields additionally carry the full tensor type spec as a string
            if (field.getDataType() instanceof TensorDataType)
                fieldBuilder.detailedtype(((TensorDataType)field.getDataType()).getTensorType().toString());
        }
        for (StructDataType inherited : structType.getInheritedTypes()) {
            structBuilder.inherits(new Datatype.Structtype.Inherits.Builder().name(inherited.getName()));
        }
    } else if (type instanceof AnnotationReferenceDataType) {
        AnnotationReferenceDataType annotationRef = (AnnotationReferenceDataType) type;
        builder.annotationreftype(new Datatype.Annotationreftype.Builder().annotation(annotationRef.getAnnotationType().getName()));
    } else if (type instanceof TensorDataType) {
        // Intentionally empty: only the id is emitted for tensor types; the detailed
        // tensor spec is emitted per-field (see the struct branch above)
    } else if (type instanceof ReferenceDataType) {
        ReferenceDataType refType = (ReferenceDataType) type;
        builder.referencetype(new Datatype.Referencetype.Builder().target_type_id(refType.getTargetType().getId()));
    } else {
        // Fix: message previously lacked the closing quote after the type name
        throw new IllegalArgumentException("Can not create config for data type '" + type.getName() + "'");
    }
}
System.err.println("still a DocumentType: "+type);
/**
 * Builds the config entry for a single data type into the given builder.
 * Exactly one branch applies, keyed on the concrete DataType subclass.
 *
 * @param type    the data type to generate config for
 * @param builder the builder the config entry is added to
 * @throws IllegalArgumentException for unadorned DocumentType instances, temporary
 *         structured types, and any unrecognized data type
 */
private void buildConfig(DataType type, Datatype.Builder builder) {
    builder.id(type.getId());
    if (type instanceof ArrayDataType) {
        CollectionDataType dt = (CollectionDataType) type;
        builder.arraytype(new Datatype.Arraytype.Builder().datatype(dt.getNestedType().getId()));
    } else if (type instanceof WeightedSetDataType) {
        WeightedSetDataType dt = (WeightedSetDataType) type;
        builder.weightedsettype(new Datatype.Weightedsettype.Builder().
                datatype(dt.getNestedType().getId()).
                createifnonexistant(dt.createIfNonExistent()).
                removeifzero(dt.removeIfZero()));
    } else if (type instanceof MapDataType) {
        MapDataType mtype = (MapDataType) type;
        builder.maptype(new Datatype.Maptype.Builder().
                keytype(mtype.getKeyType().getId()).
                valtype(mtype.getValueType().getId()));
    } else if (type instanceof DocumentType) {
        // Only NewDocumentType is supported here; a plain DocumentType indicates an
        // earlier conversion step was missed
        throw new IllegalArgumentException("Can not create config for unadorned document type: " + type.getName());
    } else if (type instanceof NewDocumentType) {
        NewDocumentType dt = (NewDocumentType) type;
        Datatype.Documenttype.Builder doc = new Datatype.Documenttype.Builder();
        builder.documenttype(doc);
        doc.
            name(dt.getName()).
            headerstruct(dt.getHeader().getId());
        for (NewDocumentType inherited : dt.getInherited()) {
            doc.inherits(new Datatype.Documenttype.Inherits.Builder().name(inherited.getName()));
        }
        buildConfig(dt.getFieldSets(), doc);
        buildImportedFieldsConfig(dt.getImportedFieldNames(), doc);
    } else if (type instanceof TemporaryStructuredDataType) {
        // Temporary types must have been resolved to real types before config generation
        throw new IllegalArgumentException("Can not create config for temporary data type: " + type.getName());
    } else if (type instanceof StructDataType) {
        StructDataType structType = (StructDataType) type;
        Datatype.Structtype.Builder structBuilder = new Datatype.Structtype.Builder();
        builder.structtype(structBuilder);
        structBuilder.name(structType.getName());
        for (com.yahoo.document.Field field : structType.getFieldsThisTypeOnly()) {
            Datatype.Structtype.Field.Builder fieldBuilder = new Datatype.Structtype.Field.Builder();
            structBuilder.field(fieldBuilder);
            fieldBuilder.name(field.getName());
            if (field.hasForcedId()) {
                fieldBuilder.id(new Datatype.Structtype.Field.Id.Builder().id(field.getId()));
            }
            fieldBuilder.datatype(field.getDataType().getId());
            // Tensor fields additionally carry the full tensor type spec as a string
            if (field.getDataType() instanceof TensorDataType)
                fieldBuilder.detailedtype(((TensorDataType)field.getDataType()).getTensorType().toString());
        }
        for (StructDataType inherited : structType.getInheritedTypes()) {
            structBuilder.inherits(new Datatype.Structtype.Inherits.Builder().name(inherited.getName()));
        }
    } else if (type instanceof AnnotationReferenceDataType) {
        AnnotationReferenceDataType annotationRef = (AnnotationReferenceDataType) type;
        builder.annotationreftype(new Datatype.Annotationreftype.Builder().annotation(annotationRef.getAnnotationType().getName()));
    } else if (type instanceof TensorDataType) {
        // Intentionally empty: only the id is emitted for tensor types; the detailed
        // tensor spec is emitted per-field (see the struct branch above)
    } else if (type instanceof ReferenceDataType) {
        ReferenceDataType refType = (ReferenceDataType) type;
        builder.referencetype(new Datatype.Referencetype.Builder().target_type_id(refType.getTargetType().getId()));
    } else {
        // NOTE(review): message is missing the closing quote after the type name
        throw new IllegalArgumentException("Can not create config for data type '"
                                           + type.getName());
    }
}
class DocumentManager { private boolean useV8GeoPositions = false; public DocumentManager useV8GeoPositions(boolean value) { this.useV8GeoPositions = value; return this; } public DocumentmanagerConfig.Builder produce(DocumentModel model, DocumentmanagerConfig.Builder documentConfigBuilder) { documentConfigBuilder.enablecompression(false); documentConfigBuilder.usev8geopositions(this.useV8GeoPositions); Set<DataType> handled = new HashSet<>(); for(NewDocumentType documentType : model.getDocumentManager().getTypes()) { buildConfig(documentType, documentConfigBuilder, handled); buildConfig(documentType.getAnnotations(), documentConfigBuilder); if (documentType != VespaDocumentType.INSTANCE && ! handled.contains(documentType)) { handled.add(documentType); DocumentmanagerConfig.Datatype.Builder dataTypeBuilder = new DocumentmanagerConfig.Datatype.Builder(); documentConfigBuilder.datatype(dataTypeBuilder); buildConfig(documentType, dataTypeBuilder); } } return documentConfigBuilder; } @SuppressWarnings("deprecation") private void buildConfig(DataTypeCollection type, DocumentmanagerConfig.Builder documentConfigBuilder, Set<DataType> built) { List<DataType> todo = new ArrayList<>(type.getTypes()); Collections.sort(todo, (a, b) -> (a.getName().equals(b.getName()) ? a.getId() - b.getId() : a.getName().compareTo(b.getName()))); for (DataType dataType : todo) { if (built.contains(dataType)) continue; built.add(dataType); if (dataType instanceof TemporaryStructuredDataType) { System.err.println("still temporary [1]: "+dataType); continue; } if ((dataType.getId() < 0) || (dataType.getId()> DataType.lastPredefinedDataTypeId())) { Datatype.Builder dataTypeBuilder = new Datatype.Builder(); documentConfigBuilder.datatype(dataTypeBuilder); buildConfig(dataType, dataTypeBuilder); } } } private void buildConfig(AnnotationType type, DocumentmanagerConfig.Annotationtype.Builder atb) { atb. id(type.getId()). 
name(type.getName()); if (type.getDataType() != null) { atb.datatype(type.getDataType().getId()); } if ( ! type.getInheritedTypes().isEmpty()) { for (AnnotationType inherited : type.getInheritedTypes()) { atb.inherits(new DocumentmanagerConfig.Annotationtype.Inherits.Builder().id(inherited.getId())); } } } private void buildConfig(Collection<AnnotationType> types, DocumentmanagerConfig.Builder builder) { for (AnnotationType type : types) { DocumentmanagerConfig.Annotationtype.Builder atb = new DocumentmanagerConfig.Annotationtype.Builder(); buildConfig(type, atb); builder.annotationtype(atb); } } @SuppressWarnings("deprecation") private void buildConfig(Set<FieldSet> fieldSets, Datatype.Documenttype.Builder doc) { for (FieldSet builtinFs : fieldSets) { buildConfig(builtinFs, doc); } } private void buildConfig(FieldSet fs, Datatype.Documenttype.Builder doc) { doc.fieldsets(fs.getName(), new Datatype.Documenttype.Fieldsets.Builder().fields(fs.getFieldNames())); } private void buildImportedFieldsConfig(Collection<String> fieldNames, Datatype.Documenttype.Builder builder) { for (String fieldName : fieldNames) { var ib = new DocumentmanagerConfig.Datatype.Documenttype.Importedfield.Builder(); ib.name(fieldName); builder.importedfield(ib); } } }
class DocumentManager { private boolean useV8GeoPositions = false; public DocumentManager useV8GeoPositions(boolean value) { this.useV8GeoPositions = value; return this; } public DocumentmanagerConfig.Builder produce(DocumentModel model, DocumentmanagerConfig.Builder documentConfigBuilder) { documentConfigBuilder.enablecompression(false); documentConfigBuilder.usev8geopositions(this.useV8GeoPositions); Set<DataType> handled = new HashSet<>(); for(NewDocumentType documentType : model.getDocumentManager().getTypes()) { buildConfig(documentType, documentConfigBuilder, handled); buildConfig(documentType.getAnnotations(), documentConfigBuilder); if (documentType != VespaDocumentType.INSTANCE && ! handled.contains(documentType)) { handled.add(documentType); DocumentmanagerConfig.Datatype.Builder dataTypeBuilder = new DocumentmanagerConfig.Datatype.Builder(); documentConfigBuilder.datatype(dataTypeBuilder); buildConfig(documentType, dataTypeBuilder); } } return documentConfigBuilder; } @SuppressWarnings("deprecation") private void buildConfig(DataTypeCollection type, DocumentmanagerConfig.Builder documentConfigBuilder, Set<DataType> built) { List<DataType> todo = new ArrayList<>(type.getTypes()); Collections.sort(todo, (a, b) -> (a.getName().equals(b.getName()) ? a.getId() - b.getId() : a.getName().compareTo(b.getName()))); for (DataType dataType : todo) { if (built.contains(dataType)) continue; built.add(dataType); if (dataType instanceof TemporaryStructuredDataType) { throw new IllegalArgumentException("Can not create config for temporary data type: " + dataType.getName()); } if ((dataType.getId() < 0) || (dataType.getId()> DataType.lastPredefinedDataTypeId())) { Datatype.Builder dataTypeBuilder = new Datatype.Builder(); documentConfigBuilder.datatype(dataTypeBuilder); buildConfig(dataType, dataTypeBuilder); } } } private void buildConfig(AnnotationType type, DocumentmanagerConfig.Annotationtype.Builder atb) { atb. id(type.getId()). 
name(type.getName()); if (type.getDataType() != null) { atb.datatype(type.getDataType().getId()); } if ( ! type.getInheritedTypes().isEmpty()) { for (AnnotationType inherited : type.getInheritedTypes()) { atb.inherits(new DocumentmanagerConfig.Annotationtype.Inherits.Builder().id(inherited.getId())); } } } private void buildConfig(Collection<AnnotationType> types, DocumentmanagerConfig.Builder builder) { for (AnnotationType type : types) { DocumentmanagerConfig.Annotationtype.Builder atb = new DocumentmanagerConfig.Annotationtype.Builder(); buildConfig(type, atb); builder.annotationtype(atb); } } @SuppressWarnings("deprecation") private void buildConfig(Set<FieldSet> fieldSets, Datatype.Documenttype.Builder doc) { for (FieldSet builtinFs : fieldSets) { buildConfig(builtinFs, doc); } } private void buildConfig(FieldSet fs, Datatype.Documenttype.Builder doc) { doc.fieldsets(fs.getName(), new Datatype.Documenttype.Fieldsets.Builder().fields(fs.getFieldNames())); } private void buildImportedFieldsConfig(Collection<String> fieldNames, Datatype.Documenttype.Builder builder) { for (String fieldName : fieldNames) { var ib = new DocumentmanagerConfig.Datatype.Documenttype.Importedfield.Builder(); ib.name(fieldName); builder.importedfield(ib); } } }
Throw ?
private void buildConfig(DataType type, Datatype.Builder builder) { builder.id(type.getId()); if (type instanceof ArrayDataType) { CollectionDataType dt = (CollectionDataType) type; builder.arraytype(new Datatype.Arraytype.Builder().datatype(dt.getNestedType().getId())); } else if (type instanceof WeightedSetDataType) { WeightedSetDataType dt = (WeightedSetDataType) type; builder.weightedsettype(new Datatype.Weightedsettype.Builder(). datatype(dt.getNestedType().getId()). createifnonexistant(dt.createIfNonExistent()). removeifzero(dt.removeIfZero())); } else if (type instanceof MapDataType) { MapDataType mtype = (MapDataType) type; builder.maptype(new Datatype.Maptype.Builder(). keytype(mtype.getKeyType().getId()). valtype(mtype.getValueType().getId())); } else if (type instanceof DocumentType) { System.err.println("still a DocumentType: "+type); DocumentType dt = (DocumentType) type; Datatype.Documenttype.Builder doc = new Datatype.Documenttype.Builder(); builder.documenttype(doc); doc. name(dt.getName()). headerstruct(dt.contentStruct().getId()); for (DocumentType inherited : dt.getInheritedTypes()) { doc.inherits(new Datatype.Documenttype.Inherits.Builder().name(inherited.getName())); } } else if (type instanceof NewDocumentType) { NewDocumentType dt = (NewDocumentType) type; Datatype.Documenttype.Builder doc = new Datatype.Documenttype.Builder(); builder.documenttype(doc); doc. name(dt.getName()). 
headerstruct(dt.getHeader().getId()); for (NewDocumentType inherited : dt.getInherited()) { doc.inherits(new Datatype.Documenttype.Inherits.Builder().name(inherited.getName())); } buildConfig(dt.getFieldSets(), doc); buildImportedFieldsConfig(dt.getImportedFieldNames(), doc); } else if (type instanceof TemporaryStructuredDataType) { System.err.println("still temporary [2]: "+type); } else if (type instanceof StructDataType) { StructDataType structType = (StructDataType) type; Datatype.Structtype.Builder structBuilder = new Datatype.Structtype.Builder(); builder.structtype(structBuilder); structBuilder.name(structType.getName()); for (com.yahoo.document.Field field : structType.getFieldsThisTypeOnly()) { Datatype.Structtype.Field.Builder fieldBuilder = new Datatype.Structtype.Field.Builder(); structBuilder.field(fieldBuilder); fieldBuilder.name(field.getName()); if (field.hasForcedId()) { fieldBuilder.id(new Datatype.Structtype.Field.Id.Builder().id(field.getId())); } fieldBuilder.datatype(field.getDataType().getId()); if (field.getDataType() instanceof TensorDataType) fieldBuilder.detailedtype(((TensorDataType)field.getDataType()).getTensorType().toString()); } for (StructDataType inherited : structType.getInheritedTypes()) { structBuilder.inherits(new Datatype.Structtype.Inherits.Builder().name(inherited.getName())); } } else if (type instanceof AnnotationReferenceDataType) { AnnotationReferenceDataType annotationRef = (AnnotationReferenceDataType) type; builder.annotationreftype(new Datatype.Annotationreftype.Builder().annotation(annotationRef.getAnnotationType().getName())); } else if (type instanceof TensorDataType) { } else if (type instanceof ReferenceDataType) { ReferenceDataType refType = (ReferenceDataType) type; builder.referencetype(new Datatype.Referencetype.Builder().target_type_id(refType.getTargetType().getId())); } else { throw new IllegalArgumentException("Can not create config for data type '" + type.getName()); } }
System.err.println("still temporary [2]: "+type);
private void buildConfig(DataType type, Datatype.Builder builder) { builder.id(type.getId()); if (type instanceof ArrayDataType) { CollectionDataType dt = (CollectionDataType) type; builder.arraytype(new Datatype.Arraytype.Builder().datatype(dt.getNestedType().getId())); } else if (type instanceof WeightedSetDataType) { WeightedSetDataType dt = (WeightedSetDataType) type; builder.weightedsettype(new Datatype.Weightedsettype.Builder(). datatype(dt.getNestedType().getId()). createifnonexistant(dt.createIfNonExistent()). removeifzero(dt.removeIfZero())); } else if (type instanceof MapDataType) { MapDataType mtype = (MapDataType) type; builder.maptype(new Datatype.Maptype.Builder(). keytype(mtype.getKeyType().getId()). valtype(mtype.getValueType().getId())); } else if (type instanceof DocumentType) { throw new IllegalArgumentException("Can not create config for unadorned document type: " + type.getName()); } else if (type instanceof NewDocumentType) { NewDocumentType dt = (NewDocumentType) type; Datatype.Documenttype.Builder doc = new Datatype.Documenttype.Builder(); builder.documenttype(doc); doc. name(dt.getName()). 
headerstruct(dt.getHeader().getId()); for (NewDocumentType inherited : dt.getInherited()) { doc.inherits(new Datatype.Documenttype.Inherits.Builder().name(inherited.getName())); } buildConfig(dt.getFieldSets(), doc); buildImportedFieldsConfig(dt.getImportedFieldNames(), doc); } else if (type instanceof TemporaryStructuredDataType) { throw new IllegalArgumentException("Can not create config for temporary data type: " + type.getName()); } else if (type instanceof StructDataType) { StructDataType structType = (StructDataType) type; Datatype.Structtype.Builder structBuilder = new Datatype.Structtype.Builder(); builder.structtype(structBuilder); structBuilder.name(structType.getName()); for (com.yahoo.document.Field field : structType.getFieldsThisTypeOnly()) { Datatype.Structtype.Field.Builder fieldBuilder = new Datatype.Structtype.Field.Builder(); structBuilder.field(fieldBuilder); fieldBuilder.name(field.getName()); if (field.hasForcedId()) { fieldBuilder.id(new Datatype.Structtype.Field.Id.Builder().id(field.getId())); } fieldBuilder.datatype(field.getDataType().getId()); if (field.getDataType() instanceof TensorDataType) fieldBuilder.detailedtype(((TensorDataType)field.getDataType()).getTensorType().toString()); } for (StructDataType inherited : structType.getInheritedTypes()) { structBuilder.inherits(new Datatype.Structtype.Inherits.Builder().name(inherited.getName())); } } else if (type instanceof AnnotationReferenceDataType) { AnnotationReferenceDataType annotationRef = (AnnotationReferenceDataType) type; builder.annotationreftype(new Datatype.Annotationreftype.Builder().annotation(annotationRef.getAnnotationType().getName())); } else if (type instanceof TensorDataType) { } else if (type instanceof ReferenceDataType) { ReferenceDataType refType = (ReferenceDataType) type; builder.referencetype(new Datatype.Referencetype.Builder().target_type_id(refType.getTargetType().getId())); } else { throw new IllegalArgumentException("Can not create config for data type '" 
+ type.getName()); } }
class DocumentManager { private boolean useV8GeoPositions = false; public DocumentManager useV8GeoPositions(boolean value) { this.useV8GeoPositions = value; return this; } public DocumentmanagerConfig.Builder produce(DocumentModel model, DocumentmanagerConfig.Builder documentConfigBuilder) { documentConfigBuilder.enablecompression(false); documentConfigBuilder.usev8geopositions(this.useV8GeoPositions); Set<DataType> handled = new HashSet<>(); for(NewDocumentType documentType : model.getDocumentManager().getTypes()) { buildConfig(documentType, documentConfigBuilder, handled); buildConfig(documentType.getAnnotations(), documentConfigBuilder); if (documentType != VespaDocumentType.INSTANCE && ! handled.contains(documentType)) { handled.add(documentType); DocumentmanagerConfig.Datatype.Builder dataTypeBuilder = new DocumentmanagerConfig.Datatype.Builder(); documentConfigBuilder.datatype(dataTypeBuilder); buildConfig(documentType, dataTypeBuilder); } } return documentConfigBuilder; } @SuppressWarnings("deprecation") private void buildConfig(DataTypeCollection type, DocumentmanagerConfig.Builder documentConfigBuilder, Set<DataType> built) { List<DataType> todo = new ArrayList<>(type.getTypes()); Collections.sort(todo, (a, b) -> (a.getName().equals(b.getName()) ? a.getId() - b.getId() : a.getName().compareTo(b.getName()))); for (DataType dataType : todo) { if (built.contains(dataType)) continue; built.add(dataType); if (dataType instanceof TemporaryStructuredDataType) { System.err.println("still temporary [1]: "+dataType); continue; } if ((dataType.getId() < 0) || (dataType.getId()> DataType.lastPredefinedDataTypeId())) { Datatype.Builder dataTypeBuilder = new Datatype.Builder(); documentConfigBuilder.datatype(dataTypeBuilder); buildConfig(dataType, dataTypeBuilder); } } } private void buildConfig(AnnotationType type, DocumentmanagerConfig.Annotationtype.Builder atb) { atb. id(type.getId()). 
name(type.getName()); if (type.getDataType() != null) { atb.datatype(type.getDataType().getId()); } if ( ! type.getInheritedTypes().isEmpty()) { for (AnnotationType inherited : type.getInheritedTypes()) { atb.inherits(new DocumentmanagerConfig.Annotationtype.Inherits.Builder().id(inherited.getId())); } } } private void buildConfig(Collection<AnnotationType> types, DocumentmanagerConfig.Builder builder) { for (AnnotationType type : types) { DocumentmanagerConfig.Annotationtype.Builder atb = new DocumentmanagerConfig.Annotationtype.Builder(); buildConfig(type, atb); builder.annotationtype(atb); } } @SuppressWarnings("deprecation") private void buildConfig(Set<FieldSet> fieldSets, Datatype.Documenttype.Builder doc) { for (FieldSet builtinFs : fieldSets) { buildConfig(builtinFs, doc); } } private void buildConfig(FieldSet fs, Datatype.Documenttype.Builder doc) { doc.fieldsets(fs.getName(), new Datatype.Documenttype.Fieldsets.Builder().fields(fs.getFieldNames())); } private void buildImportedFieldsConfig(Collection<String> fieldNames, Datatype.Documenttype.Builder builder) { for (String fieldName : fieldNames) { var ib = new DocumentmanagerConfig.Datatype.Documenttype.Importedfield.Builder(); ib.name(fieldName); builder.importedfield(ib); } } }
class DocumentManager { private boolean useV8GeoPositions = false; public DocumentManager useV8GeoPositions(boolean value) { this.useV8GeoPositions = value; return this; } public DocumentmanagerConfig.Builder produce(DocumentModel model, DocumentmanagerConfig.Builder documentConfigBuilder) { documentConfigBuilder.enablecompression(false); documentConfigBuilder.usev8geopositions(this.useV8GeoPositions); Set<DataType> handled = new HashSet<>(); for(NewDocumentType documentType : model.getDocumentManager().getTypes()) { buildConfig(documentType, documentConfigBuilder, handled); buildConfig(documentType.getAnnotations(), documentConfigBuilder); if (documentType != VespaDocumentType.INSTANCE && ! handled.contains(documentType)) { handled.add(documentType); DocumentmanagerConfig.Datatype.Builder dataTypeBuilder = new DocumentmanagerConfig.Datatype.Builder(); documentConfigBuilder.datatype(dataTypeBuilder); buildConfig(documentType, dataTypeBuilder); } } return documentConfigBuilder; } @SuppressWarnings("deprecation") private void buildConfig(DataTypeCollection type, DocumentmanagerConfig.Builder documentConfigBuilder, Set<DataType> built) { List<DataType> todo = new ArrayList<>(type.getTypes()); Collections.sort(todo, (a, b) -> (a.getName().equals(b.getName()) ? a.getId() - b.getId() : a.getName().compareTo(b.getName()))); for (DataType dataType : todo) { if (built.contains(dataType)) continue; built.add(dataType); if (dataType instanceof TemporaryStructuredDataType) { throw new IllegalArgumentException("Can not create config for temporary data type: " + dataType.getName()); } if ((dataType.getId() < 0) || (dataType.getId()> DataType.lastPredefinedDataTypeId())) { Datatype.Builder dataTypeBuilder = new Datatype.Builder(); documentConfigBuilder.datatype(dataTypeBuilder); buildConfig(dataType, dataTypeBuilder); } } } private void buildConfig(AnnotationType type, DocumentmanagerConfig.Annotationtype.Builder atb) { atb. id(type.getId()). 
name(type.getName()); if (type.getDataType() != null) { atb.datatype(type.getDataType().getId()); } if ( ! type.getInheritedTypes().isEmpty()) { for (AnnotationType inherited : type.getInheritedTypes()) { atb.inherits(new DocumentmanagerConfig.Annotationtype.Inherits.Builder().id(inherited.getId())); } } } private void buildConfig(Collection<AnnotationType> types, DocumentmanagerConfig.Builder builder) { for (AnnotationType type : types) { DocumentmanagerConfig.Annotationtype.Builder atb = new DocumentmanagerConfig.Annotationtype.Builder(); buildConfig(type, atb); builder.annotationtype(atb); } } @SuppressWarnings("deprecation") private void buildConfig(Set<FieldSet> fieldSets, Datatype.Documenttype.Builder doc) { for (FieldSet builtinFs : fieldSets) { buildConfig(builtinFs, doc); } } private void buildConfig(FieldSet fs, Datatype.Documenttype.Builder doc) { doc.fieldsets(fs.getName(), new Datatype.Documenttype.Fieldsets.Builder().fields(fs.getFieldNames())); } private void buildImportedFieldsConfig(Collection<String> fieldNames, Datatype.Documenttype.Builder builder) { for (String fieldName : fieldNames) { var ib = new DocumentmanagerConfig.Datatype.Documenttype.Importedfield.Builder(); ib.name(fieldName); builder.importedfield(ib); } } }
Consider changing this string to something indicating that it doesn't matter. Empty string would be just fine.
public void removing_paged_requires_override() throws Exception { try { new Fixture("field f1 type tensor(x[10]) { indexing: attribute \n attribute: paged }", "field f1 type tensor(x[10]) { indexing: attribute }"). assertValidation(newRestartAction(ClusterSpec.Id.from("test"), "Field 'f1' changed: add attribute 'huge'")); fail("Expected exception on removal of 'paged'"); } catch (ValidationOverrides.ValidationException e) { assertTrue(e.getMessage().contains(ValidationId.pagedSettingRemoval.toString())); } }
"Field 'f1' changed: add attribute 'huge'"));
public void removing_paged_requires_override() throws Exception { try { new Fixture("field f1 type tensor(x[10]) { indexing: attribute \n attribute: paged }", "field f1 type tensor(x[10]) { indexing: attribute }"). assertValidation(); fail("Expected exception on removal of 'paged'"); } catch (ValidationOverrides.ValidationException e) { assertTrue(e.getMessage().contains(ValidationId.pagedSettingRemoval.toString())); } }
class Fixture extends ContentClusterFixture { AttributeChangeValidator validator; public Fixture(String currentSd, String nextSd) throws Exception { super(currentSd, nextSd); validator = new AttributeChangeValidator(ClusterSpec.Id.from("test"), currentDb().getDerivedConfiguration().getAttributeFields(), currentDb().getDerivedConfiguration().getIndexSchema(), currentDocType(), nextDb().getDerivedConfiguration().getAttributeFields(), nextDb().getDerivedConfiguration().getIndexSchema(), nextDocType(), new ValidationOverrides(List.of()), new ManualClock().instant()); } @Override public List<VespaConfigChangeAction> validate() { return validator.validate(); } }
class Fixture extends ContentClusterFixture { AttributeChangeValidator validator; public Fixture(String currentSd, String nextSd) throws Exception { super(currentSd, nextSd); validator = new AttributeChangeValidator(ClusterSpec.Id.from("test"), currentDb().getDerivedConfiguration().getAttributeFields(), currentDb().getDerivedConfiguration().getIndexSchema(), currentDocType(), nextDb().getDerivedConfiguration().getAttributeFields(), nextDb().getDerivedConfiguration().getIndexSchema(), nextDocType(), new ValidationOverrides(List.of()), new ManualClock().instant()); } @Override public List<VespaConfigChangeAction> validate() { return validator.validate(); } }
Fixed - thanks!
public void removing_paged_requires_override() throws Exception { try { new Fixture("field f1 type tensor(x[10]) { indexing: attribute \n attribute: paged }", "field f1 type tensor(x[10]) { indexing: attribute }"). assertValidation(newRestartAction(ClusterSpec.Id.from("test"), "Field 'f1' changed: add attribute 'huge'")); fail("Expected exception on removal of 'paged'"); } catch (ValidationOverrides.ValidationException e) { assertTrue(e.getMessage().contains(ValidationId.pagedSettingRemoval.toString())); } }
"Field 'f1' changed: add attribute 'huge'"));
public void removing_paged_requires_override() throws Exception { try { new Fixture("field f1 type tensor(x[10]) { indexing: attribute \n attribute: paged }", "field f1 type tensor(x[10]) { indexing: attribute }"). assertValidation(); fail("Expected exception on removal of 'paged'"); } catch (ValidationOverrides.ValidationException e) { assertTrue(e.getMessage().contains(ValidationId.pagedSettingRemoval.toString())); } }
class Fixture extends ContentClusterFixture { AttributeChangeValidator validator; public Fixture(String currentSd, String nextSd) throws Exception { super(currentSd, nextSd); validator = new AttributeChangeValidator(ClusterSpec.Id.from("test"), currentDb().getDerivedConfiguration().getAttributeFields(), currentDb().getDerivedConfiguration().getIndexSchema(), currentDocType(), nextDb().getDerivedConfiguration().getAttributeFields(), nextDb().getDerivedConfiguration().getIndexSchema(), nextDocType(), new ValidationOverrides(List.of()), new ManualClock().instant()); } @Override public List<VespaConfigChangeAction> validate() { return validator.validate(); } }
class Fixture extends ContentClusterFixture { AttributeChangeValidator validator; public Fixture(String currentSd, String nextSd) throws Exception { super(currentSd, nextSd); validator = new AttributeChangeValidator(ClusterSpec.Id.from("test"), currentDb().getDerivedConfiguration().getAttributeFields(), currentDb().getDerivedConfiguration().getIndexSchema(), currentDocType(), nextDb().getDerivedConfiguration().getAttributeFields(), nextDb().getDerivedConfiguration().getIndexSchema(), nextDocType(), new ValidationOverrides(List.of()), new ManualClock().instant()); } @Override public List<VespaConfigChangeAction> validate() { return validator.validate(); } }
I think you need a maximum backoff here.
private void backoff(int retryCount) { if (retryCount > 0) { try { Thread.sleep((long) (Math.pow(2, retryCount)) * sleepBetweenRetries.toMillis()); } catch (InterruptedException e) { /* ignored */ } } }
Thread.sleep((long) (Math.pow(2, retryCount)) * sleepBetweenRetries.toMillis());
private void backoff(int retryCount) { if (retryCount > 0) { try { long sleepTime = Math.min(120_000, (long) (Math.pow(2, retryCount)) * sleepBetweenRetries.toMillis()); Thread.sleep(sleepTime); } catch (InterruptedException e) { /* ignored */ } } }
class FileReferenceDownloader { private final static Logger log = Logger.getLogger(FileReferenceDownloader.class.getName()); private final ExecutorService downloadExecutor = Executors.newFixedThreadPool(Math.max(8, Runtime.getRuntime().availableProcessors()), new DaemonThreadFactory("filereference downloader")); private final ConnectionPool connectionPool; private final Downloads downloads; private final Duration downloadTimeout; private final Duration sleepBetweenRetries; private final Duration rpcTimeout; private final File downloadDirectory; FileReferenceDownloader(ConnectionPool connectionPool, Downloads downloads, Duration timeout, Duration sleepBetweenRetries, File downloadDirectory) { this.connectionPool = connectionPool; this.downloads = downloads; this.downloadTimeout = timeout; this.sleepBetweenRetries = sleepBetweenRetries; this.downloadDirectory = downloadDirectory; String timeoutString = System.getenv("VESPA_CONFIGPROXY_FILEDOWNLOAD_RPC_TIMEOUT"); this.rpcTimeout = Duration.ofSeconds(timeoutString == null ? 
30 : Integer.parseInt(timeoutString)); } private void waitUntilDownloadStarted(FileReferenceDownload fileReferenceDownload) { FileReference fileReference = fileReferenceDownload.fileReference(); int retryCount = 0; Connection connection = connectionPool.getCurrent(); do { backoff(retryCount); if (FileDownloader.fileReferenceExists(fileReference, downloadDirectory)) return; if (startDownloadRpc(fileReferenceDownload, retryCount, connection)) return; retryCount++; connection = connectionPool.switchConnection(connection); } while (retryCount < 5); fileReferenceDownload.future().completeExceptionally(new RuntimeException("Failed getting " + fileReference)); downloads.remove(fileReference); } Future<Optional<File>> startDownload(FileReferenceDownload fileReferenceDownload) { FileReference fileReference = fileReferenceDownload.fileReference(); Optional<FileReferenceDownload> inProgress = downloads.get(fileReference); if (inProgress.isPresent()) return inProgress.get().future(); log.log(Level.FINE, () -> "Will download " + fileReference + " with timeout " + downloadTimeout); downloads.add(fileReferenceDownload); downloadExecutor.submit(() -> waitUntilDownloadStarted(fileReferenceDownload)); return fileReferenceDownload.future(); } void failedDownloading(FileReference fileReference) { downloads.remove(fileReference); } private boolean startDownloadRpc(FileReferenceDownload fileReferenceDownload, int retryCount, Connection connection) { Request request = createRequest(fileReferenceDownload); Duration rpcTimeout = rpcTimeout(retryCount); connection.invokeSync(request, rpcTimeout.getSeconds()); Level logLevel = (retryCount > 3 ? Level.INFO : Level.FINE); FileReference fileReference = fileReferenceDownload.fileReference(); if (validateResponse(request)) { log.log(Level.FINE, () -> "Request callback, OK. 
Req: " + request + "\nSpec: " + connection); if (request.returnValues().get(0).asInt32() == 0) { log.log(Level.FINE, () -> "Found " + fileReference + " available at " + connection.getAddress()); return true; } else { log.log(logLevel, fileReference + " not found at " + connection.getAddress()); return false; } } else { log.log(logLevel, "Downloading " + fileReference + " from " + connection.getAddress() + " failed: " + request + ", error: " + request.errorCode() + "(" + request.errorMessage() + "). Will switch config server for next request" + " (retry " + retryCount + ", rpc timeout " + rpcTimeout + ")"); return false; } } private Request createRequest(FileReferenceDownload fileReferenceDownload) { Request request = new Request("filedistribution.serveFile"); request.parameters().add(new StringValue(fileReferenceDownload.fileReference().value())); request.parameters().add(new Int32Value(fileReferenceDownload.downloadFromOtherSourceIfNotFound() ? 0 : 1)); return request; } private Duration rpcTimeout(int retryCount) { return Duration.ofSeconds(rpcTimeout.getSeconds()).plus(Duration.ofSeconds(retryCount * 10L)); } private boolean validateResponse(Request request) { if (request.isError()) { return false; } else if (request.returnValues().size() == 0) { return false; } else if (!request.checkReturnTypes("is")) { log.log(Level.WARNING, "Invalid return types for response: " + request.errorMessage()); return false; } return true; } public void close() { downloadExecutor.shutdown(); try { downloadExecutor.awaitTermination(1, TimeUnit.SECONDS); } catch (InterruptedException e) { Thread.interrupted(); } } }
class FileReferenceDownloader { private final static Logger log = Logger.getLogger(FileReferenceDownloader.class.getName()); private final ExecutorService downloadExecutor = Executors.newFixedThreadPool(Math.max(8, Runtime.getRuntime().availableProcessors()), new DaemonThreadFactory("filereference downloader")); private final ConnectionPool connectionPool; private final Downloads downloads; private final Duration downloadTimeout; private final Duration sleepBetweenRetries; private final Duration rpcTimeout; private final File downloadDirectory; FileReferenceDownloader(ConnectionPool connectionPool, Downloads downloads, Duration timeout, Duration sleepBetweenRetries, File downloadDirectory) { this.connectionPool = connectionPool; this.downloads = downloads; this.downloadTimeout = timeout; this.sleepBetweenRetries = sleepBetweenRetries; this.downloadDirectory = downloadDirectory; String timeoutString = System.getenv("VESPA_CONFIGPROXY_FILEDOWNLOAD_RPC_TIMEOUT"); this.rpcTimeout = Duration.ofSeconds(timeoutString == null ? 
30 : Integer.parseInt(timeoutString)); } private void waitUntilDownloadStarted(FileReferenceDownload fileReferenceDownload) { FileReference fileReference = fileReferenceDownload.fileReference(); int retryCount = 0; Connection connection = connectionPool.getCurrent(); do { backoff(retryCount); if (FileDownloader.fileReferenceExists(fileReference, downloadDirectory)) return; if (startDownloadRpc(fileReferenceDownload, retryCount, connection)) return; retryCount++; connection = connectionPool.switchConnection(connection); } while (retryCount < 5); fileReferenceDownload.future().completeExceptionally(new RuntimeException("Failed getting " + fileReference)); downloads.remove(fileReference); } Future<Optional<File>> startDownload(FileReferenceDownload fileReferenceDownload) { FileReference fileReference = fileReferenceDownload.fileReference(); Optional<FileReferenceDownload> inProgress = downloads.get(fileReference); if (inProgress.isPresent()) return inProgress.get().future(); log.log(Level.FINE, () -> "Will download " + fileReference + " with timeout " + downloadTimeout); downloads.add(fileReferenceDownload); downloadExecutor.submit(() -> waitUntilDownloadStarted(fileReferenceDownload)); return fileReferenceDownload.future(); } void failedDownloading(FileReference fileReference) { downloads.remove(fileReference); } private boolean startDownloadRpc(FileReferenceDownload fileReferenceDownload, int retryCount, Connection connection) { Request request = createRequest(fileReferenceDownload); Duration rpcTimeout = rpcTimeout(retryCount); connection.invokeSync(request, rpcTimeout.getSeconds()); Level logLevel = (retryCount > 3 ? Level.INFO : Level.FINE); FileReference fileReference = fileReferenceDownload.fileReference(); if (validateResponse(request)) { log.log(Level.FINE, () -> "Request callback, OK. 
Req: " + request + "\nSpec: " + connection); if (request.returnValues().get(0).asInt32() == 0) { log.log(Level.FINE, () -> "Found " + fileReference + " available at " + connection.getAddress()); return true; } else { log.log(logLevel, fileReference + " not found at " + connection.getAddress()); return false; } } else { log.log(logLevel, "Downloading " + fileReference + " from " + connection.getAddress() + " failed: " + request + ", error: " + request.errorCode() + "(" + request.errorMessage() + "). Will switch config server for next request" + " (retry " + retryCount + ", rpc timeout " + rpcTimeout + ")"); return false; } } private Request createRequest(FileReferenceDownload fileReferenceDownload) { Request request = new Request("filedistribution.serveFile"); request.parameters().add(new StringValue(fileReferenceDownload.fileReference().value())); request.parameters().add(new Int32Value(fileReferenceDownload.downloadFromOtherSourceIfNotFound() ? 0 : 1)); return request; } private Duration rpcTimeout(int retryCount) { return Duration.ofSeconds(rpcTimeout.getSeconds()).plus(Duration.ofSeconds(retryCount * 10L)); } private boolean validateResponse(Request request) { if (request.isError()) { return false; } else if (request.returnValues().size() == 0) { return false; } else if (!request.checkReturnTypes("is")) { log.log(Level.WARNING, "Invalid return types for response: " + request.errorMessage()); return false; } return true; } public void close() { downloadExecutor.shutdown(); try { downloadExecutor.awaitTermination(1, TimeUnit.SECONDS); } catch (InterruptedException e) { Thread.interrupted(); } } }
`retryCount` is max. 4, but maybe it's best to have a max if that changes. Will fix.
private void backoff(int retryCount) { if (retryCount > 0) { try { Thread.sleep((long) (Math.pow(2, retryCount)) * sleepBetweenRetries.toMillis()); } catch (InterruptedException e) { /* ignored */ } } }
Thread.sleep((long) (Math.pow(2, retryCount)) * sleepBetweenRetries.toMillis());
private void backoff(int retryCount) { if (retryCount > 0) { try { long sleepTime = Math.min(120_000, (long) (Math.pow(2, retryCount)) * sleepBetweenRetries.toMillis()); Thread.sleep(sleepTime); } catch (InterruptedException e) { /* ignored */ } } }
class FileReferenceDownloader { private final static Logger log = Logger.getLogger(FileReferenceDownloader.class.getName()); private final ExecutorService downloadExecutor = Executors.newFixedThreadPool(Math.max(8, Runtime.getRuntime().availableProcessors()), new DaemonThreadFactory("filereference downloader")); private final ConnectionPool connectionPool; private final Downloads downloads; private final Duration downloadTimeout; private final Duration sleepBetweenRetries; private final Duration rpcTimeout; private final File downloadDirectory; FileReferenceDownloader(ConnectionPool connectionPool, Downloads downloads, Duration timeout, Duration sleepBetweenRetries, File downloadDirectory) { this.connectionPool = connectionPool; this.downloads = downloads; this.downloadTimeout = timeout; this.sleepBetweenRetries = sleepBetweenRetries; this.downloadDirectory = downloadDirectory; String timeoutString = System.getenv("VESPA_CONFIGPROXY_FILEDOWNLOAD_RPC_TIMEOUT"); this.rpcTimeout = Duration.ofSeconds(timeoutString == null ? 
30 : Integer.parseInt(timeoutString)); } private void waitUntilDownloadStarted(FileReferenceDownload fileReferenceDownload) { FileReference fileReference = fileReferenceDownload.fileReference(); int retryCount = 0; Connection connection = connectionPool.getCurrent(); do { backoff(retryCount); if (FileDownloader.fileReferenceExists(fileReference, downloadDirectory)) return; if (startDownloadRpc(fileReferenceDownload, retryCount, connection)) return; retryCount++; connection = connectionPool.switchConnection(connection); } while (retryCount < 5); fileReferenceDownload.future().completeExceptionally(new RuntimeException("Failed getting " + fileReference)); downloads.remove(fileReference); } Future<Optional<File>> startDownload(FileReferenceDownload fileReferenceDownload) { FileReference fileReference = fileReferenceDownload.fileReference(); Optional<FileReferenceDownload> inProgress = downloads.get(fileReference); if (inProgress.isPresent()) return inProgress.get().future(); log.log(Level.FINE, () -> "Will download " + fileReference + " with timeout " + downloadTimeout); downloads.add(fileReferenceDownload); downloadExecutor.submit(() -> waitUntilDownloadStarted(fileReferenceDownload)); return fileReferenceDownload.future(); } void failedDownloading(FileReference fileReference) { downloads.remove(fileReference); } private boolean startDownloadRpc(FileReferenceDownload fileReferenceDownload, int retryCount, Connection connection) { Request request = createRequest(fileReferenceDownload); Duration rpcTimeout = rpcTimeout(retryCount); connection.invokeSync(request, rpcTimeout.getSeconds()); Level logLevel = (retryCount > 3 ? Level.INFO : Level.FINE); FileReference fileReference = fileReferenceDownload.fileReference(); if (validateResponse(request)) { log.log(Level.FINE, () -> "Request callback, OK. 
Req: " + request + "\nSpec: " + connection); if (request.returnValues().get(0).asInt32() == 0) { log.log(Level.FINE, () -> "Found " + fileReference + " available at " + connection.getAddress()); return true; } else { log.log(logLevel, fileReference + " not found at " + connection.getAddress()); return false; } } else { log.log(logLevel, "Downloading " + fileReference + " from " + connection.getAddress() + " failed: " + request + ", error: " + request.errorCode() + "(" + request.errorMessage() + "). Will switch config server for next request" + " (retry " + retryCount + ", rpc timeout " + rpcTimeout + ")"); return false; } } private Request createRequest(FileReferenceDownload fileReferenceDownload) { Request request = new Request("filedistribution.serveFile"); request.parameters().add(new StringValue(fileReferenceDownload.fileReference().value())); request.parameters().add(new Int32Value(fileReferenceDownload.downloadFromOtherSourceIfNotFound() ? 0 : 1)); return request; } private Duration rpcTimeout(int retryCount) { return Duration.ofSeconds(rpcTimeout.getSeconds()).plus(Duration.ofSeconds(retryCount * 10L)); } private boolean validateResponse(Request request) { if (request.isError()) { return false; } else if (request.returnValues().size() == 0) { return false; } else if (!request.checkReturnTypes("is")) { log.log(Level.WARNING, "Invalid return types for response: " + request.errorMessage()); return false; } return true; } public void close() { downloadExecutor.shutdown(); try { downloadExecutor.awaitTermination(1, TimeUnit.SECONDS); } catch (InterruptedException e) { Thread.interrupted(); } } }
class FileReferenceDownloader { private final static Logger log = Logger.getLogger(FileReferenceDownloader.class.getName()); private final ExecutorService downloadExecutor = Executors.newFixedThreadPool(Math.max(8, Runtime.getRuntime().availableProcessors()), new DaemonThreadFactory("filereference downloader")); private final ConnectionPool connectionPool; private final Downloads downloads; private final Duration downloadTimeout; private final Duration sleepBetweenRetries; private final Duration rpcTimeout; private final File downloadDirectory; FileReferenceDownloader(ConnectionPool connectionPool, Downloads downloads, Duration timeout, Duration sleepBetweenRetries, File downloadDirectory) { this.connectionPool = connectionPool; this.downloads = downloads; this.downloadTimeout = timeout; this.sleepBetweenRetries = sleepBetweenRetries; this.downloadDirectory = downloadDirectory; String timeoutString = System.getenv("VESPA_CONFIGPROXY_FILEDOWNLOAD_RPC_TIMEOUT"); this.rpcTimeout = Duration.ofSeconds(timeoutString == null ? 
30 : Integer.parseInt(timeoutString)); } private void waitUntilDownloadStarted(FileReferenceDownload fileReferenceDownload) { FileReference fileReference = fileReferenceDownload.fileReference(); int retryCount = 0; Connection connection = connectionPool.getCurrent(); do { backoff(retryCount); if (FileDownloader.fileReferenceExists(fileReference, downloadDirectory)) return; if (startDownloadRpc(fileReferenceDownload, retryCount, connection)) return; retryCount++; connection = connectionPool.switchConnection(connection); } while (retryCount < 5); fileReferenceDownload.future().completeExceptionally(new RuntimeException("Failed getting " + fileReference)); downloads.remove(fileReference); } Future<Optional<File>> startDownload(FileReferenceDownload fileReferenceDownload) { FileReference fileReference = fileReferenceDownload.fileReference(); Optional<FileReferenceDownload> inProgress = downloads.get(fileReference); if (inProgress.isPresent()) return inProgress.get().future(); log.log(Level.FINE, () -> "Will download " + fileReference + " with timeout " + downloadTimeout); downloads.add(fileReferenceDownload); downloadExecutor.submit(() -> waitUntilDownloadStarted(fileReferenceDownload)); return fileReferenceDownload.future(); } void failedDownloading(FileReference fileReference) { downloads.remove(fileReference); } private boolean startDownloadRpc(FileReferenceDownload fileReferenceDownload, int retryCount, Connection connection) { Request request = createRequest(fileReferenceDownload); Duration rpcTimeout = rpcTimeout(retryCount); connection.invokeSync(request, rpcTimeout.getSeconds()); Level logLevel = (retryCount > 3 ? Level.INFO : Level.FINE); FileReference fileReference = fileReferenceDownload.fileReference(); if (validateResponse(request)) { log.log(Level.FINE, () -> "Request callback, OK. 
Req: " + request + "\nSpec: " + connection); if (request.returnValues().get(0).asInt32() == 0) { log.log(Level.FINE, () -> "Found " + fileReference + " available at " + connection.getAddress()); return true; } else { log.log(logLevel, fileReference + " not found at " + connection.getAddress()); return false; } } else { log.log(logLevel, "Downloading " + fileReference + " from " + connection.getAddress() + " failed: " + request + ", error: " + request.errorCode() + "(" + request.errorMessage() + "). Will switch config server for next request" + " (retry " + retryCount + ", rpc timeout " + rpcTimeout + ")"); return false; } } private Request createRequest(FileReferenceDownload fileReferenceDownload) { Request request = new Request("filedistribution.serveFile"); request.parameters().add(new StringValue(fileReferenceDownload.fileReference().value())); request.parameters().add(new Int32Value(fileReferenceDownload.downloadFromOtherSourceIfNotFound() ? 0 : 1)); return request; } private Duration rpcTimeout(int retryCount) { return Duration.ofSeconds(rpcTimeout.getSeconds()).plus(Duration.ofSeconds(retryCount * 10L)); } private boolean validateResponse(Request request) { if (request.isError()) { return false; } else if (request.returnValues().size() == 0) { return false; } else if (!request.checkReturnTypes("is")) { log.log(Level.WARNING, "Invalid return types for response: " + request.errorMessage()); return false; } return true; } public void close() { downloadExecutor.shutdown(); try { downloadExecutor.awaitTermination(1, TimeUnit.SECONDS); } catch (InterruptedException e) { Thread.interrupted(); } } }
😀
public ExpressionNode getValue() { return value; }
return value;
public ExpressionNode getValue() { return value; }
class AttributeNode implements ExpressionNode { private ExpressionNode value; private final List<Item> items; public AttributeNode(ExpressionNode value, List<Item> items) { this.value = value; this.items = new ArrayList<>(items); } public AttributeNode setValue(ExpressionNode value) { this.value = value; return this; } public List<Item> getItems() { return items; } @Override public BucketSet getBucketSet(BucketIdFactory factory) { return null; } @Override public Object evaluate(Context context) { StringBuilder pos = new StringBuilder(value.toString()); Object obj = value.evaluate(context); StringBuilder builder = new StringBuilder(); for (Item item : items) { if (obj == null) { throw new IllegalStateException("Can not invoke '" + item + "' on '" + pos + "' because that term " + "evaluated to null."); } if (item.getType() != Item.FUNCTION) { if (builder.length() > 0) { builder.append("."); } builder.append(item.getName()); } else { if (builder.length() > 0) { obj = evaluateFieldPath(builder.toString(), obj); builder = new StringBuilder(); } obj = evaluateFunction(item.getName(), obj); } pos.append(".").append(item); } if (builder.length() > 0) { obj = evaluateFieldPath(builder.toString(), obj); } return obj; } public static class VariableValueList extends ArrayList<ResultList.VariableValue> { } static class IteratorHandler extends FieldPathIteratorHandler { VariableValueList values = new VariableValueList(); @Override public void onPrimitive(FieldValue fv) { values.add(new ResultList.VariableValue((VariableMap)getVariables().clone(), fv)); } } private static Object applyFunction(String function, Object value) { if (function.equalsIgnoreCase("abs")) { if (value instanceof Number) { Number nValue = (Number)value; if (value instanceof Double) { return nValue.doubleValue() * (nValue.doubleValue() < 0 ? -1 : 1); } else if (value instanceof Float) { return nValue.floatValue() * (nValue.floatValue() < 0 ? 
-1 : 1); } else if (value instanceof Long) { return nValue.longValue() * (nValue.longValue() < 0 ? -1 : 1); } else if (value instanceof Integer) { return nValue.intValue() * (nValue.intValue() < 0 ? -1 : 1); } } throw new IllegalStateException("Function 'abs' is only available for numerical values."); } else if (function.equalsIgnoreCase("hash")) { return BobHash.hash(value.toString()); } else if (function.equalsIgnoreCase("lowercase")) { return value.toString().toLowerCase(); } else if (function.equalsIgnoreCase("uppercase")) { return value.toString().toUpperCase(); } throw new IllegalStateException("Function '" + function + "' is not supported."); } private static boolean looksLikeComplexFieldPath(String path) { for (int i = 0; i < path.length(); ++i) { switch (path.charAt(i)) { case '.': case '{': case '[': return true; } } return false; } private static boolean isSimpleImportedField(String path, DocumentType documentType) { if (looksLikeComplexFieldPath(path)) { return false; } return documentType.hasImportedField(path); } private static Object evaluateFieldPath(String fieldPathStr, Object value) { if (value instanceof DocumentPut) { Document doc = ((DocumentPut) value).getDocument(); if (isSimpleImportedField(fieldPathStr, doc.getDataType())) { return null; } FieldPath fieldPath = doc.getDataType().buildFieldPath(fieldPathStr); IteratorHandler handler = new IteratorHandler(); doc.iterateNested(fieldPath, 0, handler); if (handler.values.isEmpty()) { return null; } return handler.values; } else if (value instanceof DocumentUpdate) { return Result.INVALID; } else if (value instanceof DocumentRemove) { return Result.INVALID; } else if (value instanceof DocumentGet) { return Result.INVALID; } return Result.FALSE; } private static Object evaluateFunction(String function, Object value) { if (value instanceof VariableValueList) { VariableValueList retVal = new VariableValueList(); for (ResultList.VariableValue val : ((VariableValueList)value)) { retVal.add(new 
ResultList.VariableValue( (FieldPathIteratorHandler.VariableMap)val.getVariables().clone(), applyFunction(function, val.getValue()))); } return retVal; } return applyFunction(function, value); } @Override public void accept(Visitor visitor) { visitor.visit(this); } @Override public String toString() { StringBuilder ret = new StringBuilder(); ret.append(value); for (Item item : items) { ret.append(".").append(item); } return ret.toString(); } public static class Item { public static final int ATTRIBUTE = 0; public static final int FUNCTION = 1; private String name; private int type = ATTRIBUTE; public Item(String name) { this.name = name; } public String getName() { return name; } public Item setName(String name) { this.name = name; return this; } public int getType() { return type; } public Item setType(int type) { this.type = type; return this; } @Override public String toString() { return name + (type == FUNCTION ? "()" : ""); } } }
class AttributeNode implements ExpressionNode { private ExpressionNode value; private final List<Item> items; public AttributeNode(ExpressionNode value, List<Item> items) { this.value = value; this.items = new ArrayList<>(items); } public AttributeNode setValue(ExpressionNode value) { this.value = value; return this; } public List<Item> getItems() { return items; } @Override public BucketSet getBucketSet(BucketIdFactory factory) { return null; } @Override public Object evaluate(Context context) { StringBuilder pos = new StringBuilder(value.toString()); Object obj = value.evaluate(context); StringBuilder builder = new StringBuilder(); for (Item item : items) { if (obj == null) { throw new IllegalStateException("Can not invoke '" + item + "' on '" + pos + "' because that term " + "evaluated to null."); } if (item.getType() != Item.FUNCTION) { if (builder.length() > 0) { builder.append("."); } builder.append(item.getName()); } else { if (builder.length() > 0) { obj = evaluateFieldPath(builder.toString(), obj); builder = new StringBuilder(); } obj = evaluateFunction(item.getName(), obj); } pos.append(".").append(item); } if (builder.length() > 0) { obj = evaluateFieldPath(builder.toString(), obj); } return obj; } public static class VariableValueList extends ArrayList<ResultList.VariableValue> { } static class IteratorHandler extends FieldPathIteratorHandler { VariableValueList values = new VariableValueList(); @Override public void onPrimitive(FieldValue fv) { values.add(new ResultList.VariableValue((VariableMap)getVariables().clone(), fv)); } } private static Object applyFunction(String function, Object value) { if (function.equalsIgnoreCase("abs")) { if (value instanceof Number) { Number nValue = (Number)value; if (value instanceof Double) { return nValue.doubleValue() * (nValue.doubleValue() < 0 ? -1 : 1); } else if (value instanceof Float) { return nValue.floatValue() * (nValue.floatValue() < 0 ? 
-1 : 1); } else if (value instanceof Long) { return nValue.longValue() * (nValue.longValue() < 0 ? -1 : 1); } else if (value instanceof Integer) { return nValue.intValue() * (nValue.intValue() < 0 ? -1 : 1); } } throw new IllegalStateException("Function 'abs' is only available for numerical values."); } else if (function.equalsIgnoreCase("hash")) { return BobHash.hash(value.toString()); } else if (function.equalsIgnoreCase("lowercase")) { return value.toString().toLowerCase(); } else if (function.equalsIgnoreCase("uppercase")) { return value.toString().toUpperCase(); } throw new IllegalStateException("Function '" + function + "' is not supported."); } private static boolean looksLikeComplexFieldPath(String path) { for (int i = 0; i < path.length(); ++i) { switch (path.charAt(i)) { case '.': case '{': case '[': return true; } } return false; } private static boolean isSimpleImportedField(String path, DocumentType documentType) { if (looksLikeComplexFieldPath(path)) { return false; } return documentType.hasImportedField(path); } private static Object evaluateFieldPath(String fieldPathStr, Object value) { if (value instanceof DocumentPut) { Document doc = ((DocumentPut) value).getDocument(); if (isSimpleImportedField(fieldPathStr, doc.getDataType())) { return null; } FieldPath fieldPath = doc.getDataType().buildFieldPath(fieldPathStr); IteratorHandler handler = new IteratorHandler(); doc.iterateNested(fieldPath, 0, handler); if (handler.values.isEmpty()) { return null; } return handler.values; } else if (value instanceof DocumentUpdate) { return Result.INVALID; } else if (value instanceof DocumentRemove) { return Result.INVALID; } else if (value instanceof DocumentGet) { return Result.INVALID; } return Result.FALSE; } private static Object evaluateFunction(String function, Object value) { if (value instanceof VariableValueList) { VariableValueList retVal = new VariableValueList(); for (ResultList.VariableValue val : ((VariableValueList)value)) { retVal.add(new 
ResultList.VariableValue( (FieldPathIteratorHandler.VariableMap)val.getVariables().clone(), applyFunction(function, val.getValue()))); } return retVal; } return applyFunction(function, value); } @Override public void accept(Visitor visitor) { visitor.visit(this); } @Override public String toString() { StringBuilder ret = new StringBuilder(); ret.append(value); for (Item item : items) { ret.append(".").append(item); } return ret.toString(); } public static class Item { public static final int ATTRIBUTE = 0; public static final int FUNCTION = 1; private String name; private int type = ATTRIBUTE; public Item(String name) { this.name = name; } public String getName() { return name; } public Item setName(String name) { this.name = name; return this; } public int getType() { return type; } public Item setType(int type) { this.type = type; return this; } @Override public String toString() { return name + (type == FUNCTION ? "()" : ""); } } }
This metric is already exported
private static Set<Metric> getStorageMetrics() { Set<Metric> metrics = new LinkedHashSet<>(); metrics.add(new Metric("vds.datastored.alldisks.docs.average")); metrics.add(new Metric("vds.datastored.alldisks.bytes.average")); metrics.add(new Metric("vds.visitor.allthreads.averagevisitorlifetime.sum.max")); metrics.add(new Metric("vds.visitor.allthreads.averagevisitorlifetime.sum.sum")); metrics.add(new Metric("vds.visitor.allthreads.averagevisitorlifetime.sum.count")); metrics.add(new Metric("vds.visitor.allthreads.averagevisitorlifetime.sum.average")); metrics.add(new Metric("vds.visitor.allthreads.averagequeuewait.sum.max")); metrics.add(new Metric("vds.visitor.allthreads.averagequeuewait.sum.sum")); metrics.add(new Metric("vds.visitor.allthreads.averagequeuewait.sum.count")); metrics.add(new Metric("vds.visitor.allthreads.averagequeuewait.sum.average")); metrics.add(new Metric("vds.filestor.alldisks.allthreads.put.sum.count.rate")); metrics.add(new Metric("vds.filestor.alldisks.allthreads.remove.sum.count.rate")); metrics.add(new Metric("vds.filestor.alldisks.allthreads.get.sum.count.rate")); metrics.add(new Metric("vds.filestor.alldisks.allthreads.update.sum.count.rate")); metrics.add(new Metric("vds.filestor.alldisks.allthreads.createiterator.count.rate")); metrics.add(new Metric("vds.filestor.alldisks.allthreads.visit.sum.count.rate")); metrics.add(new Metric("vds.filestor.alldisks.allthreads.remove_location.sum.count.rate")); metrics.add(new Metric("vds.filestor.alldisks.queuesize.max")); metrics.add(new Metric("vds.filestor.alldisks.queuesize.sum")); metrics.add(new Metric("vds.filestor.alldisks.queuesize.count")); metrics.add(new Metric("vds.filestor.alldisks.queuesize.average")); metrics.add(new Metric("vds.filestor.alldisks.averagequeuewait.sum.max")); metrics.add(new Metric("vds.filestor.alldisks.averagequeuewait.sum.sum")); metrics.add(new Metric("vds.filestor.alldisks.averagequeuewait.sum.count")); metrics.add(new 
Metric("vds.filestor.alldisks.averagequeuewait.sum.average")); metrics.add(new Metric("vds.filestor.alldisks.active_operations.size.max")); metrics.add(new Metric("vds.filestor.alldisks.active_operations.size.sum")); metrics.add(new Metric("vds.filestor.alldisks.active_operations.size.count")); metrics.add(new Metric("vds.filestor.alldisks.active_operations.latency.max")); metrics.add(new Metric("vds.filestor.alldisks.active_operations.latency.sum")); metrics.add(new Metric("vds.filestor.alldisks.active_operations.latency.count")); metrics.add(new Metric("vds.filestor.alldisks.allthreads.mergemetadatareadlatency.max")); metrics.add(new Metric("vds.filestor.alldisks.allthreads.mergemetadatareadlatency.sum")); metrics.add(new Metric("vds.filestor.alldisks.allthreads.mergemetadatareadlatency.count")); metrics.add(new Metric("vds.filestor.alldisks.allthreads.mergedatareadlatency.max")); metrics.add(new Metric("vds.filestor.alldisks.allthreads.mergedatareadlatency.sum")); metrics.add(new Metric("vds.filestor.alldisks.allthreads.mergedatareadlatency.count")); metrics.add(new Metric("vds.filestor.alldisks.allthreads.mergedatawritelatency.max")); metrics.add(new Metric("vds.filestor.alldisks.allthreads.mergedatawritelatency.sum")); metrics.add(new Metric("vds.filestor.alldisks.allthreads.mergedatawritelatency.count")); metrics.add(new Metric("vds.filestor.alldisks.allthreads.put_latency.max")); metrics.add(new Metric("vds.filestor.alldisks.allthreads.put_latency.sum")); metrics.add(new Metric("vds.filestor.alldisks.allthreads.put_latency.count")); metrics.add(new Metric("vds.filestor.alldisks.allthreads.remove_latency.max")); metrics.add(new Metric("vds.filestor.alldisks.allthreads.remove_latency.sum")); metrics.add(new Metric("vds.filestor.alldisks.allthreads.remove_latency.count")); metrics.add(new Metric("vds.visitor.allthreads.queuesize.count.max")); metrics.add(new Metric("vds.visitor.allthreads.queuesize.count.sum")); metrics.add(new 
Metric("vds.visitor.allthreads.queuesize.count.count")); metrics.add(new Metric("vds.visitor.allthreads.queuesize.count.average")); metrics.add(new Metric("vds.visitor.allthreads.completed.sum.average")); metrics.add(new Metric("vds.visitor.allthreads.completed.sum.rate")); metrics.add(new Metric("vds.visitor.allthreads.created.sum.rate")); metrics.add(new Metric("vds.visitor.allthreads.failed.sum.rate")); metrics.add(new Metric("vds.visitor.allthreads.averagemessagesendtime.sum.max")); metrics.add(new Metric("vds.visitor.allthreads.averagemessagesendtime.sum.sum")); metrics.add(new Metric("vds.visitor.allthreads.averagemessagesendtime.sum.count")); metrics.add(new Metric("vds.visitor.allthreads.averagemessagesendtime.sum.average")); metrics.add(new Metric("vds.visitor.allthreads.averageprocessingtime.sum.max")); metrics.add(new Metric("vds.visitor.allthreads.averageprocessingtime.sum.sum")); metrics.add(new Metric("vds.visitor.allthreads.averageprocessingtime.sum.count")); metrics.add(new Metric("vds.visitor.allthreads.averageprocessingtime.sum.average")); metrics.add(new Metric("vds.filestor.alldisks.allthreads.put.sum.count.rate")); metrics.add(new Metric("vds.filestor.alldisks.allthreads.put.sum.failed.rate")); metrics.add(new Metric("vds.filestor.alldisks.allthreads.put.sum.test_and_set_failed.rate")); metrics.add(new Metric("vds.filestor.alldisks.allthreads.put.sum.latency.max")); metrics.add(new Metric("vds.filestor.alldisks.allthreads.put.sum.latency.sum")); metrics.add(new Metric("vds.filestor.alldisks.allthreads.put.sum.latency.count")); metrics.add(new Metric("vds.filestor.alldisks.allthreads.put.sum.latency.average")); metrics.add(new Metric("vds.filestor.alldisks.allthreads.put.sum.request_size.max")); metrics.add(new Metric("vds.filestor.alldisks.allthreads.put.sum.request_size.sum")); metrics.add(new Metric("vds.filestor.alldisks.allthreads.put.sum.request_size.count")); metrics.add(new 
Metric("vds.filestor.alldisks.allthreads.remove.sum.count.rate")); metrics.add(new Metric("vds.filestor.alldisks.allthreads.remove.sum.failed.rate")); metrics.add(new Metric("vds.filestor.alldisks.allthreads.remove.sum.test_and_set_failed.rate")); metrics.add(new Metric("vds.filestor.alldisks.allthreads.remove.sum.latency.max")); metrics.add(new Metric("vds.filestor.alldisks.allthreads.remove.sum.latency.sum")); metrics.add(new Metric("vds.filestor.alldisks.allthreads.remove.sum.latency.count")); metrics.add(new Metric("vds.filestor.alldisks.allthreads.remove.sum.latency.average")); metrics.add(new Metric("vds.filestor.alldisks.allthreads.remove.sum.request_size.max")); metrics.add(new Metric("vds.filestor.alldisks.allthreads.remove.sum.request_size.sum")); metrics.add(new Metric("vds.filestor.alldisks.allthreads.remove.sum.request_size.count")); metrics.add(new Metric("vds.filestor.alldisks.allthreads.get.sum.count.rate")); metrics.add(new Metric("vds.filestor.alldisks.allthreads.get.sum.failed.rate")); metrics.add(new Metric("vds.filestor.alldisks.allthreads.get.sum.latency.max")); metrics.add(new Metric("vds.filestor.alldisks.allthreads.get.sum.latency.sum")); metrics.add(new Metric("vds.filestor.alldisks.allthreads.get.sum.latency.count")); metrics.add(new Metric("vds.filestor.alldisks.allthreads.get.sum.latency.average")); metrics.add(new Metric("vds.filestor.alldisks.allthreads.get.sum.request_size.max")); metrics.add(new Metric("vds.filestor.alldisks.allthreads.get.sum.request_size.sum")); metrics.add(new Metric("vds.filestor.alldisks.allthreads.get.sum.request_size.count")); metrics.add(new Metric("vds.filestor.alldisks.allthreads.update.sum.count.rate")); metrics.add(new Metric("vds.filestor.alldisks.allthreads.update.sum.failed.rate")); metrics.add(new Metric("vds.filestor.alldisks.allthreads.update.sum.test_and_set_failed.rate")); metrics.add(new Metric("vds.filestor.alldisks.allthreads.update.sum.latency.max")); metrics.add(new 
Metric("vds.filestor.alldisks.allthreads.update.sum.latency.sum")); metrics.add(new Metric("vds.filestor.alldisks.allthreads.update.sum.latency.count")); metrics.add(new Metric("vds.filestor.alldisks.allthreads.update.sum.latency.average")); metrics.add(new Metric("vds.filestor.alldisks.allthreads.update.sum.request_size.max")); metrics.add(new Metric("vds.filestor.alldisks.allthreads.update.sum.request_size.sum")); metrics.add(new Metric("vds.filestor.alldisks.allthreads.update.sum.request_size.count")); metrics.add(new Metric("vds.filestor.alldisks.allthreads.createiterator.latency.max")); metrics.add(new Metric("vds.filestor.alldisks.allthreads.createiterator.latency.sum")); metrics.add(new Metric("vds.filestor.alldisks.allthreads.createiterator.latency.count")); metrics.add(new Metric("vds.filestor.alldisks.allthreads.createiterator.latency.average")); metrics.add(new Metric("vds.filestor.alldisks.allthreads.visit.sum.latency.max")); metrics.add(new Metric("vds.filestor.alldisks.allthreads.visit.sum.latency.sum")); metrics.add(new Metric("vds.filestor.alldisks.allthreads.visit.sum.latency.count")); metrics.add(new Metric("vds.filestor.alldisks.allthreads.visit.sum.latency.average")); metrics.add(new Metric("vds.filestor.alldisks.allthreads.remove_location.sum.latency.max")); metrics.add(new Metric("vds.filestor.alldisks.allthreads.remove_location.sum.latency.sum")); metrics.add(new Metric("vds.filestor.alldisks.allthreads.remove_location.sum.latency.count")); metrics.add(new Metric("vds.filestor.alldisks.allthreads.remove_location.sum.latency.average")); metrics.add(new Metric("vds.filestor.alldisks.allthreads.splitbuckets.count.rate")); metrics.add(new Metric("vds.filestor.alldisks.allthreads.joinbuckets.count.rate")); metrics.add(new Metric("vds.filestor.alldisks.allthreads.deletebuckets.count.rate")); metrics.add(new Metric("vds.filestor.alldisks.allthreads.deletebuckets.failed.rate")); metrics.add(new 
Metric("vds.filestor.alldisks.allthreads.deletebuckets.latency.max")); metrics.add(new Metric("vds.filestor.alldisks.allthreads.deletebuckets.latency.sum")); metrics.add(new Metric("vds.filestor.alldisks.allthreads.deletebuckets.latency.count")); metrics.add(new Metric("vds.filestor.alldisks.allthreads.deletebuckets.latency.average")); metrics.add(new Metric("vds.filestor.alldisks.allthreads.setbucketstates.count.rate")); metrics.add(new Metric("vds.idealstate.buckets_rechecking.average")); metrics.add(new Metric("vds.idealstate.idealstate_diff.average")); metrics.add(new Metric("vds.idealstate.buckets_toofewcopies.average")); metrics.add(new Metric("vds.idealstate.buckets_toomanycopies.average")); metrics.add(new Metric("vds.idealstate.buckets.average")); metrics.add(new Metric("vds.idealstate.buckets_notrusted.average")); metrics.add(new Metric("vds.idealstate.bucket_replicas_moving_out.average")); metrics.add(new Metric("vds.idealstate.bucket_replicas_copying_out.average")); metrics.add(new Metric("vds.idealstate.bucket_replicas_copying_in.average")); metrics.add(new Metric("vds.idealstate.bucket_replicas_syncing.average")); metrics.add(new Metric("vds.idealstate.max_observed_time_since_last_gc_sec.average")); metrics.add(new Metric("vds.idealstate.delete_bucket.done_ok.rate")); metrics.add(new Metric("vds.idealstate.delete_bucket.done_failed.rate")); metrics.add(new Metric("vds.idealstate.delete_bucket.pending.average")); metrics.add(new Metric("vds.idealstate.merge_bucket.done_ok.rate")); metrics.add(new Metric("vds.idealstate.merge_bucket.done_failed.rate")); metrics.add(new Metric("vds.idealstate.merge_bucket.pending.average")); metrics.add(new Metric("vds.idealstate.merge_bucket.blocked.rate")); metrics.add(new Metric("vds.idealstate.merge_bucket.throttled.rate")); metrics.add(new Metric("vds.idealstate.merge_bucket.source_only_copy_changed.rate")); metrics.add(new Metric("vds.idealstate.merge_bucket.source_only_copy_delete_blocked.rate")); metrics.add(new 
Metric("vds.idealstate.merge_bucket.source_only_copy_delete_failed.rate")); metrics.add(new Metric("vds.idealstate.split_bucket.done_ok.rate")); metrics.add(new Metric("vds.idealstate.split_bucket.done_failed.rate")); metrics.add(new Metric("vds.idealstate.split_bucket.pending.average")); metrics.add(new Metric("vds.idealstate.join_bucket.done_ok.rate")); metrics.add(new Metric("vds.idealstate.join_bucket.done_failed.rate")); metrics.add(new Metric("vds.idealstate.join_bucket.pending.average")); metrics.add(new Metric("vds.idealstate.garbage_collection.done_ok.rate")); metrics.add(new Metric("vds.idealstate.garbage_collection.done_failed.rate")); metrics.add(new Metric("vds.idealstate.garbage_collection.pending.average")); metrics.add(new Metric("vds.idealstate.garbage_collection.documents_removed.count")); metrics.add(new Metric("vds.idealstate.garbage_collection.documents_removed.rate")); metrics.add(new Metric("vds.distributor.puts.sum.latency.max")); metrics.add(new Metric("vds.distributor.puts.sum.latency.sum")); metrics.add(new Metric("vds.distributor.puts.sum.latency.count")); metrics.add(new Metric("vds.distributor.puts.sum.latency.average")); metrics.add(new Metric("vds.distributor.puts.sum.ok.rate")); metrics.add(new Metric("vds.distributor.puts.sum.failures.total.rate")); metrics.add(new Metric("vds.distributor.puts.sum.failures.notfound.rate")); metrics.add(new Metric("vds.distributor.puts.sum.failures.test_and_set_failed.rate")); metrics.add(new Metric("vds.distributor.puts.sum.failures.concurrent_mutations.rate")); metrics.add(new Metric("vds.distributor.puts.sum.failures.notconnected.rate")); metrics.add(new Metric("vds.distributor.puts.sum.failures.notready.rate")); metrics.add(new Metric("vds.distributor.puts.sum.failures.wrongdistributor.rate")); metrics.add(new Metric("vds.distributor.puts.sum.failures.safe_time_not_reached.rate")); metrics.add(new Metric("vds.distributor.puts.sum.failures.storagefailure.rate")); metrics.add(new 
Metric("vds.distributor.puts.sum.failures.timeout.rate")); metrics.add(new Metric("vds.distributor.puts.sum.failures.busy.rate")); metrics.add(new Metric("vds.distributor.puts.sum.failures.inconsistent_bucket.rate")); metrics.add(new Metric("vds.distributor.puts.sum.failures.concurrent_mutations.rate")); metrics.add(new Metric("vds.distributor.removes.sum.latency.max")); metrics.add(new Metric("vds.distributor.removes.sum.latency.sum")); metrics.add(new Metric("vds.distributor.removes.sum.latency.count")); metrics.add(new Metric("vds.distributor.removes.sum.latency.average")); metrics.add(new Metric("vds.distributor.removes.sum.ok.rate")); metrics.add(new Metric("vds.distributor.removes.sum.failures.total.rate")); metrics.add(new Metric("vds.distributor.removes.sum.failures.notfound.rate")); metrics.add(new Metric("vds.distributor.removes.sum.failures.test_and_set_failed.rate")); metrics.add(new Metric("vds.distributor.removes.sum.failures.concurrent_mutations.rate")); metrics.add(new Metric("vds.distributor.updates.sum.latency.max")); metrics.add(new Metric("vds.distributor.updates.sum.latency.sum")); metrics.add(new Metric("vds.distributor.updates.sum.latency.count")); metrics.add(new Metric("vds.distributor.updates.sum.latency.average")); metrics.add(new Metric("vds.distributor.updates.sum.ok.rate")); metrics.add(new Metric("vds.distributor.updates.sum.failures.total.rate")); metrics.add(new Metric("vds.distributor.updates.sum.failures.notfound.rate")); metrics.add(new Metric("vds.distributor.updates.sum.failures.test_and_set_failed.rate")); metrics.add(new Metric("vds.distributor.updates.sum.failures.concurrent_mutations.rate")); metrics.add(new Metric("vds.distributor.updates.sum.diverging_timestamp_updates.rate")); metrics.add(new Metric("vds.distributor.removelocations.sum.ok.rate")); metrics.add(new Metric("vds.distributor.removelocations.sum.failures.total.rate")); metrics.add(new Metric("vds.distributor.gets.sum.latency.max")); metrics.add(new 
Metric("vds.distributor.gets.sum.latency.sum")); metrics.add(new Metric("vds.distributor.gets.sum.latency.count")); metrics.add(new Metric("vds.distributor.gets.sum.latency.average")); metrics.add(new Metric("vds.distributor.gets.sum.ok.rate")); metrics.add(new Metric("vds.distributor.gets.sum.failures.total.rate")); metrics.add(new Metric("vds.distributor.gets.sum.failures.notfound.rate")); metrics.add(new Metric("vds.distributor.visitor.sum.latency.max")); metrics.add(new Metric("vds.distributor.visitor.sum.latency.sum")); metrics.add(new Metric("vds.distributor.visitor.sum.latency.count")); metrics.add(new Metric("vds.distributor.visitor.sum.latency.average")); metrics.add(new Metric("vds.distributor.visitor.sum.ok.rate")); metrics.add(new Metric("vds.distributor.visitor.sum.failures.total.rate")); metrics.add(new Metric("vds.distributor.docsstored.average")); metrics.add(new Metric("vds.distributor.bytesstored.average")); metrics.add(new Metric("vds.bouncer.clock_skew_aborts.count")); metrics.add(new Metric("vds.mergethrottler.averagequeuewaitingtime.max")); metrics.add(new Metric("vds.mergethrottler.averagequeuewaitingtime.sum")); metrics.add(new Metric("vds.mergethrottler.averagequeuewaitingtime.count")); metrics.add(new Metric("vds.mergethrottler.queuesize.max")); metrics.add(new Metric("vds.mergethrottler.queuesize.sum")); metrics.add(new Metric("vds.mergethrottler.queuesize.count")); metrics.add(new Metric("vds.mergethrottler.active_window_size.max")); metrics.add(new Metric("vds.mergethrottler.active_window_size.sum")); metrics.add(new Metric("vds.mergethrottler.active_window_size.count")); metrics.add(new Metric("vds.mergethrottler.bounced_due_to_back_pressure.rate")); metrics.add(new Metric("vds.mergethrottler.locallyexecutedmerges.ok.rate")); metrics.add(new Metric("vds.mergethrottler.mergechains.ok.rate")); metrics.add(new Metric("vds.mergethrottler.mergechains.failures.busy.rate")); metrics.add(new 
Metric("vds.mergethrottler.mergechains.failures.total.rate")); return metrics; }
// NOTE(review): this statement sits between two method bodies, outside any method —
// as written it is not valid Java at class scope. It also repeats a metric name that
// is already registered in the method above (harmless in a Set, but redundant).
// Looks like a merge/extraction artifact; confirm against the original file and
// either remove it or fold it back into the enclosing registration method.
metrics.add(new Metric("vds.distributor.puts.sum.failures.concurrent_mutations.rate"));
/**
 * Builds the metric set emitted by the content (storage) and distributor services:
 * stored-data volume, filestor operation counts/latencies/request sizes, visitor
 * statistics, ideal-state maintenance operations, distributor request outcomes,
 * bouncer clock-skew aborts and merge-throttler queue/window statistics.
 *
 * @return the storage metrics, in registration order
 */
private static Set<Metric> getStorageMetrics() {
    // Names are kept in their original registration order. LinkedHashSet preserves
    // first-insertion order and silently collapses the few names that occur twice
    // (e.g. the filestor operation count rates listed both early and in detail).
    String[] names = {
            // Stored data volume.
            "vds.datastored.alldisks.docs.average",
            "vds.datastored.alldisks.bytes.average",
            // Visitor lifetime and queue wait.
            "vds.visitor.allthreads.averagevisitorlifetime.sum.max",
            "vds.visitor.allthreads.averagevisitorlifetime.sum.sum",
            "vds.visitor.allthreads.averagevisitorlifetime.sum.count",
            "vds.visitor.allthreads.averagevisitorlifetime.sum.average",
            "vds.visitor.allthreads.averagequeuewait.sum.max",
            "vds.visitor.allthreads.averagequeuewait.sum.sum",
            "vds.visitor.allthreads.averagequeuewait.sum.count",
            "vds.visitor.allthreads.averagequeuewait.sum.average",
            // Filestor operation rates (summary).
            "vds.filestor.alldisks.allthreads.put.sum.count.rate",
            "vds.filestor.alldisks.allthreads.remove.sum.count.rate",
            "vds.filestor.alldisks.allthreads.get.sum.count.rate",
            "vds.filestor.alldisks.allthreads.update.sum.count.rate",
            "vds.filestor.alldisks.allthreads.createiterator.count.rate",
            "vds.filestor.alldisks.allthreads.visit.sum.count.rate",
            "vds.filestor.alldisks.allthreads.remove_location.sum.count.rate",
            // Filestor queueing and active operations.
            "vds.filestor.alldisks.queuesize.max",
            "vds.filestor.alldisks.queuesize.sum",
            "vds.filestor.alldisks.queuesize.count",
            "vds.filestor.alldisks.queuesize.average",
            "vds.filestor.alldisks.averagequeuewait.sum.max",
            "vds.filestor.alldisks.averagequeuewait.sum.sum",
            "vds.filestor.alldisks.averagequeuewait.sum.count",
            "vds.filestor.alldisks.averagequeuewait.sum.average",
            "vds.filestor.alldisks.active_operations.size.max",
            "vds.filestor.alldisks.active_operations.size.sum",
            "vds.filestor.alldisks.active_operations.size.count",
            "vds.filestor.alldisks.active_operations.latency.max",
            "vds.filestor.alldisks.active_operations.latency.sum",
            "vds.filestor.alldisks.active_operations.latency.count",
            // Merge I/O latencies.
            "vds.filestor.alldisks.allthreads.mergemetadatareadlatency.max",
            "vds.filestor.alldisks.allthreads.mergemetadatareadlatency.sum",
            "vds.filestor.alldisks.allthreads.mergemetadatareadlatency.count",
            "vds.filestor.alldisks.allthreads.mergedatareadlatency.max",
            "vds.filestor.alldisks.allthreads.mergedatareadlatency.sum",
            "vds.filestor.alldisks.allthreads.mergedatareadlatency.count",
            "vds.filestor.alldisks.allthreads.mergedatawritelatency.max",
            "vds.filestor.alldisks.allthreads.mergedatawritelatency.sum",
            "vds.filestor.alldisks.allthreads.mergedatawritelatency.count",
            "vds.filestor.alldisks.allthreads.put_latency.max",
            "vds.filestor.alldisks.allthreads.put_latency.sum",
            "vds.filestor.alldisks.allthreads.put_latency.count",
            "vds.filestor.alldisks.allthreads.remove_latency.max",
            "vds.filestor.alldisks.allthreads.remove_latency.sum",
            "vds.filestor.alldisks.allthreads.remove_latency.count",
            // Visitor queue and throughput.
            "vds.visitor.allthreads.queuesize.count.max",
            "vds.visitor.allthreads.queuesize.count.sum",
            "vds.visitor.allthreads.queuesize.count.count",
            "vds.visitor.allthreads.queuesize.count.average",
            "vds.visitor.allthreads.completed.sum.average",
            "vds.visitor.allthreads.completed.sum.rate",
            "vds.visitor.allthreads.created.sum.rate",
            "vds.visitor.allthreads.failed.sum.rate",
            "vds.visitor.allthreads.averagemessagesendtime.sum.max",
            "vds.visitor.allthreads.averagemessagesendtime.sum.sum",
            "vds.visitor.allthreads.averagemessagesendtime.sum.count",
            "vds.visitor.allthreads.averagemessagesendtime.sum.average",
            "vds.visitor.allthreads.averageprocessingtime.sum.max",
            "vds.visitor.allthreads.averageprocessingtime.sum.sum",
            "vds.visitor.allthreads.averageprocessingtime.sum.count",
            "vds.visitor.allthreads.averageprocessingtime.sum.average",
            // Filestor put (detailed).
            "vds.filestor.alldisks.allthreads.put.sum.count.rate",
            "vds.filestor.alldisks.allthreads.put.sum.failed.rate",
            "vds.filestor.alldisks.allthreads.put.sum.test_and_set_failed.rate",
            "vds.filestor.alldisks.allthreads.put.sum.latency.max",
            "vds.filestor.alldisks.allthreads.put.sum.latency.sum",
            "vds.filestor.alldisks.allthreads.put.sum.latency.count",
            "vds.filestor.alldisks.allthreads.put.sum.latency.average",
            "vds.filestor.alldisks.allthreads.put.sum.request_size.max",
            "vds.filestor.alldisks.allthreads.put.sum.request_size.sum",
            "vds.filestor.alldisks.allthreads.put.sum.request_size.count",
            // Filestor remove (detailed).
            "vds.filestor.alldisks.allthreads.remove.sum.count.rate",
            "vds.filestor.alldisks.allthreads.remove.sum.failed.rate",
            "vds.filestor.alldisks.allthreads.remove.sum.test_and_set_failed.rate",
            "vds.filestor.alldisks.allthreads.remove.sum.latency.max",
            "vds.filestor.alldisks.allthreads.remove.sum.latency.sum",
            "vds.filestor.alldisks.allthreads.remove.sum.latency.count",
            "vds.filestor.alldisks.allthreads.remove.sum.latency.average",
            "vds.filestor.alldisks.allthreads.remove.sum.request_size.max",
            "vds.filestor.alldisks.allthreads.remove.sum.request_size.sum",
            "vds.filestor.alldisks.allthreads.remove.sum.request_size.count",
            // Filestor get (detailed).
            "vds.filestor.alldisks.allthreads.get.sum.count.rate",
            "vds.filestor.alldisks.allthreads.get.sum.failed.rate",
            "vds.filestor.alldisks.allthreads.get.sum.latency.max",
            "vds.filestor.alldisks.allthreads.get.sum.latency.sum",
            "vds.filestor.alldisks.allthreads.get.sum.latency.count",
            "vds.filestor.alldisks.allthreads.get.sum.latency.average",
            "vds.filestor.alldisks.allthreads.get.sum.request_size.max",
            "vds.filestor.alldisks.allthreads.get.sum.request_size.sum",
            "vds.filestor.alldisks.allthreads.get.sum.request_size.count",
            // Filestor update (detailed).
            "vds.filestor.alldisks.allthreads.update.sum.count.rate",
            "vds.filestor.alldisks.allthreads.update.sum.failed.rate",
            "vds.filestor.alldisks.allthreads.update.sum.test_and_set_failed.rate",
            "vds.filestor.alldisks.allthreads.update.sum.latency.max",
            "vds.filestor.alldisks.allthreads.update.sum.latency.sum",
            "vds.filestor.alldisks.allthreads.update.sum.latency.count",
            "vds.filestor.alldisks.allthreads.update.sum.latency.average",
            "vds.filestor.alldisks.allthreads.update.sum.request_size.max",
            "vds.filestor.alldisks.allthreads.update.sum.request_size.sum",
            "vds.filestor.alldisks.allthreads.update.sum.request_size.count",
            // Iterator / visit / remove-location latencies.
            "vds.filestor.alldisks.allthreads.createiterator.latency.max",
            "vds.filestor.alldisks.allthreads.createiterator.latency.sum",
            "vds.filestor.alldisks.allthreads.createiterator.latency.count",
            "vds.filestor.alldisks.allthreads.createiterator.latency.average",
            "vds.filestor.alldisks.allthreads.visit.sum.latency.max",
            "vds.filestor.alldisks.allthreads.visit.sum.latency.sum",
            "vds.filestor.alldisks.allthreads.visit.sum.latency.count",
            "vds.filestor.alldisks.allthreads.visit.sum.latency.average",
            "vds.filestor.alldisks.allthreads.remove_location.sum.latency.max",
            "vds.filestor.alldisks.allthreads.remove_location.sum.latency.sum",
            "vds.filestor.alldisks.allthreads.remove_location.sum.latency.count",
            "vds.filestor.alldisks.allthreads.remove_location.sum.latency.average",
            // Bucket maintenance operations.
            "vds.filestor.alldisks.allthreads.splitbuckets.count.rate",
            "vds.filestor.alldisks.allthreads.joinbuckets.count.rate",
            "vds.filestor.alldisks.allthreads.deletebuckets.count.rate",
            "vds.filestor.alldisks.allthreads.deletebuckets.failed.rate",
            "vds.filestor.alldisks.allthreads.deletebuckets.latency.max",
            "vds.filestor.alldisks.allthreads.deletebuckets.latency.sum",
            "vds.filestor.alldisks.allthreads.deletebuckets.latency.count",
            "vds.filestor.alldisks.allthreads.deletebuckets.latency.average",
            "vds.filestor.alldisks.allthreads.setbucketstates.count.rate",
            // Ideal-state bucket statistics.
            "vds.idealstate.buckets_rechecking.average",
            "vds.idealstate.idealstate_diff.average",
            "vds.idealstate.buckets_toofewcopies.average",
            "vds.idealstate.buckets_toomanycopies.average",
            "vds.idealstate.buckets.average",
            "vds.idealstate.buckets_notrusted.average",
            "vds.idealstate.bucket_replicas_moving_out.average",
            "vds.idealstate.bucket_replicas_copying_out.average",
            "vds.idealstate.bucket_replicas_copying_in.average",
            "vds.idealstate.bucket_replicas_syncing.average",
            "vds.idealstate.max_observed_time_since_last_gc_sec.average",
            // Ideal-state maintenance operation outcomes.
            "vds.idealstate.delete_bucket.done_ok.rate",
            "vds.idealstate.delete_bucket.done_failed.rate",
            "vds.idealstate.delete_bucket.pending.average",
            "vds.idealstate.merge_bucket.done_ok.rate",
            "vds.idealstate.merge_bucket.done_failed.rate",
            "vds.idealstate.merge_bucket.pending.average",
            "vds.idealstate.merge_bucket.blocked.rate",
            "vds.idealstate.merge_bucket.throttled.rate",
            "vds.idealstate.merge_bucket.source_only_copy_changed.rate",
            "vds.idealstate.merge_bucket.source_only_copy_delete_blocked.rate",
            "vds.idealstate.merge_bucket.source_only_copy_delete_failed.rate",
            "vds.idealstate.split_bucket.done_ok.rate",
            "vds.idealstate.split_bucket.done_failed.rate",
            "vds.idealstate.split_bucket.pending.average",
            "vds.idealstate.join_bucket.done_ok.rate",
            "vds.idealstate.join_bucket.done_failed.rate",
            "vds.idealstate.join_bucket.pending.average",
            "vds.idealstate.garbage_collection.done_ok.rate",
            "vds.idealstate.garbage_collection.done_failed.rate",
            "vds.idealstate.garbage_collection.pending.average",
            "vds.idealstate.garbage_collection.documents_removed.count",
            "vds.idealstate.garbage_collection.documents_removed.rate",
            // Distributor puts.
            "vds.distributor.puts.sum.latency.max",
            "vds.distributor.puts.sum.latency.sum",
            "vds.distributor.puts.sum.latency.count",
            "vds.distributor.puts.sum.latency.average",
            "vds.distributor.puts.sum.ok.rate",
            "vds.distributor.puts.sum.failures.total.rate",
            "vds.distributor.puts.sum.failures.notfound.rate",
            "vds.distributor.puts.sum.failures.test_and_set_failed.rate",
            "vds.distributor.puts.sum.failures.concurrent_mutations.rate",
            "vds.distributor.puts.sum.failures.notconnected.rate",
            "vds.distributor.puts.sum.failures.notready.rate",
            "vds.distributor.puts.sum.failures.wrongdistributor.rate",
            "vds.distributor.puts.sum.failures.safe_time_not_reached.rate",
            "vds.distributor.puts.sum.failures.storagefailure.rate",
            "vds.distributor.puts.sum.failures.timeout.rate",
            "vds.distributor.puts.sum.failures.busy.rate",
            "vds.distributor.puts.sum.failures.inconsistent_bucket.rate",
            // Distributor removes.
            "vds.distributor.removes.sum.latency.max",
            "vds.distributor.removes.sum.latency.sum",
            "vds.distributor.removes.sum.latency.count",
            "vds.distributor.removes.sum.latency.average",
            "vds.distributor.removes.sum.ok.rate",
            "vds.distributor.removes.sum.failures.total.rate",
            "vds.distributor.removes.sum.failures.notfound.rate",
            "vds.distributor.removes.sum.failures.test_and_set_failed.rate",
            "vds.distributor.removes.sum.failures.concurrent_mutations.rate",
            // Distributor updates.
            "vds.distributor.updates.sum.latency.max",
            "vds.distributor.updates.sum.latency.sum",
            "vds.distributor.updates.sum.latency.count",
            "vds.distributor.updates.sum.latency.average",
            "vds.distributor.updates.sum.ok.rate",
            "vds.distributor.updates.sum.failures.total.rate",
            "vds.distributor.updates.sum.failures.notfound.rate",
            "vds.distributor.updates.sum.failures.test_and_set_failed.rate",
            "vds.distributor.updates.sum.failures.concurrent_mutations.rate",
            "vds.distributor.updates.sum.diverging_timestamp_updates.rate",
            // Distributor remove-locations and gets.
            "vds.distributor.removelocations.sum.ok.rate",
            "vds.distributor.removelocations.sum.failures.total.rate",
            "vds.distributor.gets.sum.latency.max",
            "vds.distributor.gets.sum.latency.sum",
            "vds.distributor.gets.sum.latency.count",
            "vds.distributor.gets.sum.latency.average",
            "vds.distributor.gets.sum.ok.rate",
            "vds.distributor.gets.sum.failures.total.rate",
            "vds.distributor.gets.sum.failures.notfound.rate",
            // Distributor visitors and stored totals.
            "vds.distributor.visitor.sum.latency.max",
            "vds.distributor.visitor.sum.latency.sum",
            "vds.distributor.visitor.sum.latency.count",
            "vds.distributor.visitor.sum.latency.average",
            "vds.distributor.visitor.sum.ok.rate",
            "vds.distributor.visitor.sum.failures.total.rate",
            "vds.distributor.docsstored.average",
            "vds.distributor.bytesstored.average",
            // Bouncer.
            "vds.bouncer.clock_skew_aborts.count",
            // Merge throttler.
            "vds.mergethrottler.averagequeuewaitingtime.max",
            "vds.mergethrottler.averagequeuewaitingtime.sum",
            "vds.mergethrottler.averagequeuewaitingtime.count",
            "vds.mergethrottler.queuesize.max",
            "vds.mergethrottler.queuesize.sum",
            "vds.mergethrottler.queuesize.count",
            "vds.mergethrottler.active_window_size.max",
            "vds.mergethrottler.active_window_size.sum",
            "vds.mergethrottler.active_window_size.count",
            "vds.mergethrottler.bounced_due_to_back_pressure.rate",
            "vds.mergethrottler.locallyexecutedmerges.ok.rate",
            "vds.mergethrottler.mergechains.ok.rate",
            "vds.mergethrottler.mergechains.failures.busy.rate",
            "vds.mergethrottler.mergechains.failures.total.rate"
    };
    Set<Metric> metrics = new LinkedHashSet<>();
    for (String name : names) {
        metrics.add(new Metric(name));
    }
    return metrics;
}
/**
 * Declares the full "vespa" metric set: every metric forwarded for Vespa services,
 * built on top of the default Vespa metric set.
 *
 * All getters below return a {@link LinkedHashSet} so that insertion order is
 * preserved and stable; metric names must therefore be added in a fixed order.
 */
class VespaMetricSet {

    public static final MetricSet vespaMetricSet = new MetricSet("vespa",
                                                                 getVespaMetrics(),
                                                                 singleton(defaultVespaMetricSet));

    /** Aggregates the per-service metric groups into one unmodifiable, order-preserving set. */
    private static Set<Metric> getVespaMetrics() {
        Set<Metric> metrics = new LinkedHashSet<>();

        metrics.addAll(getSearchNodeMetrics());
        metrics.addAll(getStorageMetrics());  // defined elsewhere in this file
        metrics.addAll(getDocprocMetrics());
        metrics.addAll(getClusterControllerMetrics());
        metrics.addAll(getQrserverMetrics());
        metrics.addAll(getContainerMetrics());
        metrics.addAll(getConfigServerMetrics());
        metrics.addAll(getSentinelMetrics());
        metrics.addAll(getOtherMetrics());

        return Collections.unmodifiableSet(metrics);
    }

    /** Metrics from config-sentinel (service supervisor). */
    private static Set<Metric> getSentinelMetrics() {
        Set<Metric> metrics = new LinkedHashSet<>();

        metrics.add(new Metric("sentinel.restarts.count"));
        metrics.add(new Metric("sentinel.totalRestarts.last"));
        metrics.add(new Metric("sentinel.uptime.last"));
        addMetric(metrics, "sentinel.running", List.of("count", "last"));

        return metrics;
    }

    /** Miscellaneous infrastructure metrics: slobrok, logd, TLS/connection health. */
    private static Set<Metric> getOtherMetrics() {
        Set<Metric> metrics = new LinkedHashSet<>();

        metrics.add(new Metric("slobrok.heartbeats.failed.count"));
        metrics.add(new Metric("logd.processed.lines.count"));
        metrics.add(new Metric("worker.connections.max"));

        // java (jrt) TLS metrics
        metrics.add(new Metric("jrt.transport.tls-certificate-verification-failures"));
        metrics.add(new Metric("jrt.transport.peer-authorization-failures"));
        metrics.add(new Metric("jrt.transport.server.tls-connections-established"));
        metrics.add(new Metric("jrt.transport.client.tls-connections-established"));
        metrics.add(new Metric("jrt.transport.server.unencrypted-connections-established"));
        metrics.add(new Metric("jrt.transport.client.unencrypted-connections-established"));

        // C++ TLS metrics
        metrics.add(new Metric("vds.server.network.tls-handshakes-failed"));
        metrics.add(new Metric("vds.server.network.peer-authorization-failures"));
        metrics.add(new Metric("vds.server.network.client.tls-connections-established"));
        metrics.add(new Metric("vds.server.network.server.tls-connections-established"));
        metrics.add(new Metric("vds.server.network.client.insecure-connections-established"));
        metrics.add(new Metric("vds.server.network.server.insecure-connections-established"));
        metrics.add(new Metric("vds.server.network.tls-connections-broken"));
        metrics.add(new Metric("vds.server.network.failed-tls-config-reloads"));
        metrics.add(new Metric("vds.server.fnet.num-connections"));

        // Node certificate
        metrics.add(new Metric("node-certificate.expiry.seconds"));

        return metrics;
    }

    /** Config server request, cache and ZooKeeper metrics. */
    private static Set<Metric> getConfigServerMetrics() {
        Set<Metric> metrics = new LinkedHashSet<>();

        metrics.add(new Metric("configserver.requests.count"));
        metrics.add(new Metric("configserver.failedRequests.count"));
        addMetric(metrics, "configserver.latency", List.of("max", "sum", "count", "average"));
        metrics.add(new Metric("configserver.cacheConfigElems.last"));
        metrics.add(new Metric("configserver.cacheChecksumElems.last"));
        metrics.add(new Metric("configserver.hosts.last"));
        metrics.add(new Metric("configserver.delayedResponses.count"));
        metrics.add(new Metric("configserver.sessionChangeErrors.count"));

        metrics.add(new Metric("configserver.zkZNodes.last"));
        metrics.add(new Metric("configserver.zkAvgLatency.last"));
        metrics.add(new Metric("configserver.zkMaxLatency.last"));
        metrics.add(new Metric("configserver.zkConnections.last"));
        metrics.add(new Metric("configserver.zkOutstandingRequests.last"));

        return metrics;
    }

    /** Generic jdisc container metrics: request handling, thread pools, JVM, HTTP and filtering. */
    private static Set<Metric> getContainerMetrics() {
        Set<Metric> metrics = new LinkedHashSet<>();

        addMetric(metrics, "jdisc.http.requests", List.of("rate", "count"));
        metrics.add(new Metric("handled.requests.count"));
        addMetric(metrics, "handled.latency", List.of("max", "sum", "count", "average"));

        addMetric(metrics, "serverRejectedRequests", List.of("rate", "count"));
        addMetric(metrics, "serverThreadPoolSize", List.of("average", "min", "max", "rate", "count", "last"));
        addMetric(metrics, "serverActiveThreads", List.of("average", "min", "max", "rate", "sum", "count", "last"));
        addMetric(metrics, "serverNumOpenConnections", List.of("average", "max", "last"));
        addMetric(metrics, "serverNumConnections", List.of("average", "max", "last"));

        {
            List<String> suffixes = List.of("sum", "count", "last", "min", "max");
            addMetric(metrics, "jdisc.thread_pool.unhandled_exceptions", suffixes);
            addMetric(metrics, "jdisc.thread_pool.work_queue.capacity", suffixes);
            addMetric(metrics, "jdisc.thread_pool.work_queue.size", suffixes);
        }

        addMetric(metrics, "httpapi_latency", List.of("max", "sum", "count", "average"));
        addMetric(metrics, "httpapi_pending", List.of("max", "sum", "count", "average"));
        metrics.add(new Metric("httpapi_num_operations.rate"));
        metrics.add(new Metric("httpapi_num_updates.rate"));
        metrics.add(new Metric("httpapi_num_removes.rate"));
        metrics.add(new Metric("httpapi_num_puts.rate"));
        metrics.add(new Metric("httpapi_succeeded.rate"));
        metrics.add(new Metric("httpapi_failed.rate"));
        metrics.add(new Metric("httpapi_parse_error.rate"));
        addMetric(metrics, "httpapi_condition_not_met", List.of("rate"));

        metrics.add(new Metric("mem.heap.total.average"));
        metrics.add(new Metric("mem.heap.free.average"));
        addMetric(metrics, "mem.heap.used", List.of("average", "max"));
        metrics.add(new Metric("jdisc.memory_mappings.max"));
        metrics.add(new Metric("jdisc.open_file_descriptors.max"));

        addMetric(metrics, "jdisc.gc.count", List.of("average", "max", "last"));
        addMetric(metrics, "jdisc.gc.ms", List.of("average", "max", "last"));

        metrics.add(new Metric("jdisc.deactivated_containers.total.last"));
        metrics.add(new Metric("jdisc.deactivated_containers.with_retained_refs.last"));

        metrics.add(new Metric("athenz-tenant-cert.expiry.seconds.last"));
        metrics.add(new Metric("container-iam-role.expiry.seconds"));

        metrics.add(new Metric("jdisc.http.request.prematurely_closed.rate"));
        addMetric(metrics, "jdisc.http.request.requests_per_connection",
                  List.of("sum", "count", "min", "max", "average"));

        metrics.add(new Metric("http.status.1xx.rate"));
        metrics.add(new Metric("http.status.2xx.rate"));
        metrics.add(new Metric("http.status.3xx.rate"));
        metrics.add(new Metric("http.status.4xx.rate"));
        metrics.add(new Metric("http.status.5xx.rate"));
        metrics.add(new Metric("http.status.401.rate"));
        metrics.add(new Metric("http.status.403.rate"));

        addMetric(metrics, "jdisc.http.request.uri_length", List.of("max", "sum", "count", "average"));
        addMetric(metrics, "jdisc.http.request.content_size", List.of("max", "sum", "count", "average"));

        metrics.add(new Metric("jdisc.http.ssl.handshake.failure.missing_client_cert.rate"));
        metrics.add(new Metric("jdisc.http.ssl.handshake.failure.expired_client_cert.rate"));
        metrics.add(new Metric("jdisc.http.ssl.handshake.failure.invalid_client_cert.rate"));
        metrics.add(new Metric("jdisc.http.ssl.handshake.failure.incompatible_protocols.rate"));
        metrics.add(new Metric("jdisc.http.ssl.handshake.failure.incompatible_ciphers.rate"));
        metrics.add(new Metric("jdisc.http.ssl.handshake.failure.unknown.rate"));

        metrics.add(new Metric("jdisc.http.handler.unhandled_exceptions.rate"));

        addMetric(metrics, "jdisc.http.jetty.threadpool.thread.max", List.of("last"));
        addMetric(metrics, "jdisc.http.jetty.threadpool.thread.reserved", List.of("last"));
        addMetric(metrics, "jdisc.http.jetty.threadpool.thread.busy", List.of("sum", "count", "min", "max"));
        addMetric(metrics, "jdisc.http.jetty.threadpool.thread.total", List.of("sum", "count", "min", "max"));
        addMetric(metrics, "jdisc.http.jetty.threadpool.queue.size", List.of("sum", "count", "min", "max"));

        addMetric(metrics, "jdisc.http.filtering.request.handled", List.of("rate"));
        addMetric(metrics, "jdisc.http.filtering.request.unhandled", List.of("rate"));
        addMetric(metrics, "jdisc.http.filtering.response.handled", List.of("rate"));
        addMetric(metrics, "jdisc.http.filtering.response.unhandled", List.of("rate"));
        addMetric(metrics, "jdisc.application.failed_component_graphs", List.of("rate"));
        addMetric(metrics, "jdisc.http.filter.rule.blocked_requests", List.of("rate"));
        addMetric(metrics, "jdisc.http.filter.rule.allowed_requests", List.of("rate"));

        return metrics;
    }

    /** Cluster controller metrics: node state counts, tick timing, resource-usage limits. */
    private static Set<Metric> getClusterControllerMetrics() {
        Set<Metric> metrics = new LinkedHashSet<>();

        metrics.add(new Metric("cluster-controller.down.count.last"));
        metrics.add(new Metric("cluster-controller.initializing.count.last"));
        metrics.add(new Metric("cluster-controller.maintenance.count.last"));
        metrics.add(new Metric("cluster-controller.retired.count.last"));
        metrics.add(new Metric("cluster-controller.stopping.count.last"));
        metrics.add(new Metric("cluster-controller.up.count.last"));
        metrics.add(new Metric("cluster-controller.cluster-state-change.count"));

        addMetric(metrics, "cluster-controller.busy-tick-time-ms", List.of("last", "max", "sum", "count"));
        addMetric(metrics, "cluster-controller.idle-tick-time-ms", List.of("last", "max", "sum", "count"));
        addMetric(metrics, "cluster-controller.work-ms", List.of("last", "sum", "count"));

        metrics.add(new Metric("cluster-controller.is-master.last"));
        metrics.add(new Metric("cluster-controller.remote-task-queue.size.last"));
        metrics.add(new Metric("cluster-controller.node-event.count"));

        addMetric(metrics, "cluster-controller.resource_usage.nodes_above_limit", List.of("last", "max"));
        addMetric(metrics, "cluster-controller.resource_usage.max_memory_utilization", List.of("last", "max"));
        addMetric(metrics, "cluster-controller.resource_usage.max_disk_utilization", List.of("last", "max"));
        metrics.add(new Metric("cluster-controller.resource_usage.disk_limit.last"));
        metrics.add(new Metric("cluster-controller.resource_usage.memory_limit.last"));

        metrics.add(new Metric("reindexing.progress.last"));

        return metrics;
    }

    /** Document-processing chain metrics. */
    private static Set<Metric> getDocprocMetrics() {
        Set<Metric> metrics = new LinkedHashSet<>();
        metrics.add(new Metric("documents_processed.rate"));
        return metrics;
    }

    /** Query serving (qrserver) metrics: query/feed latency, hit counts, relevance, errors. */
    private static Set<Metric> getQrserverMetrics() {
        Set<Metric> metrics = new LinkedHashSet<>();

        metrics.add(new Metric("peak_qps.max"));
        addMetric(metrics, "search_connections", List.of("max", "sum", "count", "average"));
        addMetric(metrics, "feed.latency", List.of("max", "sum", "count", "average"));
        addMetric(metrics, "feed.http-requests", List.of("count", "rate"));
        metrics.add(new Metric("queries.rate"));
        addMetric(metrics, "query_container_latency", List.of("max", "sum", "count", "average"));
        addMetric(metrics, "query_latency",
                  List.of("max", "sum", "count", "average", "95percentile", "99percentile"));
        metrics.add(new Metric("failed_queries.rate"));
        metrics.add(new Metric("degraded_queries.rate"));
        addMetric(metrics, "hits_per_query",
                  List.of("max", "sum", "count", "average", "95percentile", "99percentile"));
        addMetric(metrics, "query_hit_offset", List.of("max", "sum", "count"));
        metrics.add(new Metric("documents_covered.count"));
        metrics.add(new Metric("documents_total.count"));
        metrics.add(new Metric("dispatch_internal.rate"));
        metrics.add(new Metric("dispatch_fdispatch.rate"));

        // List (not Set) so the insertion order into the LinkedHashSet is deterministic.
        addMetric(metrics, "jdisc.render.latency", List.of("min", "max", "count", "sum", "last", "average"));

        addMetric(metrics, "totalhits_per_query",
                  List.of("max", "sum", "count", "average", "95percentile", "99percentile"));
        metrics.add(new Metric("empty_results.rate"));
        addMetric(metrics, "requestsOverQuota", List.of("rate", "count"));

        addMetric(metrics, "relevance.at_1", List.of("sum", "count", "average"));
        addMetric(metrics, "relevance.at_3", List.of("sum", "count", "average"));
        addMetric(metrics, "relevance.at_10", List.of("sum", "count", "average"));

        // Errors from search container
        metrics.add(new Metric("error.timeout.rate"));
        metrics.add(new Metric("error.backends_oos.rate"));
        metrics.add(new Metric("error.plugin_failure.rate"));
        metrics.add(new Metric("error.backend_communication_error.rate"));
        metrics.add(new Metric("error.empty_document_summaries.rate"));
        metrics.add(new Metric("error.invalid_query_parameter.rate"));
        metrics.add(new Metric("error.internal_server_error.rate"));
        metrics.add(new Metric("error.misconfigured_server.rate"));
        metrics.add(new Metric("error.invalid_query_transformation.rate"));
        metrics.add(new Metric("error.result_with_errors.rate"));
        metrics.add(new Metric("error.unspecified.rate"));
        metrics.add(new Metric("error.unhandled_exception.rate"));

        return metrics;
    }

    /** Adds the standard executor metrics (queue size, pending, throughput, utilization) under {@code prefix}. */
    private static void addSearchNodeExecutorMetrics(Set<Metric> metrics, String prefix) {
        addMetric(metrics, prefix + ".queuesize", List.of("max", "sum", "count"));
        metrics.add(new Metric(prefix + ".maxpending.last"));
        metrics.add(new Metric(prefix + ".accepted.rate"));
        metrics.add(new Metric(prefix + ".wakeups.rate"));
        addMetric(metrics, prefix + ".utilization", List.of("max", "sum", "count"));
    }

    /** Search node (proton) metrics: document DB state, executors, stores, matching and feeding. */
    private static Set<Metric> getSearchNodeMetrics() {
        Set<Metric> metrics = new LinkedHashSet<>();

        metrics.add(new Metric("content.proton.documentdb.documents.total.last"));
        metrics.add(new Metric("content.proton.documentdb.documents.ready.last"));
        metrics.add(new Metric("content.proton.documentdb.documents.active.last"));
        metrics.add(new Metric("content.proton.documentdb.documents.removed.last"));

        metrics.add(new Metric("content.proton.documentdb.index.docs_in_memory.last"));
        metrics.add(new Metric("content.proton.documentdb.disk_usage.last"));
        metrics.add(new Metric("content.proton.documentdb.memory_usage.allocated_bytes.max"));
        metrics.add(new Metric("content.proton.documentdb.heart_beat_age.last"));
        metrics.add(new Metric("content.proton.transport.query.count.rate"));
        metrics.add(new Metric("content.proton.docsum.docs.rate"));
        addMetric(metrics, "content.proton.docsum.latency", List.of("max", "sum", "count", "average"));
        addMetric(metrics, "content.proton.transport.query.latency", List.of("max", "sum", "count", "average"));

        // Search protocol
        addMetric(metrics, "content.proton.search_protocol.query.latency", List.of("max", "sum", "count"));
        addMetric(metrics, "content.proton.search_protocol.query.request_size", List.of("max", "sum", "count"));
        addMetric(metrics, "content.proton.search_protocol.query.reply_size", List.of("max", "sum", "count"));
        addMetric(metrics, "content.proton.search_protocol.docsum.latency", List.of("max", "sum", "count"));
        addMetric(metrics, "content.proton.search_protocol.docsum.request_size", List.of("max", "sum", "count"));
        addMetric(metrics, "content.proton.search_protocol.docsum.reply_size", List.of("max", "sum", "count"));
        metrics.add(new Metric("content.proton.search_protocol.docsum.requested_documents.count"));

        // Executors shared between all document dbs
        addSearchNodeExecutorMetrics(metrics, "content.proton.executor.proton");
        addSearchNodeExecutorMetrics(metrics, "content.proton.executor.flush");
        addSearchNodeExecutorMetrics(metrics, "content.proton.executor.match");
        addSearchNodeExecutorMetrics(metrics, "content.proton.executor.docsum");
        addSearchNodeExecutorMetrics(metrics, "content.proton.executor.shared");
        addSearchNodeExecutorMetrics(metrics, "content.proton.executor.warmup");
        addSearchNodeExecutorMetrics(metrics, "content.proton.executor.field_writer");

        // Job load
        for (String job : List.of("total", "attribute_flush", "memory_index_flush", "disk_index_fusion",
                                  "document_store_flush", "document_store_compact", "bucket_move",
                                  "lid_space_compact", "removed_documents_prune"))
            metrics.add(new Metric("content.proton.documentdb.job." + job + ".average"));

        // Threading service (per document db)
        addSearchNodeExecutorMetrics(metrics, "content.proton.documentdb.threading_service.master");
        addSearchNodeExecutorMetrics(metrics, "content.proton.documentdb.threading_service.index");
        addSearchNodeExecutorMetrics(metrics, "content.proton.documentdb.threading_service.summary");
        addSearchNodeExecutorMetrics(metrics, "content.proton.documentdb.threading_service.index_field_inverter");
        addSearchNodeExecutorMetrics(metrics, "content.proton.documentdb.threading_service.index_field_writer");
        addSearchNodeExecutorMetrics(metrics, "content.proton.documentdb.threading_service.attribute_field_writer");

        // Lid space, for each of the ready/notready/removed sub-databases
        for (String field : List.of("lid_bloat_factor.average", "lid_fragmentation_factor.average",
                                    "lid_limit.last", "highest_used_lid.last", "used_lids.last"))
            for (String subDb : List.of("ready", "notready", "removed"))
                metrics.add(new Metric("content.proton.documentdb." + subDb + ".lid_space." + field));

        // Bucket move
        metrics.add(new Metric("content.proton.documentdb.bucket_move.buckets_pending.last"));

        // Resource usage
        metrics.add(new Metric("content.proton.resource_usage.disk.average"));
        metrics.add(new Metric("content.proton.resource_usage.disk_utilization.average"));
        metrics.add(new Metric("content.proton.resource_usage.memory.average"));
        metrics.add(new Metric("content.proton.resource_usage.memory_utilization.average"));
        metrics.add(new Metric("content.proton.resource_usage.transient_memory.average"));
        metrics.add(new Metric("content.proton.resource_usage.transient_disk.average"));
        metrics.add(new Metric("content.proton.resource_usage.memory_mappings.max"));
        metrics.add(new Metric("content.proton.resource_usage.open_file_descriptors.max"));
        metrics.add(new Metric("content.proton.resource_usage.feeding_blocked.max"));
        metrics.add(new Metric("content.proton.resource_usage.malloc_arena.max"));
        metrics.add(new Metric("content.proton.documentdb.attribute.resource_usage.address_space.max"));
        addMetric(metrics, "content.proton.documentdb.attribute.resource_usage.feeding_blocked",
                  List.of("last", "max"));

        // Transaction log
        metrics.add(new Metric("content.proton.transactionlog.entries.average"));
        metrics.add(new Metric("content.proton.transactionlog.disk_usage.average"));
        metrics.add(new Metric("content.proton.transactionlog.replay_time.last"));

        // Document store, per sub-database
        for (String subDb : List.of("ready", "notready", "removed")) {
            String prefix = "content.proton.documentdb." + subDb + ".document_store.";
            metrics.add(new Metric(prefix + "disk_usage.average"));
            metrics.add(new Metric(prefix + "disk_bloat.average"));
            metrics.add(new Metric(prefix + "max_bucket_spread.average"));
            metrics.add(new Metric(prefix + "memory_usage.allocated_bytes.average"));
            metrics.add(new Metric(prefix + "memory_usage.used_bytes.average"));
            metrics.add(new Metric(prefix + "memory_usage.dead_bytes.average"));
            metrics.add(new Metric(prefix + "memory_usage.onhold_bytes.average"));
        }

        // Document store caches (the removed sub-database has no cache)
        for (String subDb : List.of("ready", "notready")) {
            String prefix = "content.proton.documentdb." + subDb + ".document_store.cache.";
            metrics.add(new Metric(prefix + "memory_usage.average"));
            metrics.add(new Metric(prefix + "hit_rate.average"));
            metrics.add(new Metric(prefix + "lookups.rate"));
            metrics.add(new Metric(prefix + "invalidations.rate"));
        }

        // Attribute memory usage
        for (String subDb : List.of("ready", "notready")) {
            String prefix = "content.proton.documentdb." + subDb + ".attribute.memory_usage.";
            metrics.add(new Metric(prefix + "allocated_bytes.average"));
            metrics.add(new Metric(prefix + "used_bytes.average"));
            metrics.add(new Metric(prefix + "dead_bytes.average"));
            metrics.add(new Metric(prefix + "onhold_bytes.average"));
        }

        // Index memory usage
        addMetric(metrics, "content.proton.documentdb.index.memory_usage",
                  List.of("allocated_bytes.average", "used_bytes.average",
                          "dead_bytes.average", "onhold_bytes.average"));

        // Matching
        metrics.add(new Metric("content.proton.documentdb.matching.queries.rate"));
        metrics.add(new Metric("content.proton.documentdb.matching.soft_doomed_queries.rate"));
        addMetric(metrics, "content.proton.documentdb.matching.query_latency",
                  List.of("max", "sum", "count", "average"));
        addMetric(metrics, "content.proton.documentdb.matching.query_collateral_time",
                  List.of("max", "sum", "count", "average"));
        addMetric(metrics, "content.proton.documentdb.matching.query_setup_time", List.of("max", "sum", "count"));
        addMetric(metrics, "content.proton.documentdb.matching.docs_matched", List.of("rate", "max", "sum", "count"));

        // Matching, per rank profile
        String rankProfile = "content.proton.documentdb.matching.rank_profile";
        metrics.add(new Metric(rankProfile + ".queries.rate"));
        metrics.add(new Metric(rankProfile + ".soft_doomed_queries.rate"));
        addMetric(metrics, rankProfile + ".soft_doom_factor", List.of("min", "max", "sum", "count"));
        addMetric(metrics, rankProfile + ".query_latency", List.of("max", "sum", "count", "average"));
        addMetric(metrics, rankProfile + ".query_collateral_time", List.of("max", "sum", "count", "average"));
        addMetric(metrics, rankProfile + ".query_setup_time", List.of("max", "sum", "count"));
        addMetric(metrics, rankProfile + ".rerank_time", List.of("max", "sum", "count", "average"));
        addMetric(metrics, rankProfile + ".docs_matched", List.of("rate", "max", "sum", "count"));
        metrics.add(new Metric(rankProfile + ".limited_queries.rate"));

        // Feeding
        addMetric(metrics, "content.proton.documentdb.feeding.commit.operations",
                  List.of("max", "sum", "count", "rate"));
        addMetric(metrics, "content.proton.documentdb.feeding.commit.latency", List.of("max", "sum", "count"));

        return metrics;
    }

    /**
     * Adds one metric per aggregate suffix, named {@code metricName + "." + suffix}.
     * Pass an ordered {@code Iterable} (e.g. {@link List}) to keep insertion order deterministic.
     */
    private static void addMetric(Set<Metric> metrics, String metricName, Iterable<String> aggregateSuffixes) {
        for (String suffix : aggregateSuffixes) {
            metrics.add(new Metric(metricName + "." + suffix));
        }
    }

}
/**
 * Declares the full "vespa" metric set: every metric name forwarded for Vespa services,
 * built as an extension of {@code defaultVespaMetricSet}.
 *
 * <p>This class only registers metric <em>names</em> (base name plus aggregate suffixes such
 * as {@code .max}, {@code .sum}, {@code .count}); the values are produced by the individual
 * Vespa services at runtime. Sets are {@link LinkedHashSet}s so registration order is stable.</p>
 */
class VespaMetricSet {

    /** The complete "vespa" metric set, extending the default Vespa metric set. */
    public static final MetricSet vespaMetricSet = new MetricSet("vespa",
                                                                 getVespaMetrics(),
                                                                 singleton(defaultVespaMetricSet));

    /** Collects the metrics of all service types into one unmodifiable, insertion-ordered set. */
    private static Set<Metric> getVespaMetrics() {
        Set<Metric> metrics = new LinkedHashSet<>();
        metrics.addAll(getSearchNodeMetrics());
        metrics.addAll(getStorageMetrics());
        metrics.addAll(getDocprocMetrics());
        metrics.addAll(getClusterControllerMetrics());
        metrics.addAll(getQrserverMetrics());
        metrics.addAll(getContainerMetrics());
        metrics.addAll(getConfigServerMetrics());
        metrics.addAll(getSentinelMetrics());
        metrics.addAll(getOtherMetrics());
        return Collections.unmodifiableSet(metrics);
    }

    /** Metrics emitted by the config sentinel (service supervisor). */
    private static Set<Metric> getSentinelMetrics() {
        Set<Metric> metrics = new LinkedHashSet<>();
        metrics.add(new Metric("sentinel.restarts.count"));
        metrics.add(new Metric("sentinel.totalRestarts.last"));
        metrics.add(new Metric("sentinel.uptime.last"));
        addMetric(metrics, "sentinel.running", List.of("count", "last"));
        return metrics;
    }

    /** Metrics from miscellaneous infrastructure: slobrok, logd, and transport/TLS counters. */
    private static Set<Metric> getOtherMetrics() {
        Set<Metric> metrics = new LinkedHashSet<>();
        metrics.add(new Metric("slobrok.heartbeats.failed.count"));
        metrics.add(new Metric("logd.processed.lines.count"));
        metrics.add(new Metric("worker.connections.max"));

        // jrt transport connection/TLS counters
        metrics.add(new Metric("jrt.transport.tls-certificate-verification-failures"));
        metrics.add(new Metric("jrt.transport.peer-authorization-failures"));
        metrics.add(new Metric("jrt.transport.server.tls-connections-established"));
        metrics.add(new Metric("jrt.transport.client.tls-connections-established"));
        metrics.add(new Metric("jrt.transport.server.unencrypted-connections-established"));
        metrics.add(new Metric("jrt.transport.client.unencrypted-connections-established"));

        // vds server network connection/TLS counters
        metrics.add(new Metric("vds.server.network.tls-handshakes-failed"));
        metrics.add(new Metric("vds.server.network.peer-authorization-failures"));
        metrics.add(new Metric("vds.server.network.client.tls-connections-established"));
        metrics.add(new Metric("vds.server.network.server.tls-connections-established"));
        metrics.add(new Metric("vds.server.network.client.insecure-connections-established"));
        metrics.add(new Metric("vds.server.network.server.insecure-connections-established"));
        metrics.add(new Metric("vds.server.network.tls-connections-broken"));
        metrics.add(new Metric("vds.server.network.failed-tls-config-reloads"));
        metrics.add(new Metric("vds.server.fnet.num-connections"));

        metrics.add(new Metric("node-certificate.expiry.seconds"));
        return metrics;
    }

    /** Metrics emitted by the config server. */
    private static Set<Metric> getConfigServerMetrics() {
        Set<Metric> metrics = new LinkedHashSet<>();
        metrics.add(new Metric("configserver.requests.count"));
        metrics.add(new Metric("configserver.failedRequests.count"));
        addMetric(metrics, "configserver.latency", List.of("max", "sum", "count", "average"));
        metrics.add(new Metric("configserver.cacheConfigElems.last"));
        metrics.add(new Metric("configserver.cacheChecksumElems.last"));
        metrics.add(new Metric("configserver.hosts.last"));
        metrics.add(new Metric("configserver.delayedResponses.count"));
        metrics.add(new Metric("configserver.sessionChangeErrors.count"));
        metrics.add(new Metric("configserver.zkZNodes.last"));
        metrics.add(new Metric("configserver.zkAvgLatency.last"));
        metrics.add(new Metric("configserver.zkMaxLatency.last"));
        metrics.add(new Metric("configserver.zkConnections.last"));
        metrics.add(new Metric("configserver.zkOutstandingRequests.last"));
        return metrics;
    }

    /** Metrics common to jdisc containers: request handling, thread pools, JVM, HTTP server. */
    private static Set<Metric> getContainerMetrics() {
        Set<Metric> metrics = new LinkedHashSet<>();

        addMetric(metrics, "jdisc.http.requests", List.of("rate", "count"));
        metrics.add(new Metric("handled.requests.count"));
        addMetric(metrics, "handled.latency", List.of("max", "sum", "count", "average"));

        addMetric(metrics, "serverRejectedRequests", List.of("rate", "count"));
        // NOTE: serverThreadPoolSize has no "sum" aggregate while serverActiveThreads does.
        addMetric(metrics, "serverThreadPoolSize", List.of("average", "min", "max", "rate", "count", "last"));
        addMetric(metrics, "serverActiveThreads", List.of("average", "min", "max", "rate", "sum", "count", "last"));
        addMetric(metrics, "serverNumOpenConnections", List.of("average", "max", "last"));
        addMetric(metrics, "serverNumConnections", List.of("average", "max", "last"));

        {
            List<String> suffixes = List.of("sum", "count", "last", "min", "max");
            addMetric(metrics, "jdisc.thread_pool.unhandled_exceptions", suffixes);
            addMetric(metrics, "jdisc.thread_pool.work_queue.capacity", suffixes);
            addMetric(metrics, "jdisc.thread_pool.work_queue.size", suffixes);
        }

        addMetric(metrics, "httpapi_latency", List.of("max", "sum", "count", "average"));
        addMetric(metrics, "httpapi_pending", List.of("max", "sum", "count", "average"));
        metrics.add(new Metric("httpapi_num_operations.rate"));
        metrics.add(new Metric("httpapi_num_updates.rate"));
        metrics.add(new Metric("httpapi_num_removes.rate"));
        metrics.add(new Metric("httpapi_num_puts.rate"));
        metrics.add(new Metric("httpapi_succeeded.rate"));
        metrics.add(new Metric("httpapi_failed.rate"));
        metrics.add(new Metric("httpapi_parse_error.rate"));
        addMetric(metrics, "httpapi_condition_not_met", List.of("rate"));

        // JVM heap and process resources
        metrics.add(new Metric("mem.heap.total.average"));
        metrics.add(new Metric("mem.heap.free.average"));
        addMetric(metrics, "mem.heap.used", List.of("average", "max"));
        metrics.add(new Metric("jdisc.memory_mappings.max"));
        metrics.add(new Metric("jdisc.open_file_descriptors.max"));

        addMetric(metrics, "jdisc.gc.count", List.of("average", "max", "last"));
        addMetric(metrics, "jdisc.gc.ms", List.of("average", "max", "last"));

        metrics.add(new Metric("jdisc.deactivated_containers.total.last"));
        metrics.add(new Metric("jdisc.deactivated_containers.with_retained_refs.last"));

        metrics.add(new Metric("athenz-tenant-cert.expiry.seconds.last"));
        metrics.add(new Metric("container-iam-role.expiry.seconds"));

        metrics.add(new Metric("jdisc.http.request.prematurely_closed.rate"));
        addMetric(metrics, "jdisc.http.request.requests_per_connection",
                  List.of("sum", "count", "min", "max", "average"));

        // HTTP status families and selected individual codes, rates only
        for (String status : List.of("1xx", "2xx", "3xx", "4xx", "5xx", "401", "403"))
            metrics.add(new Metric("http.status." + status + ".rate"));

        addMetric(metrics, "jdisc.http.request.uri_length", List.of("max", "sum", "count", "average"));
        addMetric(metrics, "jdisc.http.request.content_size", List.of("max", "sum", "count", "average"));

        // TLS handshake failure causes, rates only
        for (String cause : List.of("missing_client_cert", "expired_client_cert", "invalid_client_cert",
                                    "incompatible_protocols", "incompatible_ciphers", "unknown"))
            metrics.add(new Metric("jdisc.http.ssl.handshake.failure." + cause + ".rate"));

        metrics.add(new Metric("jdisc.http.handler.unhandled_exceptions.rate"));

        addMetric(metrics, "jdisc.http.jetty.threadpool.thread.max", List.of("last"));
        addMetric(metrics, "jdisc.http.jetty.threadpool.thread.reserved", List.of("last"));
        addMetric(metrics, "jdisc.http.jetty.threadpool.thread.busy", List.of("sum", "count", "min", "max"));
        addMetric(metrics, "jdisc.http.jetty.threadpool.thread.total", List.of("sum", "count", "min", "max"));
        addMetric(metrics, "jdisc.http.jetty.threadpool.queue.size", List.of("sum", "count", "min", "max"));

        addMetric(metrics, "jdisc.http.filtering.request.handled", List.of("rate"));
        addMetric(metrics, "jdisc.http.filtering.request.unhandled", List.of("rate"));
        addMetric(metrics, "jdisc.http.filtering.response.handled", List.of("rate"));
        addMetric(metrics, "jdisc.http.filtering.response.unhandled", List.of("rate"));

        addMetric(metrics, "jdisc.application.failed_component_graphs", List.of("rate"));

        addMetric(metrics, "jdisc.http.filter.rule.blocked_requests", List.of("rate"));
        addMetric(metrics, "jdisc.http.filter.rule.allowed_requests", List.of("rate"));

        return metrics;
    }

    /** Metrics emitted by the cluster controller. */
    private static Set<Metric> getClusterControllerMetrics() {
        Set<Metric> metrics = new LinkedHashSet<>();
        // Node counts per state
        for (String state : List.of("down", "initializing", "maintenance", "retired", "stopping", "up"))
            metrics.add(new Metric("cluster-controller." + state + ".count.last"));
        metrics.add(new Metric("cluster-controller.cluster-state-change.count"));

        addMetric(metrics, "cluster-controller.busy-tick-time-ms", List.of("last", "max", "sum", "count"));
        addMetric(metrics, "cluster-controller.idle-tick-time-ms", List.of("last", "max", "sum", "count"));
        addMetric(metrics, "cluster-controller.work-ms", List.of("last", "sum", "count"));
        metrics.add(new Metric("cluster-controller.is-master.last"));
        metrics.add(new Metric("cluster-controller.remote-task-queue.size.last"));
        metrics.add(new Metric("cluster-controller.node-event.count"));

        addMetric(metrics, "cluster-controller.resource_usage.nodes_above_limit", List.of("last", "max"));
        addMetric(metrics, "cluster-controller.resource_usage.max_memory_utilization", List.of("last", "max"));
        addMetric(metrics, "cluster-controller.resource_usage.max_disk_utilization", List.of("last", "max"));
        metrics.add(new Metric("cluster-controller.resource_usage.disk_limit.last"));
        metrics.add(new Metric("cluster-controller.resource_usage.memory_limit.last"));

        metrics.add(new Metric("reindexing.progress.last"));
        return metrics;
    }

    /** Metrics emitted by document processors. */
    private static Set<Metric> getDocprocMetrics() {
        Set<Metric> metrics = new LinkedHashSet<>();
        metrics.add(new Metric("documents_processed.rate"));
        return metrics;
    }

    /** Metrics emitted by the query serving container (qrserver). */
    private static Set<Metric> getQrserverMetrics() {
        Set<Metric> metrics = new LinkedHashSet<>();
        metrics.add(new Metric("peak_qps.max"));
        addMetric(metrics, "search_connections", List.of("max", "sum", "count", "average"));
        addMetric(metrics, "feed.latency", List.of("max", "sum", "count", "average"));
        addMetric(metrics, "feed.http-requests", List.of("count", "rate"));
        metrics.add(new Metric("queries.rate"));
        addMetric(metrics, "query_container_latency", List.of("max", "sum", "count", "average"));
        addMetric(metrics, "query_latency",
                  List.of("max", "sum", "count", "average", "95percentile", "99percentile"));
        metrics.add(new Metric("failed_queries.rate"));
        metrics.add(new Metric("degraded_queries.rate"));
        addMetric(metrics, "hits_per_query",
                  List.of("max", "sum", "count", "average", "95percentile", "99percentile"));
        addMetric(metrics, "query_hit_offset", List.of("max", "sum", "count"));
        metrics.add(new Metric("documents_covered.count"));
        metrics.add(new Metric("documents_total.count"));
        metrics.add(new Metric("dispatch_internal.rate"));
        metrics.add(new Metric("dispatch_fdispatch.rate"));
        // Fixed: this suffix collection was a Set.of(..), whose iteration order is unspecified,
        // making insertion order nondeterministic. Same members, now in a deterministic List.
        addMetric(metrics, "jdisc.render.latency", List.of("min", "max", "count", "sum", "last", "average"));
        addMetric(metrics, "totalhits_per_query",
                  List.of("max", "sum", "count", "average", "95percentile", "99percentile"));
        metrics.add(new Metric("empty_results.rate"));
        addMetric(metrics, "requestsOverQuota", List.of("rate", "count"));

        // Relevance at cutoffs 1, 3 and 10
        addMetric(metrics, "relevance.at_1", List.of("sum", "count", "average"));
        addMetric(metrics, "relevance.at_3", List.of("sum", "count", "average"));
        addMetric(metrics, "relevance.at_10", List.of("sum", "count", "average"));

        // Error categories, rates only
        for (String error : List.of("timeout", "backends_oos", "plugin_failure", "backend_communication_error",
                                    "empty_document_summaries", "invalid_query_parameter", "internal_server_error",
                                    "misconfigured_server", "invalid_query_transformation", "result_with_errors",
                                    "unspecified", "unhandled_exception"))
            metrics.add(new Metric("error." + error + ".rate"));
        return metrics;
    }

    /** Registers the standard executor/thread-pool metrics under the given metric name prefix. */
    private static void addSearchNodeExecutorMetrics(Set<Metric> metrics, String prefix) {
        addMetric(metrics, prefix + ".queuesize", List.of("max", "sum", "count"));
        metrics.add(new Metric(prefix + ".maxpending.last"));
        metrics.add(new Metric(prefix + ".accepted.rate"));
        metrics.add(new Metric(prefix + ".wakeups.rate"));
        addMetric(metrics, prefix + ".utilization", List.of("max", "sum", "count"));
    }

    /** Metrics emitted by the search node (proton). */
    private static Set<Metric> getSearchNodeMetrics() {
        Set<Metric> metrics = new LinkedHashSet<>();

        metrics.add(new Metric("content.proton.documentdb.documents.total.last"));
        metrics.add(new Metric("content.proton.documentdb.documents.ready.last"));
        metrics.add(new Metric("content.proton.documentdb.documents.active.last"));
        metrics.add(new Metric("content.proton.documentdb.documents.removed.last"));

        metrics.add(new Metric("content.proton.documentdb.index.docs_in_memory.last"));
        metrics.add(new Metric("content.proton.documentdb.disk_usage.last"));
        metrics.add(new Metric("content.proton.documentdb.memory_usage.allocated_bytes.max"));
        metrics.add(new Metric("content.proton.documentdb.heart_beat_age.last"));
        metrics.add(new Metric("content.proton.transport.query.count.rate"));
        metrics.add(new Metric("content.proton.docsum.docs.rate"));
        addMetric(metrics, "content.proton.docsum.latency", List.of("max", "sum", "count", "average"));
        addMetric(metrics, "content.proton.transport.query.latency", List.of("max", "sum", "count", "average"));

        // Search protocol
        addMetric(metrics, "content.proton.search_protocol.query.latency", List.of("max", "sum", "count"));
        addMetric(metrics, "content.proton.search_protocol.query.request_size", List.of("max", "sum", "count"));
        addMetric(metrics, "content.proton.search_protocol.query.reply_size", List.of("max", "sum", "count"));
        addMetric(metrics, "content.proton.search_protocol.docsum.latency", List.of("max", "sum", "count"));
        addMetric(metrics, "content.proton.search_protocol.docsum.request_size", List.of("max", "sum", "count"));
        addMetric(metrics, "content.proton.search_protocol.docsum.reply_size", List.of("max", "sum", "count"));
        metrics.add(new Metric("content.proton.search_protocol.docsum.requested_documents.count"));

        // Executors shared between document dbs
        addSearchNodeExecutorMetrics(metrics, "content.proton.executor.proton");
        addSearchNodeExecutorMetrics(metrics, "content.proton.executor.flush");
        addSearchNodeExecutorMetrics(metrics, "content.proton.executor.match");
        addSearchNodeExecutorMetrics(metrics, "content.proton.executor.docsum");
        addSearchNodeExecutorMetrics(metrics, "content.proton.executor.shared");
        addSearchNodeExecutorMetrics(metrics, "content.proton.executor.warmup");
        addSearchNodeExecutorMetrics(metrics, "content.proton.executor.field_writer");

        // Maintenance job load, averages only
        for (String job : List.of("total", "attribute_flush", "memory_index_flush", "disk_index_fusion",
                                  "document_store_flush", "document_store_compact", "bucket_move",
                                  "lid_space_compact", "removed_documents_prune"))
            metrics.add(new Metric("content.proton.documentdb.job." + job + ".average"));

        // Per-document-db threading service executors
        addSearchNodeExecutorMetrics(metrics, "content.proton.documentdb.threading_service.master");
        addSearchNodeExecutorMetrics(metrics, "content.proton.documentdb.threading_service.index");
        addSearchNodeExecutorMetrics(metrics, "content.proton.documentdb.threading_service.summary");
        addSearchNodeExecutorMetrics(metrics, "content.proton.documentdb.threading_service.index_field_inverter");
        addSearchNodeExecutorMetrics(metrics, "content.proton.documentdb.threading_service.index_field_writer");
        addSearchNodeExecutorMetrics(metrics, "content.proton.documentdb.threading_service.attribute_field_writer");

        // Lid space metrics per sub-db (ready, notready, removed), grouped by metric as before
        List<String> subDbs = List.of("ready", "notready", "removed");
        for (String subDb : subDbs)
            metrics.add(new Metric("content.proton.documentdb." + subDb + ".lid_space.lid_bloat_factor.average"));
        for (String subDb : subDbs)
            metrics.add(new Metric("content.proton.documentdb." + subDb + ".lid_space.lid_fragmentation_factor.average"));
        for (String subDb : subDbs)
            metrics.add(new Metric("content.proton.documentdb." + subDb + ".lid_space.lid_limit.last"));
        for (String subDb : subDbs)
            metrics.add(new Metric("content.proton.documentdb." + subDb + ".lid_space.highest_used_lid.last"));
        for (String subDb : subDbs)
            metrics.add(new Metric("content.proton.documentdb." + subDb + ".lid_space.used_lids.last"));

        metrics.add(new Metric("content.proton.documentdb.bucket_move.buckets_pending.last"));

        // Resource usage
        metrics.add(new Metric("content.proton.resource_usage.disk.average"));
        metrics.add(new Metric("content.proton.resource_usage.disk_utilization.average"));
        metrics.add(new Metric("content.proton.resource_usage.memory.average"));
        metrics.add(new Metric("content.proton.resource_usage.memory_utilization.average"));
        metrics.add(new Metric("content.proton.resource_usage.transient_memory.average"));
        metrics.add(new Metric("content.proton.resource_usage.transient_disk.average"));
        metrics.add(new Metric("content.proton.resource_usage.memory_mappings.max"));
        metrics.add(new Metric("content.proton.resource_usage.open_file_descriptors.max"));
        metrics.add(new Metric("content.proton.resource_usage.feeding_blocked.max"));
        metrics.add(new Metric("content.proton.resource_usage.malloc_arena.max"));
        metrics.add(new Metric("content.proton.documentdb.attribute.resource_usage.address_space.max"));
        addMetric(metrics, "content.proton.documentdb.attribute.resource_usage.feeding_blocked",
                  List.of("last", "max"));

        // Transaction log
        metrics.add(new Metric("content.proton.transactionlog.entries.average"));
        metrics.add(new Metric("content.proton.transactionlog.disk_usage.average"));
        metrics.add(new Metric("content.proton.transactionlog.replay_time.last"));

        // Document store, per sub-db, averages only
        for (String subDb : subDbs)
            for (String name : List.of("disk_usage", "disk_bloat", "max_bucket_spread",
                                       "memory_usage.allocated_bytes", "memory_usage.used_bytes",
                                       "memory_usage.dead_bytes", "memory_usage.onhold_bytes"))
                metrics.add(new Metric("content.proton.documentdb." + subDb + ".document_store." + name + ".average"));

        // Document store cache (ready and notready sub-dbs only)
        for (String subDb : List.of("ready", "notready")) {
            String prefix = "content.proton.documentdb." + subDb + ".document_store.cache.";
            metrics.add(new Metric(prefix + "memory_usage.average"));
            metrics.add(new Metric(prefix + "hit_rate.average"));
            metrics.add(new Metric(prefix + "lookups.rate"));
            metrics.add(new Metric(prefix + "invalidations.rate"));
        }

        // Attribute memory usage (ready and notready sub-dbs only)
        for (String subDb : List.of("ready", "notready"))
            for (String part : List.of("allocated_bytes", "used_bytes", "dead_bytes", "onhold_bytes"))
                metrics.add(new Metric("content.proton.documentdb." + subDb + ".attribute.memory_usage."
                                       + part + ".average"));

        // Index memory usage
        for (String part : List.of("allocated_bytes", "used_bytes", "dead_bytes", "onhold_bytes"))
            metrics.add(new Metric("content.proton.documentdb.index.memory_usage." + part + ".average"));

        // Matching
        metrics.add(new Metric("content.proton.documentdb.matching.queries.rate"));
        metrics.add(new Metric("content.proton.documentdb.matching.soft_doomed_queries.rate"));
        addMetric(metrics, "content.proton.documentdb.matching.query_latency",
                  List.of("max", "sum", "count", "average"));
        addMetric(metrics, "content.proton.documentdb.matching.query_collateral_time",
                  List.of("max", "sum", "count", "average"));
        addMetric(metrics, "content.proton.documentdb.matching.query_setup_time",
                  List.of("max", "sum", "count"));
        addMetric(metrics, "content.proton.documentdb.matching.docs_matched",
                  List.of("rate", "max", "sum", "count"));

        // Matching, per rank profile
        metrics.add(new Metric("content.proton.documentdb.matching.rank_profile.queries.rate"));
        metrics.add(new Metric("content.proton.documentdb.matching.rank_profile.soft_doomed_queries.rate"));
        addMetric(metrics, "content.proton.documentdb.matching.rank_profile.soft_doom_factor",
                  List.of("min", "max", "sum", "count"));
        addMetric(metrics, "content.proton.documentdb.matching.rank_profile.query_latency",
                  List.of("max", "sum", "count", "average"));
        addMetric(metrics, "content.proton.documentdb.matching.rank_profile.query_collateral_time",
                  List.of("max", "sum", "count", "average"));
        addMetric(metrics, "content.proton.documentdb.matching.rank_profile.query_setup_time",
                  List.of("max", "sum", "count"));
        addMetric(metrics, "content.proton.documentdb.matching.rank_profile.rerank_time",
                  List.of("max", "sum", "count", "average"));
        addMetric(metrics, "content.proton.documentdb.matching.rank_profile.docs_matched",
                  List.of("rate", "max", "sum", "count"));
        metrics.add(new Metric("content.proton.documentdb.matching.rank_profile.limited_queries.rate"));

        // Feeding
        addMetric(metrics, "content.proton.documentdb.feeding.commit.operations",
                  List.of("max", "sum", "count", "rate"));
        addMetric(metrics, "content.proton.documentdb.feeding.commit.latency",
                  List.of("max", "sum", "count"));

        return metrics;
    }

    /**
     * Adds {@code metricName} once per aggregate suffix ("max", "sum", ...), in the order the
     * suffixes are given, so insertion order into the metric set stays deterministic.
     */
    private static void addMetric(Set<Metric> metrics, String metricName, Iterable<String> aggregateSuffixes) {
        for (String suffix : aggregateSuffixes) {
            metrics.add(new Metric(metricName + "." + suffix));
        }
    }
}
Thanks. Fixed.
/**
 * Builds the set of storage-layer metrics (datastored, visitor, filestor,
 * idealstate, distributor, bouncer and merge-throttler) to report.
 *
 * <p>Metric names are kept in a flat array, in the original registration
 * order; the {@link LinkedHashSet} preserves that insertion order. A
 * duplicate registration of
 * {@code vds.distributor.puts.sum.failures.concurrent_mutations.rate} has
 * been removed: it appeared both in the put-failures group and again right
 * after {@code inconsistent_bucket}. Adding it twice was a no-op on a Set,
 * but misleading to maintainers. A few other names intentionally present
 * twice in the original (e.g. the filestor {@code *.sum.count.rate} entries)
 * are kept as-is; the Set deduplicates them, so the resulting metric set is
 * unchanged.
 *
 * @return the ordered set of storage metrics
 */
private static Set<Metric> getStorageMetrics() {
    // One name per line: easy to diff, regroup, and spot duplicates.
    String[] names = {
            "vds.datastored.alldisks.docs.average",
            "vds.datastored.alldisks.bytes.average",
            "vds.visitor.allthreads.averagevisitorlifetime.sum.max",
            "vds.visitor.allthreads.averagevisitorlifetime.sum.sum",
            "vds.visitor.allthreads.averagevisitorlifetime.sum.count",
            "vds.visitor.allthreads.averagevisitorlifetime.sum.average",
            "vds.visitor.allthreads.averagequeuewait.sum.max",
            "vds.visitor.allthreads.averagequeuewait.sum.sum",
            "vds.visitor.allthreads.averagequeuewait.sum.count",
            "vds.visitor.allthreads.averagequeuewait.sum.average",
            "vds.filestor.alldisks.allthreads.put.sum.count.rate",
            "vds.filestor.alldisks.allthreads.remove.sum.count.rate",
            "vds.filestor.alldisks.allthreads.get.sum.count.rate",
            "vds.filestor.alldisks.allthreads.update.sum.count.rate",
            "vds.filestor.alldisks.allthreads.createiterator.count.rate",
            "vds.filestor.alldisks.allthreads.visit.sum.count.rate",
            "vds.filestor.alldisks.allthreads.remove_location.sum.count.rate",
            "vds.filestor.alldisks.queuesize.max",
            "vds.filestor.alldisks.queuesize.sum",
            "vds.filestor.alldisks.queuesize.count",
            "vds.filestor.alldisks.queuesize.average",
            "vds.filestor.alldisks.averagequeuewait.sum.max",
            "vds.filestor.alldisks.averagequeuewait.sum.sum",
            "vds.filestor.alldisks.averagequeuewait.sum.count",
            "vds.filestor.alldisks.averagequeuewait.sum.average",
            "vds.filestor.alldisks.active_operations.size.max",
            "vds.filestor.alldisks.active_operations.size.sum",
            "vds.filestor.alldisks.active_operations.size.count",
            "vds.filestor.alldisks.active_operations.latency.max",
            "vds.filestor.alldisks.active_operations.latency.sum",
            "vds.filestor.alldisks.active_operations.latency.count",
            "vds.filestor.alldisks.allthreads.mergemetadatareadlatency.max",
            "vds.filestor.alldisks.allthreads.mergemetadatareadlatency.sum",
            "vds.filestor.alldisks.allthreads.mergemetadatareadlatency.count",
            "vds.filestor.alldisks.allthreads.mergedatareadlatency.max",
            "vds.filestor.alldisks.allthreads.mergedatareadlatency.sum",
            "vds.filestor.alldisks.allthreads.mergedatareadlatency.count",
            "vds.filestor.alldisks.allthreads.mergedatawritelatency.max",
            "vds.filestor.alldisks.allthreads.mergedatawritelatency.sum",
            "vds.filestor.alldisks.allthreads.mergedatawritelatency.count",
            "vds.filestor.alldisks.allthreads.put_latency.max",
            "vds.filestor.alldisks.allthreads.put_latency.sum",
            "vds.filestor.alldisks.allthreads.put_latency.count",
            "vds.filestor.alldisks.allthreads.remove_latency.max",
            "vds.filestor.alldisks.allthreads.remove_latency.sum",
            "vds.filestor.alldisks.allthreads.remove_latency.count",
            "vds.visitor.allthreads.queuesize.count.max",
            "vds.visitor.allthreads.queuesize.count.sum",
            "vds.visitor.allthreads.queuesize.count.count",
            "vds.visitor.allthreads.queuesize.count.average",
            "vds.visitor.allthreads.completed.sum.average",
            "vds.visitor.allthreads.completed.sum.rate",
            "vds.visitor.allthreads.created.sum.rate",
            "vds.visitor.allthreads.failed.sum.rate",
            "vds.visitor.allthreads.averagemessagesendtime.sum.max",
            "vds.visitor.allthreads.averagemessagesendtime.sum.sum",
            "vds.visitor.allthreads.averagemessagesendtime.sum.count",
            "vds.visitor.allthreads.averagemessagesendtime.sum.average",
            "vds.visitor.allthreads.averageprocessingtime.sum.max",
            "vds.visitor.allthreads.averageprocessingtime.sum.sum",
            "vds.visitor.allthreads.averageprocessingtime.sum.count",
            "vds.visitor.allthreads.averageprocessingtime.sum.average",
            // The four *.sum.count.rate names below repeat entries above;
            // kept from the original source, deduplicated by the Set.
            "vds.filestor.alldisks.allthreads.put.sum.count.rate",
            "vds.filestor.alldisks.allthreads.put.sum.failed.rate",
            "vds.filestor.alldisks.allthreads.put.sum.test_and_set_failed.rate",
            "vds.filestor.alldisks.allthreads.put.sum.latency.max",
            "vds.filestor.alldisks.allthreads.put.sum.latency.sum",
            "vds.filestor.alldisks.allthreads.put.sum.latency.count",
            "vds.filestor.alldisks.allthreads.put.sum.latency.average",
            "vds.filestor.alldisks.allthreads.put.sum.request_size.max",
            "vds.filestor.alldisks.allthreads.put.sum.request_size.sum",
            "vds.filestor.alldisks.allthreads.put.sum.request_size.count",
            "vds.filestor.alldisks.allthreads.remove.sum.count.rate",
            "vds.filestor.alldisks.allthreads.remove.sum.failed.rate",
            "vds.filestor.alldisks.allthreads.remove.sum.test_and_set_failed.rate",
            "vds.filestor.alldisks.allthreads.remove.sum.latency.max",
            "vds.filestor.alldisks.allthreads.remove.sum.latency.sum",
            "vds.filestor.alldisks.allthreads.remove.sum.latency.count",
            "vds.filestor.alldisks.allthreads.remove.sum.latency.average",
            "vds.filestor.alldisks.allthreads.remove.sum.request_size.max",
            "vds.filestor.alldisks.allthreads.remove.sum.request_size.sum",
            "vds.filestor.alldisks.allthreads.remove.sum.request_size.count",
            "vds.filestor.alldisks.allthreads.get.sum.count.rate",
            "vds.filestor.alldisks.allthreads.get.sum.failed.rate",
            "vds.filestor.alldisks.allthreads.get.sum.latency.max",
            "vds.filestor.alldisks.allthreads.get.sum.latency.sum",
            "vds.filestor.alldisks.allthreads.get.sum.latency.count",
            "vds.filestor.alldisks.allthreads.get.sum.latency.average",
            "vds.filestor.alldisks.allthreads.get.sum.request_size.max",
            "vds.filestor.alldisks.allthreads.get.sum.request_size.sum",
            "vds.filestor.alldisks.allthreads.get.sum.request_size.count",
            "vds.filestor.alldisks.allthreads.update.sum.count.rate",
            "vds.filestor.alldisks.allthreads.update.sum.failed.rate",
            "vds.filestor.alldisks.allthreads.update.sum.test_and_set_failed.rate",
            "vds.filestor.alldisks.allthreads.update.sum.latency.max",
            "vds.filestor.alldisks.allthreads.update.sum.latency.sum",
            "vds.filestor.alldisks.allthreads.update.sum.latency.count",
            "vds.filestor.alldisks.allthreads.update.sum.latency.average",
            "vds.filestor.alldisks.allthreads.update.sum.request_size.max",
            "vds.filestor.alldisks.allthreads.update.sum.request_size.sum",
            "vds.filestor.alldisks.allthreads.update.sum.request_size.count",
            "vds.filestor.alldisks.allthreads.createiterator.latency.max",
            "vds.filestor.alldisks.allthreads.createiterator.latency.sum",
            "vds.filestor.alldisks.allthreads.createiterator.latency.count",
            "vds.filestor.alldisks.allthreads.createiterator.latency.average",
            "vds.filestor.alldisks.allthreads.visit.sum.latency.max",
            "vds.filestor.alldisks.allthreads.visit.sum.latency.sum",
            "vds.filestor.alldisks.allthreads.visit.sum.latency.count",
            "vds.filestor.alldisks.allthreads.visit.sum.latency.average",
            "vds.filestor.alldisks.allthreads.remove_location.sum.latency.max",
            "vds.filestor.alldisks.allthreads.remove_location.sum.latency.sum",
            "vds.filestor.alldisks.allthreads.remove_location.sum.latency.count",
            "vds.filestor.alldisks.allthreads.remove_location.sum.latency.average",
            "vds.filestor.alldisks.allthreads.splitbuckets.count.rate",
            "vds.filestor.alldisks.allthreads.joinbuckets.count.rate",
            "vds.filestor.alldisks.allthreads.deletebuckets.count.rate",
            "vds.filestor.alldisks.allthreads.deletebuckets.failed.rate",
            "vds.filestor.alldisks.allthreads.deletebuckets.latency.max",
            "vds.filestor.alldisks.allthreads.deletebuckets.latency.sum",
            "vds.filestor.alldisks.allthreads.deletebuckets.latency.count",
            "vds.filestor.alldisks.allthreads.deletebuckets.latency.average",
            "vds.filestor.alldisks.allthreads.setbucketstates.count.rate",
            "vds.idealstate.buckets_rechecking.average",
            "vds.idealstate.idealstate_diff.average",
            "vds.idealstate.buckets_toofewcopies.average",
            "vds.idealstate.buckets_toomanycopies.average",
            "vds.idealstate.buckets.average",
            "vds.idealstate.buckets_notrusted.average",
            "vds.idealstate.bucket_replicas_moving_out.average",
            "vds.idealstate.bucket_replicas_copying_out.average",
            "vds.idealstate.bucket_replicas_copying_in.average",
            "vds.idealstate.bucket_replicas_syncing.average",
            "vds.idealstate.max_observed_time_since_last_gc_sec.average",
            "vds.idealstate.delete_bucket.done_ok.rate",
            "vds.idealstate.delete_bucket.done_failed.rate",
            "vds.idealstate.delete_bucket.pending.average",
            "vds.idealstate.merge_bucket.done_ok.rate",
            "vds.idealstate.merge_bucket.done_failed.rate",
            "vds.idealstate.merge_bucket.pending.average",
            "vds.idealstate.merge_bucket.blocked.rate",
            "vds.idealstate.merge_bucket.throttled.rate",
            "vds.idealstate.merge_bucket.source_only_copy_changed.rate",
            "vds.idealstate.merge_bucket.source_only_copy_delete_blocked.rate",
            "vds.idealstate.merge_bucket.source_only_copy_delete_failed.rate",
            "vds.idealstate.split_bucket.done_ok.rate",
            "vds.idealstate.split_bucket.done_failed.rate",
            "vds.idealstate.split_bucket.pending.average",
            "vds.idealstate.join_bucket.done_ok.rate",
            "vds.idealstate.join_bucket.done_failed.rate",
            "vds.idealstate.join_bucket.pending.average",
            "vds.idealstate.garbage_collection.done_ok.rate",
            "vds.idealstate.garbage_collection.done_failed.rate",
            "vds.idealstate.garbage_collection.pending.average",
            "vds.idealstate.garbage_collection.documents_removed.count",
            "vds.idealstate.garbage_collection.documents_removed.rate",
            "vds.distributor.puts.sum.latency.max",
            "vds.distributor.puts.sum.latency.sum",
            "vds.distributor.puts.sum.latency.count",
            "vds.distributor.puts.sum.latency.average",
            "vds.distributor.puts.sum.ok.rate",
            "vds.distributor.puts.sum.failures.total.rate",
            "vds.distributor.puts.sum.failures.notfound.rate",
            "vds.distributor.puts.sum.failures.test_and_set_failed.rate",
            "vds.distributor.puts.sum.failures.concurrent_mutations.rate",
            "vds.distributor.puts.sum.failures.notconnected.rate",
            "vds.distributor.puts.sum.failures.notready.rate",
            "vds.distributor.puts.sum.failures.wrongdistributor.rate",
            "vds.distributor.puts.sum.failures.safe_time_not_reached.rate",
            "vds.distributor.puts.sum.failures.storagefailure.rate",
            "vds.distributor.puts.sum.failures.timeout.rate",
            "vds.distributor.puts.sum.failures.busy.rate",
            "vds.distributor.puts.sum.failures.inconsistent_bucket.rate",
            // Duplicate "vds.distributor.puts.sum.failures.concurrent_mutations.rate"
            // removed here (already registered in the put-failures group above).
            "vds.distributor.removes.sum.latency.max",
            "vds.distributor.removes.sum.latency.sum",
            "vds.distributor.removes.sum.latency.count",
            "vds.distributor.removes.sum.latency.average",
            "vds.distributor.removes.sum.ok.rate",
            "vds.distributor.removes.sum.failures.total.rate",
            "vds.distributor.removes.sum.failures.notfound.rate",
            "vds.distributor.removes.sum.failures.test_and_set_failed.rate",
            "vds.distributor.removes.sum.failures.concurrent_mutations.rate",
            "vds.distributor.updates.sum.latency.max",
            "vds.distributor.updates.sum.latency.sum",
            "vds.distributor.updates.sum.latency.count",
            "vds.distributor.updates.sum.latency.average",
            "vds.distributor.updates.sum.ok.rate",
            "vds.distributor.updates.sum.failures.total.rate",
            "vds.distributor.updates.sum.failures.notfound.rate",
            "vds.distributor.updates.sum.failures.test_and_set_failed.rate",
            "vds.distributor.updates.sum.failures.concurrent_mutations.rate",
            "vds.distributor.updates.sum.diverging_timestamp_updates.rate",
            "vds.distributor.removelocations.sum.ok.rate",
            "vds.distributor.removelocations.sum.failures.total.rate",
            "vds.distributor.gets.sum.latency.max",
            "vds.distributor.gets.sum.latency.sum",
            "vds.distributor.gets.sum.latency.count",
            "vds.distributor.gets.sum.latency.average",
            "vds.distributor.gets.sum.ok.rate",
            "vds.distributor.gets.sum.failures.total.rate",
            "vds.distributor.gets.sum.failures.notfound.rate",
            "vds.distributor.visitor.sum.latency.max",
            "vds.distributor.visitor.sum.latency.sum",
            "vds.distributor.visitor.sum.latency.count",
            "vds.distributor.visitor.sum.latency.average",
            "vds.distributor.visitor.sum.ok.rate",
            "vds.distributor.visitor.sum.failures.total.rate",
            "vds.distributor.docsstored.average",
            "vds.distributor.bytesstored.average",
            "vds.bouncer.clock_skew_aborts.count",
            "vds.mergethrottler.averagequeuewaitingtime.max",
            "vds.mergethrottler.averagequeuewaitingtime.sum",
            "vds.mergethrottler.averagequeuewaitingtime.count",
            "vds.mergethrottler.queuesize.max",
            "vds.mergethrottler.queuesize.sum",
            "vds.mergethrottler.queuesize.count",
            "vds.mergethrottler.active_window_size.max",
            "vds.mergethrottler.active_window_size.sum",
            "vds.mergethrottler.active_window_size.count",
            "vds.mergethrottler.bounced_due_to_back_pressure.rate",
            "vds.mergethrottler.locallyexecutedmerges.ok.rate",
            "vds.mergethrottler.mergechains.ok.rate",
            "vds.mergethrottler.mergechains.failures.busy.rate",
            "vds.mergethrottler.mergechains.failures.total.rate"
    };
    Set<Metric> metrics = new LinkedHashSet<>();
    for (String name : names) {
        metrics.add(new Metric(name));
    }
    return metrics;
}
metrics.add(new Metric("vds.distributor.puts.sum.failures.concurrent_mutations.rate"));
/**
 * Returns the storage-layer metrics to report: document/byte counts, visitor
 * and filestor thread metrics, ideal-state maintenance metrics, distributor
 * operation metrics, bouncer clock-skew aborts and merge-throttler metrics.
 *
 * <p>Registration order is significant only in that it is preserved for
 * readability (a {@link LinkedHashSet} keeps insertion order); the names are
 * therefore listed in a single table below, in the same order the original
 * code added them. A handful of names occur twice in the table (carried over
 * from the original source); the Set keeps only the first occurrence, so the
 * resulting metric set is identical.
 *
 * @return the ordered set of storage metric definitions
 */
private static Set<Metric> getStorageMetrics() {
    Set<Metric> storageMetrics = new LinkedHashSet<>();
    String[] metricNames = {
            "vds.datastored.alldisks.docs.average",
            "vds.datastored.alldisks.bytes.average",
            "vds.visitor.allthreads.averagevisitorlifetime.sum.max",
            "vds.visitor.allthreads.averagevisitorlifetime.sum.sum",
            "vds.visitor.allthreads.averagevisitorlifetime.sum.count",
            "vds.visitor.allthreads.averagevisitorlifetime.sum.average",
            "vds.visitor.allthreads.averagequeuewait.sum.max",
            "vds.visitor.allthreads.averagequeuewait.sum.sum",
            "vds.visitor.allthreads.averagequeuewait.sum.count",
            "vds.visitor.allthreads.averagequeuewait.sum.average",
            "vds.filestor.alldisks.allthreads.put.sum.count.rate",
            "vds.filestor.alldisks.allthreads.remove.sum.count.rate",
            "vds.filestor.alldisks.allthreads.get.sum.count.rate",
            "vds.filestor.alldisks.allthreads.update.sum.count.rate",
            "vds.filestor.alldisks.allthreads.createiterator.count.rate",
            "vds.filestor.alldisks.allthreads.visit.sum.count.rate",
            "vds.filestor.alldisks.allthreads.remove_location.sum.count.rate",
            "vds.filestor.alldisks.queuesize.max",
            "vds.filestor.alldisks.queuesize.sum",
            "vds.filestor.alldisks.queuesize.count",
            "vds.filestor.alldisks.queuesize.average",
            "vds.filestor.alldisks.averagequeuewait.sum.max",
            "vds.filestor.alldisks.averagequeuewait.sum.sum",
            "vds.filestor.alldisks.averagequeuewait.sum.count",
            "vds.filestor.alldisks.averagequeuewait.sum.average",
            "vds.filestor.alldisks.active_operations.size.max",
            "vds.filestor.alldisks.active_operations.size.sum",
            "vds.filestor.alldisks.active_operations.size.count",
            "vds.filestor.alldisks.active_operations.latency.max",
            "vds.filestor.alldisks.active_operations.latency.sum",
            "vds.filestor.alldisks.active_operations.latency.count",
            "vds.filestor.alldisks.allthreads.mergemetadatareadlatency.max",
            "vds.filestor.alldisks.allthreads.mergemetadatareadlatency.sum",
            "vds.filestor.alldisks.allthreads.mergemetadatareadlatency.count",
            "vds.filestor.alldisks.allthreads.mergedatareadlatency.max",
            "vds.filestor.alldisks.allthreads.mergedatareadlatency.sum",
            "vds.filestor.alldisks.allthreads.mergedatareadlatency.count",
            "vds.filestor.alldisks.allthreads.mergedatawritelatency.max",
            "vds.filestor.alldisks.allthreads.mergedatawritelatency.sum",
            "vds.filestor.alldisks.allthreads.mergedatawritelatency.count",
            "vds.filestor.alldisks.allthreads.put_latency.max",
            "vds.filestor.alldisks.allthreads.put_latency.sum",
            "vds.filestor.alldisks.allthreads.put_latency.count",
            "vds.filestor.alldisks.allthreads.remove_latency.max",
            "vds.filestor.alldisks.allthreads.remove_latency.sum",
            "vds.filestor.alldisks.allthreads.remove_latency.count",
            "vds.visitor.allthreads.queuesize.count.max",
            "vds.visitor.allthreads.queuesize.count.sum",
            "vds.visitor.allthreads.queuesize.count.count",
            "vds.visitor.allthreads.queuesize.count.average",
            "vds.visitor.allthreads.completed.sum.average",
            "vds.visitor.allthreads.completed.sum.rate",
            "vds.visitor.allthreads.created.sum.rate",
            "vds.visitor.allthreads.failed.sum.rate",
            "vds.visitor.allthreads.averagemessagesendtime.sum.max",
            "vds.visitor.allthreads.averagemessagesendtime.sum.sum",
            "vds.visitor.allthreads.averagemessagesendtime.sum.count",
            "vds.visitor.allthreads.averagemessagesendtime.sum.average",
            "vds.visitor.allthreads.averageprocessingtime.sum.max",
            "vds.visitor.allthreads.averageprocessingtime.sum.sum",
            "vds.visitor.allthreads.averageprocessingtime.sum.count",
            "vds.visitor.allthreads.averageprocessingtime.sum.average",
            // Repeats of earlier *.sum.count.rate entries, as in the original;
            // the Set silently deduplicates them.
            "vds.filestor.alldisks.allthreads.put.sum.count.rate",
            "vds.filestor.alldisks.allthreads.put.sum.failed.rate",
            "vds.filestor.alldisks.allthreads.put.sum.test_and_set_failed.rate",
            "vds.filestor.alldisks.allthreads.put.sum.latency.max",
            "vds.filestor.alldisks.allthreads.put.sum.latency.sum",
            "vds.filestor.alldisks.allthreads.put.sum.latency.count",
            "vds.filestor.alldisks.allthreads.put.sum.latency.average",
            "vds.filestor.alldisks.allthreads.put.sum.request_size.max",
            "vds.filestor.alldisks.allthreads.put.sum.request_size.sum",
            "vds.filestor.alldisks.allthreads.put.sum.request_size.count",
            "vds.filestor.alldisks.allthreads.remove.sum.count.rate",
            "vds.filestor.alldisks.allthreads.remove.sum.failed.rate",
            "vds.filestor.alldisks.allthreads.remove.sum.test_and_set_failed.rate",
            "vds.filestor.alldisks.allthreads.remove.sum.latency.max",
            "vds.filestor.alldisks.allthreads.remove.sum.latency.sum",
            "vds.filestor.alldisks.allthreads.remove.sum.latency.count",
            "vds.filestor.alldisks.allthreads.remove.sum.latency.average",
            "vds.filestor.alldisks.allthreads.remove.sum.request_size.max",
            "vds.filestor.alldisks.allthreads.remove.sum.request_size.sum",
            "vds.filestor.alldisks.allthreads.remove.sum.request_size.count",
            "vds.filestor.alldisks.allthreads.get.sum.count.rate",
            "vds.filestor.alldisks.allthreads.get.sum.failed.rate",
            "vds.filestor.alldisks.allthreads.get.sum.latency.max",
            "vds.filestor.alldisks.allthreads.get.sum.latency.sum",
            "vds.filestor.alldisks.allthreads.get.sum.latency.count",
            "vds.filestor.alldisks.allthreads.get.sum.latency.average",
            "vds.filestor.alldisks.allthreads.get.sum.request_size.max",
            "vds.filestor.alldisks.allthreads.get.sum.request_size.sum",
            "vds.filestor.alldisks.allthreads.get.sum.request_size.count",
            "vds.filestor.alldisks.allthreads.update.sum.count.rate",
            "vds.filestor.alldisks.allthreads.update.sum.failed.rate",
            "vds.filestor.alldisks.allthreads.update.sum.test_and_set_failed.rate",
            "vds.filestor.alldisks.allthreads.update.sum.latency.max",
            "vds.filestor.alldisks.allthreads.update.sum.latency.sum",
            "vds.filestor.alldisks.allthreads.update.sum.latency.count",
            "vds.filestor.alldisks.allthreads.update.sum.latency.average",
            "vds.filestor.alldisks.allthreads.update.sum.request_size.max",
            "vds.filestor.alldisks.allthreads.update.sum.request_size.sum",
            "vds.filestor.alldisks.allthreads.update.sum.request_size.count",
            "vds.filestor.alldisks.allthreads.createiterator.latency.max",
            "vds.filestor.alldisks.allthreads.createiterator.latency.sum",
            "vds.filestor.alldisks.allthreads.createiterator.latency.count",
            "vds.filestor.alldisks.allthreads.createiterator.latency.average",
            "vds.filestor.alldisks.allthreads.visit.sum.latency.max",
            "vds.filestor.alldisks.allthreads.visit.sum.latency.sum",
            "vds.filestor.alldisks.allthreads.visit.sum.latency.count",
            "vds.filestor.alldisks.allthreads.visit.sum.latency.average",
            "vds.filestor.alldisks.allthreads.remove_location.sum.latency.max",
            "vds.filestor.alldisks.allthreads.remove_location.sum.latency.sum",
            "vds.filestor.alldisks.allthreads.remove_location.sum.latency.count",
            "vds.filestor.alldisks.allthreads.remove_location.sum.latency.average",
            "vds.filestor.alldisks.allthreads.splitbuckets.count.rate",
            "vds.filestor.alldisks.allthreads.joinbuckets.count.rate",
            "vds.filestor.alldisks.allthreads.deletebuckets.count.rate",
            "vds.filestor.alldisks.allthreads.deletebuckets.failed.rate",
            "vds.filestor.alldisks.allthreads.deletebuckets.latency.max",
            "vds.filestor.alldisks.allthreads.deletebuckets.latency.sum",
            "vds.filestor.alldisks.allthreads.deletebuckets.latency.count",
            "vds.filestor.alldisks.allthreads.deletebuckets.latency.average",
            "vds.filestor.alldisks.allthreads.setbucketstates.count.rate",
            "vds.idealstate.buckets_rechecking.average",
            "vds.idealstate.idealstate_diff.average",
            "vds.idealstate.buckets_toofewcopies.average",
            "vds.idealstate.buckets_toomanycopies.average",
            "vds.idealstate.buckets.average",
            "vds.idealstate.buckets_notrusted.average",
            "vds.idealstate.bucket_replicas_moving_out.average",
            "vds.idealstate.bucket_replicas_copying_out.average",
            "vds.idealstate.bucket_replicas_copying_in.average",
            "vds.idealstate.bucket_replicas_syncing.average",
            "vds.idealstate.max_observed_time_since_last_gc_sec.average",
            "vds.idealstate.delete_bucket.done_ok.rate",
            "vds.idealstate.delete_bucket.done_failed.rate",
            "vds.idealstate.delete_bucket.pending.average",
            "vds.idealstate.merge_bucket.done_ok.rate",
            "vds.idealstate.merge_bucket.done_failed.rate",
            "vds.idealstate.merge_bucket.pending.average",
            "vds.idealstate.merge_bucket.blocked.rate",
            "vds.idealstate.merge_bucket.throttled.rate",
            "vds.idealstate.merge_bucket.source_only_copy_changed.rate",
            "vds.idealstate.merge_bucket.source_only_copy_delete_blocked.rate",
            "vds.idealstate.merge_bucket.source_only_copy_delete_failed.rate",
            "vds.idealstate.split_bucket.done_ok.rate",
            "vds.idealstate.split_bucket.done_failed.rate",
            "vds.idealstate.split_bucket.pending.average",
            "vds.idealstate.join_bucket.done_ok.rate",
            "vds.idealstate.join_bucket.done_failed.rate",
            "vds.idealstate.join_bucket.pending.average",
            "vds.idealstate.garbage_collection.done_ok.rate",
            "vds.idealstate.garbage_collection.done_failed.rate",
            "vds.idealstate.garbage_collection.pending.average",
            "vds.idealstate.garbage_collection.documents_removed.count",
            "vds.idealstate.garbage_collection.documents_removed.rate",
            "vds.distributor.puts.sum.latency.max",
            "vds.distributor.puts.sum.latency.sum",
            "vds.distributor.puts.sum.latency.count",
            "vds.distributor.puts.sum.latency.average",
            "vds.distributor.puts.sum.ok.rate",
            "vds.distributor.puts.sum.failures.total.rate",
            "vds.distributor.puts.sum.failures.notfound.rate",
            "vds.distributor.puts.sum.failures.test_and_set_failed.rate",
            "vds.distributor.puts.sum.failures.concurrent_mutations.rate",
            "vds.distributor.puts.sum.failures.notconnected.rate",
            "vds.distributor.puts.sum.failures.notready.rate",
            "vds.distributor.puts.sum.failures.wrongdistributor.rate",
            "vds.distributor.puts.sum.failures.safe_time_not_reached.rate",
            "vds.distributor.puts.sum.failures.storagefailure.rate",
            "vds.distributor.puts.sum.failures.timeout.rate",
            "vds.distributor.puts.sum.failures.busy.rate",
            "vds.distributor.puts.sum.failures.inconsistent_bucket.rate",
            "vds.distributor.removes.sum.latency.max",
            "vds.distributor.removes.sum.latency.sum",
            "vds.distributor.removes.sum.latency.count",
            "vds.distributor.removes.sum.latency.average",
            "vds.distributor.removes.sum.ok.rate",
            "vds.distributor.removes.sum.failures.total.rate",
            "vds.distributor.removes.sum.failures.notfound.rate",
            "vds.distributor.removes.sum.failures.test_and_set_failed.rate",
            "vds.distributor.removes.sum.failures.concurrent_mutations.rate",
            "vds.distributor.updates.sum.latency.max",
            "vds.distributor.updates.sum.latency.sum",
            "vds.distributor.updates.sum.latency.count",
            "vds.distributor.updates.sum.latency.average",
            "vds.distributor.updates.sum.ok.rate",
            "vds.distributor.updates.sum.failures.total.rate",
            "vds.distributor.updates.sum.failures.notfound.rate",
            "vds.distributor.updates.sum.failures.test_and_set_failed.rate",
            "vds.distributor.updates.sum.failures.concurrent_mutations.rate",
            "vds.distributor.updates.sum.diverging_timestamp_updates.rate",
            "vds.distributor.removelocations.sum.ok.rate",
            "vds.distributor.removelocations.sum.failures.total.rate",
            "vds.distributor.gets.sum.latency.max",
            "vds.distributor.gets.sum.latency.sum",
            "vds.distributor.gets.sum.latency.count",
            "vds.distributor.gets.sum.latency.average",
            "vds.distributor.gets.sum.ok.rate",
            "vds.distributor.gets.sum.failures.total.rate",
            "vds.distributor.gets.sum.failures.notfound.rate",
            "vds.distributor.visitor.sum.latency.max",
            "vds.distributor.visitor.sum.latency.sum",
            "vds.distributor.visitor.sum.latency.count",
            "vds.distributor.visitor.sum.latency.average",
            "vds.distributor.visitor.sum.ok.rate",
            "vds.distributor.visitor.sum.failures.total.rate",
            "vds.distributor.docsstored.average",
            "vds.distributor.bytesstored.average",
            "vds.bouncer.clock_skew_aborts.count",
            "vds.mergethrottler.averagequeuewaitingtime.max",
            "vds.mergethrottler.averagequeuewaitingtime.sum",
            "vds.mergethrottler.averagequeuewaitingtime.count",
            "vds.mergethrottler.queuesize.max",
            "vds.mergethrottler.queuesize.sum",
            "vds.mergethrottler.queuesize.count",
            "vds.mergethrottler.active_window_size.max",
            "vds.mergethrottler.active_window_size.sum",
            "vds.mergethrottler.active_window_size.count",
            "vds.mergethrottler.bounced_due_to_back_pressure.rate",
            "vds.mergethrottler.locallyexecutedmerges.ok.rate",
            "vds.mergethrottler.mergechains.ok.rate",
            "vds.mergethrottler.mergechains.failures.busy.rate",
            "vds.mergethrottler.mergechains.failures.total.rate"
    };
    for (String metricName : metricNames) {
        storageMetrics.add(new Metric(metricName));
    }
    return storageMetrics;
}
/**
 * Declares the "vespa" metric set: an explicit, exhaustive enumeration of Vespa-internal
 * metric names (with their aggregation suffixes such as .max/.sum/.count/.average/.rate/.last)
 * that make up this consumer's metric set, grouped by the service that emits them.
 *
 * NOTE(review): this class references names declared elsewhere in the file/package and not
 * visible in this excerpt: Metric, MetricSet, defaultVespaMetricSet, singleton (presumably
 * java.util.Collections.singleton via static import — confirm), and getStorageMetrics().
 */
class VespaMetricSet {

    /**
     * The complete "vespa" metric set, built from the per-service groups below, with
     * defaultVespaMetricSet as its single parent set.
     */
    public static final MetricSet vespaMetricSet =
            new MetricSet("vespa", getVespaMetrics(), singleton(defaultVespaMetricSet));

    /**
     * Unions all per-service metric groups. LinkedHashSet preserves insertion order;
     * the returned set is unmodifiable.
     */
    private static Set<Metric> getVespaMetrics() {
        Set<Metric> metrics = new LinkedHashSet<>();
        metrics.addAll(getSearchNodeMetrics());
        metrics.addAll(getStorageMetrics()); // declared outside this excerpt — TODO confirm location
        metrics.addAll(getDocprocMetrics());
        metrics.addAll(getClusterControllerMetrics());
        metrics.addAll(getQrserverMetrics());
        metrics.addAll(getContainerMetrics());
        metrics.addAll(getConfigServerMetrics());
        metrics.addAll(getSentinelMetrics());
        metrics.addAll(getOtherMetrics());
        return Collections.unmodifiableSet(metrics);
    }

    /** Metrics emitted under the "sentinel." prefix. */
    private static Set<Metric> getSentinelMetrics() {
        Set<Metric> metrics = new LinkedHashSet<>();
        metrics.add(new Metric("sentinel.restarts.count"));
        metrics.add(new Metric("sentinel.totalRestarts.last"));
        metrics.add(new Metric("sentinel.uptime.last"));
        metrics.add(new Metric("sentinel.running.count"));
        metrics.add(new Metric("sentinel.running.last"));
        return metrics;
    }

    /** Metrics that do not belong to a single service group: slobrok, logd, worker, jrt/vds TLS counters. */
    private static Set<Metric> getOtherMetrics() {
        Set<Metric> metrics = new LinkedHashSet<>();
        metrics.add(new Metric("slobrok.heartbeats.failed.count"));
        metrics.add(new Metric("logd.processed.lines.count"));
        metrics.add(new Metric("worker.connections.max"));

        // TLS / connection counters from the jrt transport layer (no aggregation suffix).
        metrics.add(new Metric("jrt.transport.tls-certificate-verification-failures"));
        metrics.add(new Metric("jrt.transport.peer-authorization-failures"));
        metrics.add(new Metric("jrt.transport.server.tls-connections-established"));
        metrics.add(new Metric("jrt.transport.client.tls-connections-established"));
        metrics.add(new Metric("jrt.transport.server.unencrypted-connections-established"));
        metrics.add(new Metric("jrt.transport.client.unencrypted-connections-established"));

        // TLS / connection counters from the vds server network layer.
        metrics.add(new Metric("vds.server.network.tls-handshakes-failed"));
        metrics.add(new Metric("vds.server.network.peer-authorization-failures"));
        metrics.add(new Metric("vds.server.network.client.tls-connections-established"));
        metrics.add(new Metric("vds.server.network.server.tls-connections-established"));
        metrics.add(new Metric("vds.server.network.client.insecure-connections-established"));
        metrics.add(new Metric("vds.server.network.server.insecure-connections-established"));
        metrics.add(new Metric("vds.server.network.tls-connections-broken"));
        metrics.add(new Metric("vds.server.network.failed-tls-config-reloads"));
        metrics.add(new Metric("vds.server.fnet.num-connections"));
        metrics.add(new Metric("node-certificate.expiry.seconds"));
        return metrics;
    }

    /** Metrics emitted under the "configserver." prefix. */
    private static Set<Metric> getConfigServerMetrics() {
        Set<Metric> metrics = new LinkedHashSet<>();
        metrics.add(new Metric("configserver.requests.count"));
        metrics.add(new Metric("configserver.failedRequests.count"));
        metrics.add(new Metric("configserver.latency.max"));
        metrics.add(new Metric("configserver.latency.sum"));
        metrics.add(new Metric("configserver.latency.count"));
        metrics.add(new Metric("configserver.latency.average"));
        metrics.add(new Metric("configserver.cacheConfigElems.last"));
        metrics.add(new Metric("configserver.cacheChecksumElems.last"));
        metrics.add(new Metric("configserver.hosts.last"));
        metrics.add(new Metric("configserver.delayedResponses.count"));
        metrics.add(new Metric("configserver.sessionChangeErrors.count"));
        // ZooKeeper-related config server metrics.
        metrics.add(new Metric("configserver.zkZNodes.last"));
        metrics.add(new Metric("configserver.zkAvgLatency.last"));
        metrics.add(new Metric("configserver.zkMaxLatency.last"));
        metrics.add(new Metric("configserver.zkConnections.last"));
        metrics.add(new Metric("configserver.zkOutstandingRequests.last"));
        return metrics;
    }

    /** Metrics from the jdisc container: request handling, thread pools, JVM, HTTP status and TLS. */
    private static Set<Metric> getContainerMetrics() {
        Set<Metric> metrics = new LinkedHashSet<>();

        addMetric(metrics, "jdisc.http.requests", List.of("rate", "count"));
        metrics.add(new Metric("handled.requests.count"));
        metrics.add(new Metric("handled.latency.max"));
        metrics.add(new Metric("handled.latency.sum"));
        metrics.add(new Metric("handled.latency.count"));
        metrics.add(new Metric("handled.latency.average"));
        metrics.add(new Metric("serverRejectedRequests.rate"));
        metrics.add(new Metric("serverRejectedRequests.count"));

        // Container thread pool / connection gauges.
        metrics.add(new Metric("serverThreadPoolSize.average"));
        metrics.add(new Metric("serverThreadPoolSize.min"));
        metrics.add(new Metric("serverThreadPoolSize.max"));
        metrics.add(new Metric("serverThreadPoolSize.rate"));
        metrics.add(new Metric("serverThreadPoolSize.count"));
        metrics.add(new Metric("serverThreadPoolSize.last"));
        metrics.add(new Metric("serverActiveThreads.average"));
        metrics.add(new Metric("serverActiveThreads.min"));
        metrics.add(new Metric("serverActiveThreads.max"));
        metrics.add(new Metric("serverActiveThreads.rate"));
        metrics.add(new Metric("serverActiveThreads.sum"));
        metrics.add(new Metric("serverActiveThreads.count"));
        metrics.add(new Metric("serverActiveThreads.last"));
        metrics.add(new Metric("serverNumOpenConnections.average"));
        metrics.add(new Metric("serverNumOpenConnections.max"));
        metrics.add(new Metric("serverNumOpenConnections.last"));
        metrics.add(new Metric("serverNumConnections.average"));
        metrics.add(new Metric("serverNumConnections.max"));
        metrics.add(new Metric("serverNumConnections.last"));

        { // jdisc thread-pool metrics share one suffix list; block scopes the local.
            List<String> suffixes = List.of("sum", "count", "last", "min", "max");
            addMetric(metrics, "jdisc.thread_pool.unhandled_exceptions", suffixes);
            addMetric(metrics, "jdisc.thread_pool.work_queue.capacity", suffixes);
            addMetric(metrics, "jdisc.thread_pool.work_queue.size", suffixes);
        }

        // HTTP document API ("httpapi_") metrics.
        metrics.add(new Metric("httpapi_latency.max"));
        metrics.add(new Metric("httpapi_latency.sum"));
        metrics.add(new Metric("httpapi_latency.count"));
        metrics.add(new Metric("httpapi_latency.average"));
        metrics.add(new Metric("httpapi_pending.max"));
        metrics.add(new Metric("httpapi_pending.sum"));
        metrics.add(new Metric("httpapi_pending.count"));
        metrics.add(new Metric("httpapi_pending.average"));
        metrics.add(new Metric("httpapi_num_operations.rate"));
        metrics.add(new Metric("httpapi_num_updates.rate"));
        metrics.add(new Metric("httpapi_num_removes.rate"));
        metrics.add(new Metric("httpapi_num_puts.rate"));
        metrics.add(new Metric("httpapi_succeeded.rate"));
        metrics.add(new Metric("httpapi_failed.rate"));
        metrics.add(new Metric("httpapi_parse_error.rate"));
        addMetric(metrics, "httpapi_condition_not_met", List.of("rate"));

        // JVM heap, GC and process-level gauges.
        metrics.add(new Metric("mem.heap.total.average"));
        metrics.add(new Metric("mem.heap.free.average"));
        metrics.add(new Metric("mem.heap.used.average"));
        metrics.add(new Metric("mem.heap.used.max"));
        metrics.add(new Metric("jdisc.memory_mappings.max"));
        metrics.add(new Metric("jdisc.open_file_descriptors.max"));
        metrics.add(new Metric("jdisc.gc.count.average"));
        metrics.add(new Metric("jdisc.gc.count.max"));
        metrics.add(new Metric("jdisc.gc.count.last"));
        metrics.add(new Metric("jdisc.gc.ms.average"));
        metrics.add(new Metric("jdisc.gc.ms.max"));
        metrics.add(new Metric("jdisc.gc.ms.last"));
        metrics.add(new Metric("jdisc.deactivated_containers.total.last"));
        metrics.add(new Metric("jdisc.deactivated_containers.with_retained_refs.last"));

        // Certificate expiry gauges.
        metrics.add(new Metric("athenz-tenant-cert.expiry.seconds.last"));
        metrics.add(new Metric("container-iam-role.expiry.seconds"));

        // HTTP request / status metrics.
        metrics.add(new Metric("jdisc.http.request.prematurely_closed.rate"));
        addMetric(metrics, "jdisc.http.request.requests_per_connection",
                  List.of("sum", "count", "min", "max", "average"));
        metrics.add(new Metric("http.status.1xx.rate"));
        metrics.add(new Metric("http.status.2xx.rate"));
        metrics.add(new Metric("http.status.3xx.rate"));
        metrics.add(new Metric("http.status.4xx.rate"));
        metrics.add(new Metric("http.status.5xx.rate"));
        metrics.add(new Metric("http.status.401.rate"));
        metrics.add(new Metric("http.status.403.rate"));
        metrics.add(new Metric("jdisc.http.request.uri_length.max"));
        metrics.add(new Metric("jdisc.http.request.uri_length.sum"));
        metrics.add(new Metric("jdisc.http.request.uri_length.count"));
        metrics.add(new Metric("jdisc.http.request.uri_length.average"));
        metrics.add(new Metric("jdisc.http.request.content_size.max"));
        metrics.add(new Metric("jdisc.http.request.content_size.sum"));
        metrics.add(new Metric("jdisc.http.request.content_size.count"));
        metrics.add(new Metric("jdisc.http.request.content_size.average"));

        // TLS handshake failure rates, by failure category.
        metrics.add(new Metric("jdisc.http.ssl.handshake.failure.missing_client_cert.rate"));
        metrics.add(new Metric("jdisc.http.ssl.handshake.failure.expired_client_cert.rate"));
        metrics.add(new Metric("jdisc.http.ssl.handshake.failure.invalid_client_cert.rate"));
        metrics.add(new Metric("jdisc.http.ssl.handshake.failure.incompatible_protocols.rate"));
        metrics.add(new Metric("jdisc.http.ssl.handshake.failure.incompatible_ciphers.rate"));
        metrics.add(new Metric("jdisc.http.ssl.handshake.failure.unknown.rate"));
        metrics.add(new Metric("jdisc.http.handler.unhandled_exceptions.rate"));

        // Jetty thread pool metrics.
        addMetric(metrics, "jdisc.http.jetty.threadpool.thread.max", List.of("last"));
        addMetric(metrics, "jdisc.http.jetty.threadpool.thread.reserved", List.of("last"));
        addMetric(metrics, "jdisc.http.jetty.threadpool.thread.busy", List.of("sum", "count", "min", "max"));
        addMetric(metrics, "jdisc.http.jetty.threadpool.thread.total", List.of("sum", "count", "min", "max"));
        addMetric(metrics, "jdisc.http.jetty.threadpool.queue.size", List.of("sum", "count", "min", "max"));

        // Request/response filtering and filter-rule metrics.
        addMetric(metrics, "jdisc.http.filtering.request.handled", List.of("rate"));
        addMetric(metrics, "jdisc.http.filtering.request.unhandled", List.of("rate"));
        addMetric(metrics, "jdisc.http.filtering.response.handled", List.of("rate"));
        addMetric(metrics, "jdisc.http.filtering.response.unhandled", List.of("rate"));
        addMetric(metrics, "jdisc.application.failed_component_graphs", List.of("rate"));
        addMetric(metrics, "jdisc.http.filter.rule.blocked_requests", List.of("rate"));
        addMetric(metrics, "jdisc.http.filter.rule.allowed_requests", List.of("rate"));
        return metrics;
    }

    /** Metrics emitted under the "cluster-controller." prefix, plus reindexing progress. */
    private static Set<Metric> getClusterControllerMetrics() {
        Set<Metric> metrics = new LinkedHashSet<>();
        // Node counts per state.
        metrics.add(new Metric("cluster-controller.down.count.last"));
        metrics.add(new Metric("cluster-controller.initializing.count.last"));
        metrics.add(new Metric("cluster-controller.maintenance.count.last"));
        metrics.add(new Metric("cluster-controller.retired.count.last"));
        metrics.add(new Metric("cluster-controller.stopping.count.last"));
        metrics.add(new Metric("cluster-controller.up.count.last"));
        metrics.add(new Metric("cluster-controller.cluster-state-change.count"));
        // Tick/work timing.
        metrics.add(new Metric("cluster-controller.busy-tick-time-ms.last"));
        metrics.add(new Metric("cluster-controller.busy-tick-time-ms.max"));
        metrics.add(new Metric("cluster-controller.busy-tick-time-ms.sum"));
        metrics.add(new Metric("cluster-controller.busy-tick-time-ms.count"));
        metrics.add(new Metric("cluster-controller.idle-tick-time-ms.last"));
        metrics.add(new Metric("cluster-controller.idle-tick-time-ms.max"));
        metrics.add(new Metric("cluster-controller.idle-tick-time-ms.sum"));
        metrics.add(new Metric("cluster-controller.idle-tick-time-ms.count"));
        metrics.add(new Metric("cluster-controller.work-ms.last"));
        metrics.add(new Metric("cluster-controller.work-ms.sum"));
        metrics.add(new Metric("cluster-controller.work-ms.count"));
        metrics.add(new Metric("cluster-controller.is-master.last"));
        metrics.add(new Metric("cluster-controller.remote-task-queue.size.last"));
        metrics.add(new Metric("cluster-controller.node-event.count"));
        // Resource usage as seen by the cluster controller.
        metrics.add(new Metric("cluster-controller.resource_usage.nodes_above_limit.last"));
        metrics.add(new Metric("cluster-controller.resource_usage.nodes_above_limit.max"));
        metrics.add(new Metric("cluster-controller.resource_usage.max_memory_utilization.last"));
        metrics.add(new Metric("cluster-controller.resource_usage.max_memory_utilization.max"));
        metrics.add(new Metric("cluster-controller.resource_usage.max_disk_utilization.last"));
        metrics.add(new Metric("cluster-controller.resource_usage.max_disk_utilization.max"));
        metrics.add(new Metric("cluster-controller.resource_usage.disk_limit.last"));
        metrics.add(new Metric("cluster-controller.resource_usage.memory_limit.last"));
        metrics.add(new Metric("reindexing.progress.last"));
        return metrics;
    }

    /** Document processing (docproc) metrics. */
    private static Set<Metric> getDocprocMetrics() {
        Set<Metric> metrics = new LinkedHashSet<>();
        metrics.add(new Metric("documents_processed.rate"));
        return metrics;
    }

    /** Query-serving container (qrserver) metrics: query/feed latency, hits, relevance, errors. */
    private static Set<Metric> getQrserverMetrics() {
        Set<Metric> metrics = new LinkedHashSet<>();
        metrics.add(new Metric("peak_qps.max"));
        metrics.add(new Metric("search_connections.max"));
        metrics.add(new Metric("search_connections.sum"));
        metrics.add(new Metric("search_connections.count"));
        metrics.add(new Metric("search_connections.average"));
        metrics.add(new Metric("feed.latency.max"));
        metrics.add(new Metric("feed.latency.sum"));
        metrics.add(new Metric("feed.latency.count"));
        metrics.add(new Metric("feed.latency.average"));
        metrics.add(new Metric("feed.http-requests.count"));
        metrics.add(new Metric("feed.http-requests.rate"));
        metrics.add(new Metric("queries.rate"));
        metrics.add(new Metric("query_container_latency.max"));
        metrics.add(new Metric("query_container_latency.sum"));
        metrics.add(new Metric("query_container_latency.count"));
        metrics.add(new Metric("query_container_latency.average"));
        metrics.add(new Metric("query_latency.max"));
        metrics.add(new Metric("query_latency.sum"));
        metrics.add(new Metric("query_latency.count"));
        metrics.add(new Metric("query_latency.average"));
        metrics.add(new Metric("query_latency.95percentile"));
        metrics.add(new Metric("query_latency.99percentile"));
        metrics.add(new Metric("failed_queries.rate"));
        metrics.add(new Metric("degraded_queries.rate"));
        metrics.add(new Metric("hits_per_query.max"));
        metrics.add(new Metric("hits_per_query.sum"));
        metrics.add(new Metric("hits_per_query.count"));
        metrics.add(new Metric("hits_per_query.average"));
        metrics.add(new Metric("hits_per_query.95percentile"));
        metrics.add(new Metric("hits_per_query.99percentile"));
        metrics.add(new Metric("query_hit_offset.max"));
        metrics.add(new Metric("query_hit_offset.sum"));
        metrics.add(new Metric("query_hit_offset.count"));
        metrics.add(new Metric("documents_covered.count"));
        metrics.add(new Metric("documents_total.count"));
        metrics.add(new Metric("dispatch_internal.rate"));
        metrics.add(new Metric("dispatch_fdispatch.rate"));
        addMetric(metrics, "jdisc.render.latency", Set.of("min", "max", "count", "sum", "last", "average"));
        metrics.add(new Metric("totalhits_per_query.max"));
        metrics.add(new Metric("totalhits_per_query.sum"));
        metrics.add(new Metric("totalhits_per_query.count"));
        metrics.add(new Metric("totalhits_per_query.average"));
        metrics.add(new Metric("totalhits_per_query.95percentile"));
        metrics.add(new Metric("totalhits_per_query.99percentile"));
        metrics.add(new Metric("empty_results.rate"));
        metrics.add(new Metric("requestsOverQuota.rate"));
        metrics.add(new Metric("requestsOverQuota.count"));
        // Relevance score at rank positions 1, 3 and 10.
        metrics.add(new Metric("relevance.at_1.sum"));
        metrics.add(new Metric("relevance.at_1.count"));
        metrics.add(new Metric("relevance.at_1.average"));
        metrics.add(new Metric("relevance.at_3.sum"));
        metrics.add(new Metric("relevance.at_3.count"));
        metrics.add(new Metric("relevance.at_3.average"));
        metrics.add(new Metric("relevance.at_10.sum"));
        metrics.add(new Metric("relevance.at_10.count"));
        metrics.add(new Metric("relevance.at_10.average"));
        // Query error rates, by error category.
        metrics.add(new Metric("error.timeout.rate"));
        metrics.add(new Metric("error.backends_oos.rate"));
        metrics.add(new Metric("error.plugin_failure.rate"));
        metrics.add(new Metric("error.backend_communication_error.rate"));
        metrics.add(new Metric("error.empty_document_summaries.rate"));
        metrics.add(new Metric("error.invalid_query_parameter.rate"));
        metrics.add(new Metric("error.internal_server_error.rate"));
        metrics.add(new Metric("error.misconfigured_server.rate"));
        metrics.add(new Metric("error.invalid_query_transformation.rate"));
        metrics.add(new Metric("error.result_with_errors.rate"));
        metrics.add(new Metric("error.unspecified.rate"));
        metrics.add(new Metric("error.unhandled_exception.rate"));
        return metrics;
    }

    /**
     * Adds the standard executor metrics (queue size, pending, accepted, wakeups, utilization)
     * for one search-node executor identified by {@code prefix}.
     */
    private static void addSearchNodeExecutorMetrics(Set<Metric> metrics, String prefix) {
        metrics.add(new Metric(prefix + ".queuesize.max"));
        metrics.add(new Metric(prefix + ".queuesize.sum"));
        metrics.add(new Metric(prefix + ".queuesize.count"));
        metrics.add(new Metric(prefix + ".maxpending.last"));
        metrics.add(new Metric(prefix + ".accepted.rate"));
        metrics.add(new Metric(prefix + ".wakeups.rate"));
        metrics.add(new Metric(prefix + ".utilization.max"));
        metrics.add(new Metric(prefix + ".utilization.sum"));
        metrics.add(new Metric(prefix + ".utilization.count"));
    }

    /** Search node (proton) metrics: documents, executors, lid space, stores, matching, feeding. */
    private static Set<Metric> getSearchNodeMetrics() {
        Set<Metric> metrics = new LinkedHashSet<>();
        // Document counts per subset.
        metrics.add(new Metric("content.proton.documentdb.documents.total.last"));
        metrics.add(new Metric("content.proton.documentdb.documents.ready.last"));
        metrics.add(new Metric("content.proton.documentdb.documents.active.last"));
        metrics.add(new Metric("content.proton.documentdb.documents.removed.last"));
        metrics.add(new Metric("content.proton.documentdb.index.docs_in_memory.last"));
        metrics.add(new Metric("content.proton.documentdb.disk_usage.last"));
        metrics.add(new Metric("content.proton.documentdb.memory_usage.allocated_bytes.max"));
        metrics.add(new Metric("content.proton.documentdb.heart_beat_age.last"));
        metrics.add(new Metric("content.proton.transport.query.count.rate"));
        metrics.add(new Metric("content.proton.docsum.docs.rate"));
        metrics.add(new Metric("content.proton.docsum.latency.max"));
        metrics.add(new Metric("content.proton.docsum.latency.sum"));
        metrics.add(new Metric("content.proton.docsum.latency.count"));
        metrics.add(new Metric("content.proton.docsum.latency.average"));
        metrics.add(new Metric("content.proton.transport.query.latency.max"));
        metrics.add(new Metric("content.proton.transport.query.latency.sum"));
        metrics.add(new Metric("content.proton.transport.query.latency.count"));
        metrics.add(new Metric("content.proton.transport.query.latency.average"));
        // Search protocol latency and message sizes, for query and docsum traffic.
        metrics.add(new Metric("content.proton.search_protocol.query.latency.max"));
        metrics.add(new Metric("content.proton.search_protocol.query.latency.sum"));
        metrics.add(new Metric("content.proton.search_protocol.query.latency.count"));
        metrics.add(new Metric("content.proton.search_protocol.query.request_size.max"));
        metrics.add(new Metric("content.proton.search_protocol.query.request_size.sum"));
        metrics.add(new Metric("content.proton.search_protocol.query.request_size.count"));
        metrics.add(new Metric("content.proton.search_protocol.query.reply_size.max"));
        metrics.add(new Metric("content.proton.search_protocol.query.reply_size.sum"));
        metrics.add(new Metric("content.proton.search_protocol.query.reply_size.count"));
        metrics.add(new Metric("content.proton.search_protocol.docsum.latency.max"));
        metrics.add(new Metric("content.proton.search_protocol.docsum.latency.sum"));
        metrics.add(new Metric("content.proton.search_protocol.docsum.latency.count"));
        metrics.add(new Metric("content.proton.search_protocol.docsum.request_size.max"));
        metrics.add(new Metric("content.proton.search_protocol.docsum.request_size.sum"));
        metrics.add(new Metric("content.proton.search_protocol.docsum.request_size.count"));
        metrics.add(new Metric("content.proton.search_protocol.docsum.reply_size.max"));
        metrics.add(new Metric("content.proton.search_protocol.docsum.reply_size.sum"));
        metrics.add(new Metric("content.proton.search_protocol.docsum.reply_size.count"));
        metrics.add(new Metric("content.proton.search_protocol.docsum.requested_documents.count"));
        // Proton executors.
        addSearchNodeExecutorMetrics(metrics, "content.proton.executor.proton");
        addSearchNodeExecutorMetrics(metrics, "content.proton.executor.flush");
        addSearchNodeExecutorMetrics(metrics, "content.proton.executor.match");
        addSearchNodeExecutorMetrics(metrics, "content.proton.executor.docsum");
        addSearchNodeExecutorMetrics(metrics, "content.proton.executor.shared");
        addSearchNodeExecutorMetrics(metrics, "content.proton.executor.warmup");
        addSearchNodeExecutorMetrics(metrics, "content.proton.executor.field_writer");
        // Maintenance job load.
        metrics.add(new Metric("content.proton.documentdb.job.total.average"));
        metrics.add(new Metric("content.proton.documentdb.job.attribute_flush.average"));
        metrics.add(new Metric("content.proton.documentdb.job.memory_index_flush.average"));
        metrics.add(new Metric("content.proton.documentdb.job.disk_index_fusion.average"));
        metrics.add(new Metric("content.proton.documentdb.job.document_store_flush.average"));
        metrics.add(new Metric("content.proton.documentdb.job.document_store_compact.average"));
        metrics.add(new Metric("content.proton.documentdb.job.bucket_move.average"));
        metrics.add(new Metric("content.proton.documentdb.job.lid_space_compact.average"));
        metrics.add(new Metric("content.proton.documentdb.job.removed_documents_prune.average"));
        // Per-document-db threading service executors.
        addSearchNodeExecutorMetrics(metrics, "content.proton.documentdb.threading_service.master");
        addSearchNodeExecutorMetrics(metrics, "content.proton.documentdb.threading_service.index");
        addSearchNodeExecutorMetrics(metrics, "content.proton.documentdb.threading_service.summary");
        addSearchNodeExecutorMetrics(metrics, "content.proton.documentdb.threading_service.index_field_inverter");
        addSearchNodeExecutorMetrics(metrics, "content.proton.documentdb.threading_service.index_field_writer");
        addSearchNodeExecutorMetrics(metrics, "content.proton.documentdb.threading_service.attribute_field_writer");
        // Lid space metrics for the ready / notready / removed document subsets.
        metrics.add(new Metric("content.proton.documentdb.ready.lid_space.lid_bloat_factor.average"));
        metrics.add(new Metric("content.proton.documentdb.notready.lid_space.lid_bloat_factor.average"));
        metrics.add(new Metric("content.proton.documentdb.removed.lid_space.lid_bloat_factor.average"));
        metrics.add(new Metric("content.proton.documentdb.ready.lid_space.lid_fragmentation_factor.average"));
        metrics.add(new Metric("content.proton.documentdb.notready.lid_space.lid_fragmentation_factor.average"));
        metrics.add(new Metric("content.proton.documentdb.removed.lid_space.lid_fragmentation_factor.average"));
        metrics.add(new Metric("content.proton.documentdb.ready.lid_space.lid_limit.last"));
        metrics.add(new Metric("content.proton.documentdb.notready.lid_space.lid_limit.last"));
        metrics.add(new Metric("content.proton.documentdb.removed.lid_space.lid_limit.last"));
        metrics.add(new Metric("content.proton.documentdb.ready.lid_space.highest_used_lid.last"));
        metrics.add(new Metric("content.proton.documentdb.notready.lid_space.highest_used_lid.last"));
        metrics.add(new Metric("content.proton.documentdb.removed.lid_space.highest_used_lid.last"));
        metrics.add(new Metric("content.proton.documentdb.ready.lid_space.used_lids.last"));
        metrics.add(new Metric("content.proton.documentdb.notready.lid_space.used_lids.last"));
        metrics.add(new Metric("content.proton.documentdb.removed.lid_space.used_lids.last"));
        metrics.add(new Metric("content.proton.documentdb.bucket_move.buckets_pending.last"));
        // Node-level resource usage.
        metrics.add(new Metric("content.proton.resource_usage.disk.average"));
        metrics.add(new Metric("content.proton.resource_usage.disk_utilization.average"));
        metrics.add(new Metric("content.proton.resource_usage.memory.average"));
        metrics.add(new Metric("content.proton.resource_usage.memory_utilization.average"));
        metrics.add(new Metric("content.proton.resource_usage.transient_memory.average"));
        metrics.add(new Metric("content.proton.resource_usage.transient_disk.average"));
        metrics.add(new Metric("content.proton.resource_usage.memory_mappings.max"));
        metrics.add(new Metric("content.proton.resource_usage.open_file_descriptors.max"));
        metrics.add(new Metric("content.proton.resource_usage.feeding_blocked.max"));
        metrics.add(new Metric("content.proton.resource_usage.malloc_arena.max"));
        metrics.add(new Metric("content.proton.documentdb.attribute.resource_usage.address_space.max"));
        metrics.add(new Metric("content.proton.documentdb.attribute.resource_usage.feeding_blocked.last"));
        metrics.add(new Metric("content.proton.documentdb.attribute.resource_usage.feeding_blocked.max"));
        // Transaction log.
        metrics.add(new Metric("content.proton.transactionlog.entries.average"));
        metrics.add(new Metric("content.proton.transactionlog.disk_usage.average"));
        metrics.add(new Metric("content.proton.transactionlog.replay_time.last"));
        // Document store usage, per document subset.
        metrics.add(new Metric("content.proton.documentdb.ready.document_store.disk_usage.average"));
        metrics.add(new Metric("content.proton.documentdb.ready.document_store.disk_bloat.average"));
        metrics.add(new Metric("content.proton.documentdb.ready.document_store.max_bucket_spread.average"));
        metrics.add(new Metric("content.proton.documentdb.ready.document_store.memory_usage.allocated_bytes.average"));
        metrics.add(new Metric("content.proton.documentdb.ready.document_store.memory_usage.used_bytes.average"));
        metrics.add(new Metric("content.proton.documentdb.ready.document_store.memory_usage.dead_bytes.average"));
        metrics.add(new Metric("content.proton.documentdb.ready.document_store.memory_usage.onhold_bytes.average"));
        metrics.add(new Metric("content.proton.documentdb.notready.document_store.disk_usage.average"));
        metrics.add(new Metric("content.proton.documentdb.notready.document_store.disk_bloat.average"));
        metrics.add(new Metric("content.proton.documentdb.notready.document_store.max_bucket_spread.average"));
        metrics.add(new Metric("content.proton.documentdb.notready.document_store.memory_usage.allocated_bytes.average"));
        metrics.add(new Metric("content.proton.documentdb.notready.document_store.memory_usage.used_bytes.average"));
        metrics.add(new Metric("content.proton.documentdb.notready.document_store.memory_usage.dead_bytes.average"));
        metrics.add(new Metric("content.proton.documentdb.notready.document_store.memory_usage.onhold_bytes.average"));
        metrics.add(new Metric("content.proton.documentdb.removed.document_store.disk_usage.average"));
        metrics.add(new Metric("content.proton.documentdb.removed.document_store.disk_bloat.average"));
        metrics.add(new Metric("content.proton.documentdb.removed.document_store.max_bucket_spread.average"));
        metrics.add(new Metric("content.proton.documentdb.removed.document_store.memory_usage.allocated_bytes.average"));
        metrics.add(new Metric("content.proton.documentdb.removed.document_store.memory_usage.used_bytes.average"));
        metrics.add(new Metric("content.proton.documentdb.removed.document_store.memory_usage.dead_bytes.average"));
        metrics.add(new Metric("content.proton.documentdb.removed.document_store.memory_usage.onhold_bytes.average"));
        // Document store cache.
        metrics.add(new Metric("content.proton.documentdb.ready.document_store.cache.memory_usage.average"));
        metrics.add(new Metric("content.proton.documentdb.ready.document_store.cache.hit_rate.average"));
        metrics.add(new Metric("content.proton.documentdb.ready.document_store.cache.lookups.rate"));
        metrics.add(new Metric("content.proton.documentdb.ready.document_store.cache.invalidations.rate"));
        metrics.add(new Metric("content.proton.documentdb.notready.document_store.cache.memory_usage.average"));
        metrics.add(new Metric("content.proton.documentdb.notready.document_store.cache.hit_rate.average"));
        metrics.add(new Metric("content.proton.documentdb.notready.document_store.cache.lookups.rate"));
        metrics.add(new Metric("content.proton.documentdb.notready.document_store.cache.invalidations.rate"));
        // Attribute vector memory usage.
        metrics.add(new Metric("content.proton.documentdb.ready.attribute.memory_usage.allocated_bytes.average"));
        metrics.add(new Metric("content.proton.documentdb.ready.attribute.memory_usage.used_bytes.average"));
        metrics.add(new Metric("content.proton.documentdb.ready.attribute.memory_usage.dead_bytes.average"));
        metrics.add(new Metric("content.proton.documentdb.ready.attribute.memory_usage.onhold_bytes.average"));
        metrics.add(new Metric("content.proton.documentdb.notready.attribute.memory_usage.allocated_bytes.average"));
        metrics.add(new Metric("content.proton.documentdb.notready.attribute.memory_usage.used_bytes.average"));
        metrics.add(new Metric("content.proton.documentdb.notready.attribute.memory_usage.dead_bytes.average"));
        metrics.add(new Metric("content.proton.documentdb.notready.attribute.memory_usage.onhold_bytes.average"));
        // Index memory usage.
        metrics.add(new Metric("content.proton.documentdb.index.memory_usage.allocated_bytes.average"));
        metrics.add(new Metric("content.proton.documentdb.index.memory_usage.used_bytes.average"));
        metrics.add(new Metric("content.proton.documentdb.index.memory_usage.dead_bytes.average"));
        metrics.add(new Metric("content.proton.documentdb.index.memory_usage.onhold_bytes.average"));
        // Matching metrics, overall.
        metrics.add(new Metric("content.proton.documentdb.matching.queries.rate"));
        metrics.add(new Metric("content.proton.documentdb.matching.soft_doomed_queries.rate"));
        metrics.add(new Metric("content.proton.documentdb.matching.query_latency.max"));
        metrics.add(new Metric("content.proton.documentdb.matching.query_latency.sum"));
        metrics.add(new Metric("content.proton.documentdb.matching.query_latency.count"));
        metrics.add(new Metric("content.proton.documentdb.matching.query_latency.average"));
        metrics.add(new Metric("content.proton.documentdb.matching.query_collateral_time.max"));
        metrics.add(new Metric("content.proton.documentdb.matching.query_collateral_time.sum"));
        metrics.add(new Metric("content.proton.documentdb.matching.query_collateral_time.count"));
        metrics.add(new Metric("content.proton.documentdb.matching.query_collateral_time.average"));
        metrics.add(new Metric("content.proton.documentdb.matching.query_setup_time.max"));
        metrics.add(new Metric("content.proton.documentdb.matching.query_setup_time.sum"));
        metrics.add(new Metric("content.proton.documentdb.matching.query_setup_time.count"));
        metrics.add(new Metric("content.proton.documentdb.matching.docs_matched.rate"));
        metrics.add(new Metric("content.proton.documentdb.matching.docs_matched.max"));
        metrics.add(new Metric("content.proton.documentdb.matching.docs_matched.sum"));
        metrics.add(new Metric("content.proton.documentdb.matching.docs_matched.count"));
        // Matching metrics, per rank profile.
        metrics.add(new Metric("content.proton.documentdb.matching.rank_profile.queries.rate"));
        metrics.add(new Metric("content.proton.documentdb.matching.rank_profile.soft_doomed_queries.rate"));
        metrics.add(new Metric("content.proton.documentdb.matching.rank_profile.soft_doom_factor.min"));
        metrics.add(new Metric("content.proton.documentdb.matching.rank_profile.soft_doom_factor.max"));
        metrics.add(new Metric("content.proton.documentdb.matching.rank_profile.soft_doom_factor.sum"));
        metrics.add(new Metric("content.proton.documentdb.matching.rank_profile.soft_doom_factor.count"));
        metrics.add(new Metric("content.proton.documentdb.matching.rank_profile.query_latency.max"));
        metrics.add(new Metric("content.proton.documentdb.matching.rank_profile.query_latency.sum"));
        metrics.add(new Metric("content.proton.documentdb.matching.rank_profile.query_latency.count"));
        metrics.add(new Metric("content.proton.documentdb.matching.rank_profile.query_latency.average"));
        metrics.add(new Metric("content.proton.documentdb.matching.rank_profile.query_collateral_time.max"));
        metrics.add(new Metric("content.proton.documentdb.matching.rank_profile.query_collateral_time.sum"));
        metrics.add(new Metric("content.proton.documentdb.matching.rank_profile.query_collateral_time.count"));
        metrics.add(new Metric("content.proton.documentdb.matching.rank_profile.query_collateral_time.average"));
        metrics.add(new Metric("content.proton.documentdb.matching.rank_profile.query_setup_time.max"));
        metrics.add(new Metric("content.proton.documentdb.matching.rank_profile.query_setup_time.sum"));
        metrics.add(new Metric("content.proton.documentdb.matching.rank_profile.query_setup_time.count"));
        metrics.add(new Metric("content.proton.documentdb.matching.rank_profile.rerank_time.max"));
        metrics.add(new Metric("content.proton.documentdb.matching.rank_profile.rerank_time.sum"));
        metrics.add(new Metric("content.proton.documentdb.matching.rank_profile.rerank_time.count"));
        metrics.add(new Metric("content.proton.documentdb.matching.rank_profile.rerank_time.average"));
        metrics.add(new Metric("content.proton.documentdb.matching.rank_profile.docs_matched.rate"));
        metrics.add(new Metric("content.proton.documentdb.matching.rank_profile.docs_matched.max"));
        metrics.add(new Metric("content.proton.documentdb.matching.rank_profile.docs_matched.sum"));
        metrics.add(new Metric("content.proton.documentdb.matching.rank_profile.docs_matched.count"));
        metrics.add(new Metric("content.proton.documentdb.matching.rank_profile.limited_queries.rate"));
        // Feeding / commit metrics.
        metrics.add(new Metric("content.proton.documentdb.feeding.commit.operations.max"));
        metrics.add(new Metric("content.proton.documentdb.feeding.commit.operations.sum"));
        metrics.add(new Metric("content.proton.documentdb.feeding.commit.operations.count"));
        metrics.add(new Metric("content.proton.documentdb.feeding.commit.operations.rate"));
        metrics.add(new Metric("content.proton.documentdb.feeding.commit.latency.max"));
        metrics.add(new Metric("content.proton.documentdb.feeding.commit.latency.sum"));
        metrics.add(new Metric("content.proton.documentdb.feeding.commit.latency.count"));
        return metrics;
    }

    /**
     * Adds one Metric per suffix, named {@code metricName + "." + suffix}.
     * (Parameter name "aggregateSuffices" kept as-is; spelling predates this review.)
     */
    private static void addMetric(Set<Metric> metrics, String metricName, Iterable<String> aggregateSuffices) {
        for (String suffix : aggregateSuffices) {
            metrics.add(new Metric(metricName + "." + suffix));
        }
    }

}
/**
 * Defines the "vespa" metric set: the full set of service metrics emitted by Vespa
 * processes, grouped per service type, exposed as a MetricSet named "vespa" with
 * {@code defaultVespaMetricSet} as a child set.
 *
 * NOTE(review): getVespaMetrics() calls getStorageMetrics(), which is not visible in
 * this excerpt — presumably defined elsewhere in this file; verify before refactoring.
 */
class VespaMetricSet {

    /** The aggregate "vespa" metric set, which also pulls in the default Vespa metric set. */
    public static final MetricSet vespaMetricSet = new MetricSet("vespa",
                                                                 getVespaMetrics(),
                                                                 singleton(defaultVespaMetricSet));

    /**
     * Collects all per-service metric groups into a single set.
     * LinkedHashSet is used throughout this class so insertion order is preserved.
     *
     * @return an unmodifiable set of all Vespa metrics
     */
    private static Set<Metric> getVespaMetrics() {
        Set<Metric> metrics = new LinkedHashSet<>();

        metrics.addAll(getSearchNodeMetrics());
        metrics.addAll(getStorageMetrics());
        metrics.addAll(getDocprocMetrics());
        metrics.addAll(getClusterControllerMetrics());
        metrics.addAll(getQrserverMetrics());
        metrics.addAll(getContainerMetrics());
        metrics.addAll(getConfigServerMetrics());
        metrics.addAll(getSentinelMetrics());
        metrics.addAll(getOtherMetrics());

        return Collections.unmodifiableSet(metrics);
    }

    /** Metrics from config-sentinel: restart counts and service uptime/run state. */
    private static Set<Metric> getSentinelMetrics() {
        Set<Metric> metrics = new LinkedHashSet<>();
        metrics.add(new Metric("sentinel.restarts.count"));
        metrics.add(new Metric("sentinel.totalRestarts.last"));
        metrics.add(new Metric("sentinel.uptime.last"));
        metrics.add(new Metric("sentinel.running.count"));
        metrics.add(new Metric("sentinel.running.last"));
        return metrics;
    }

    /** Miscellaneous metrics: slobrok, logd, and TLS/connection counters from jrt and vds transports. */
    private static Set<Metric> getOtherMetrics() {
        Set<Metric> metrics = new LinkedHashSet<>();
        metrics.add(new Metric("slobrok.heartbeats.failed.count"));
        metrics.add(new Metric("logd.processed.lines.count"));
        metrics.add(new Metric("worker.connections.max"));

        // C++ TLS metrics (jrt transport).
        metrics.add(new Metric("jrt.transport.tls-certificate-verification-failures"));
        metrics.add(new Metric("jrt.transport.peer-authorization-failures"));
        metrics.add(new Metric("jrt.transport.server.tls-connections-established"));
        metrics.add(new Metric("jrt.transport.client.tls-connections-established"));
        metrics.add(new Metric("jrt.transport.server.unencrypted-connections-established"));
        metrics.add(new Metric("jrt.transport.client.unencrypted-connections-established"));
        metrics.add(new Metric("vds.server.network.tls-handshakes-failed"));
        metrics.add(new Metric("vds.server.network.peer-authorization-failures"));
        metrics.add(new Metric("vds.server.network.client.tls-connections-established"));
        metrics.add(new Metric("vds.server.network.server.tls-connections-established"));
        metrics.add(new Metric("vds.server.network.client.insecure-connections-established"));
        metrics.add(new Metric("vds.server.network.server.insecure-connections-established"));
        metrics.add(new Metric("vds.server.network.tls-connections-broken"));
        metrics.add(new Metric("vds.server.network.failed-tls-config-reloads"));
        metrics.add(new Metric("vds.server.fnet.num-connections"));

        // Node certificate expiry (reported in seconds).
        metrics.add(new Metric("node-certificate.expiry.seconds"));

        return metrics;
    }

    /** Config server metrics: request counts/latency, cache sizes and ZooKeeper state. */
    private static Set<Metric> getConfigServerMetrics() {
        Set<Metric> metrics = new LinkedHashSet<>();

        metrics.add(new Metric("configserver.requests.count"));
        metrics.add(new Metric("configserver.failedRequests.count"));
        metrics.add(new Metric("configserver.latency.max"));
        metrics.add(new Metric("configserver.latency.sum"));
        metrics.add(new Metric("configserver.latency.count"));
        metrics.add(new Metric("configserver.latency.average"));
        metrics.add(new Metric("configserver.cacheConfigElems.last"));
        metrics.add(new Metric("configserver.cacheChecksumElems.last"));
        metrics.add(new Metric("configserver.hosts.last"));
        metrics.add(new Metric("configserver.delayedResponses.count"));
        metrics.add(new Metric("configserver.sessionChangeErrors.count"));

        // ZooKeeper-related metrics as seen from the config server.
        metrics.add(new Metric("configserver.zkZNodes.last"));
        metrics.add(new Metric("configserver.zkAvgLatency.last"));
        metrics.add(new Metric("configserver.zkMaxLatency.last"));
        metrics.add(new Metric("configserver.zkConnections.last"));
        metrics.add(new Metric("configserver.zkOutstandingRequests.last"));

        return metrics;
    }

    /** Generic container (jdisc) metrics: request handling, thread pools, JVM, HTTP and TLS. */
    private static Set<Metric> getContainerMetrics() {
        Set<Metric> metrics = new LinkedHashSet<>();

        addMetric(metrics, "jdisc.http.requests", List.of("rate", "count"));
        metrics.add(new Metric("handled.requests.count"));
        metrics.add(new Metric("handled.latency.max"));
        metrics.add(new Metric("handled.latency.sum"));
        metrics.add(new Metric("handled.latency.count"));
        metrics.add(new Metric("handled.latency.average"));

        metrics.add(new Metric("serverRejectedRequests.rate"));
        metrics.add(new Metric("serverRejectedRequests.count"));

        metrics.add(new Metric("serverThreadPoolSize.average"));
        metrics.add(new Metric("serverThreadPoolSize.min"));
        metrics.add(new Metric("serverThreadPoolSize.max"));
        metrics.add(new Metric("serverThreadPoolSize.rate"));
        metrics.add(new Metric("serverThreadPoolSize.count"));
        metrics.add(new Metric("serverThreadPoolSize.last"));

        metrics.add(new Metric("serverActiveThreads.average"));
        metrics.add(new Metric("serverActiveThreads.min"));
        metrics.add(new Metric("serverActiveThreads.max"));
        metrics.add(new Metric("serverActiveThreads.rate"));
        metrics.add(new Metric("serverActiveThreads.sum"));
        metrics.add(new Metric("serverActiveThreads.count"));
        metrics.add(new Metric("serverActiveThreads.last"));

        metrics.add(new Metric("serverNumOpenConnections.average"));
        metrics.add(new Metric("serverNumOpenConnections.max"));
        metrics.add(new Metric("serverNumOpenConnections.last"));
        metrics.add(new Metric("serverNumConnections.average"));
        metrics.add(new Metric("serverNumConnections.max"));
        metrics.add(new Metric("serverNumConnections.last"));

        {
            // These thread pool metrics share the same set of aggregate suffixes.
            List<String> suffixes = List.of("sum", "count", "last", "min", "max");
            addMetric(metrics, "jdisc.thread_pool.unhandled_exceptions", suffixes);
            addMetric(metrics, "jdisc.thread_pool.work_queue.capacity", suffixes);
            addMetric(metrics, "jdisc.thread_pool.work_queue.size", suffixes);
        }

        metrics.add(new Metric("httpapi_latency.max"));
        metrics.add(new Metric("httpapi_latency.sum"));
        metrics.add(new Metric("httpapi_latency.count"));
        metrics.add(new Metric("httpapi_latency.average"));
        metrics.add(new Metric("httpapi_pending.max"));
        metrics.add(new Metric("httpapi_pending.sum"));
        metrics.add(new Metric("httpapi_pending.count"));
        metrics.add(new Metric("httpapi_pending.average"));
        metrics.add(new Metric("httpapi_num_operations.rate"));
        metrics.add(new Metric("httpapi_num_updates.rate"));
        metrics.add(new Metric("httpapi_num_removes.rate"));
        metrics.add(new Metric("httpapi_num_puts.rate"));
        metrics.add(new Metric("httpapi_succeeded.rate"));
        metrics.add(new Metric("httpapi_failed.rate"));
        metrics.add(new Metric("httpapi_parse_error.rate"));
        addMetric(metrics, "httpapi_condition_not_met", List.of("rate"));

        // JVM heap and process metrics.
        metrics.add(new Metric("mem.heap.total.average"));
        metrics.add(new Metric("mem.heap.free.average"));
        metrics.add(new Metric("mem.heap.used.average"));
        metrics.add(new Metric("mem.heap.used.max"));
        metrics.add(new Metric("jdisc.memory_mappings.max"));
        metrics.add(new Metric("jdisc.open_file_descriptors.max"));
        metrics.add(new Metric("jdisc.gc.count.average"));
        metrics.add(new Metric("jdisc.gc.count.max"));
        metrics.add(new Metric("jdisc.gc.count.last"));
        metrics.add(new Metric("jdisc.gc.ms.average"));
        metrics.add(new Metric("jdisc.gc.ms.max"));
        metrics.add(new Metric("jdisc.gc.ms.last"));
        metrics.add(new Metric("jdisc.deactivated_containers.total.last"));
        metrics.add(new Metric("jdisc.deactivated_containers.with_retained_refs.last"));

        metrics.add(new Metric("athenz-tenant-cert.expiry.seconds.last"));
        metrics.add(new Metric("container-iam-role.expiry.seconds"));

        metrics.add(new Metric("jdisc.http.request.prematurely_closed.rate"));
        addMetric(metrics, "jdisc.http.request.requests_per_connection",
                  List.of("sum", "count", "min", "max", "average"));

        // HTTP status code classes plus selected individual auth-related codes.
        metrics.add(new Metric("http.status.1xx.rate"));
        metrics.add(new Metric("http.status.2xx.rate"));
        metrics.add(new Metric("http.status.3xx.rate"));
        metrics.add(new Metric("http.status.4xx.rate"));
        metrics.add(new Metric("http.status.5xx.rate"));
        metrics.add(new Metric("http.status.401.rate"));
        metrics.add(new Metric("http.status.403.rate"));

        metrics.add(new Metric("jdisc.http.request.uri_length.max"));
        metrics.add(new Metric("jdisc.http.request.uri_length.sum"));
        metrics.add(new Metric("jdisc.http.request.uri_length.count"));
        metrics.add(new Metric("jdisc.http.request.uri_length.average"));
        metrics.add(new Metric("jdisc.http.request.content_size.max"));
        metrics.add(new Metric("jdisc.http.request.content_size.sum"));
        metrics.add(new Metric("jdisc.http.request.content_size.count"));
        metrics.add(new Metric("jdisc.http.request.content_size.average"));

        // TLS handshake failure reasons as seen by the jdisc HTTP server.
        metrics.add(new Metric("jdisc.http.ssl.handshake.failure.missing_client_cert.rate"));
        metrics.add(new Metric("jdisc.http.ssl.handshake.failure.expired_client_cert.rate"));
        metrics.add(new Metric("jdisc.http.ssl.handshake.failure.invalid_client_cert.rate"));
        metrics.add(new Metric("jdisc.http.ssl.handshake.failure.incompatible_protocols.rate"));
        metrics.add(new Metric("jdisc.http.ssl.handshake.failure.incompatible_ciphers.rate"));
        metrics.add(new Metric("jdisc.http.ssl.handshake.failure.unknown.rate"));

        metrics.add(new Metric("jdisc.http.handler.unhandled_exceptions.rate"))
;
        // Jetty thread pool metrics.
        addMetric(metrics, "jdisc.http.jetty.threadpool.thread.max", List.of("last"));
        addMetric(metrics, "jdisc.http.jetty.threadpool.thread.reserved", List.of("last"));
        addMetric(metrics, "jdisc.http.jetty.threadpool.thread.busy", List.of("sum", "count", "min", "max"));
        addMetric(metrics, "jdisc.http.jetty.threadpool.thread.total", List.of("sum", "count", "min", "max"));
        addMetric(metrics, "jdisc.http.jetty.threadpool.queue.size", List.of("sum", "count", "min", "max"));

        // Request/response filtering metrics.
        addMetric(metrics, "jdisc.http.filtering.request.handled", List.of("rate"));
        addMetric(metrics, "jdisc.http.filtering.request.unhandled", List.of("rate"));
        addMetric(metrics, "jdisc.http.filtering.response.handled", List.of("rate"));
        addMetric(metrics, "jdisc.http.filtering.response.unhandled", List.of("rate"));

        addMetric(metrics, "jdisc.application.failed_component_graphs", List.of("rate"));

        addMetric(metrics, "jdisc.http.filter.rule.blocked_requests", List.of("rate"));
        addMetric(metrics, "jdisc.http.filter.rule.allowed_requests", List.of("rate"));

        return metrics;
    }

    /** Cluster controller metrics: node state counts, tick timing, mastership and resource usage. */
    private static Set<Metric> getClusterControllerMetrics() {
        Set<Metric> metrics = new LinkedHashSet<>();

        // Per-state node counts as seen by the cluster controller.
        metrics.add(new Metric("cluster-controller.down.count.last"));
        metrics.add(new Metric("cluster-controller.initializing.count.last"));
        metrics.add(new Metric("cluster-controller.maintenance.count.last"));
        metrics.add(new Metric("cluster-controller.retired.count.last"));
        metrics.add(new Metric("cluster-controller.stopping.count.last"));
        metrics.add(new Metric("cluster-controller.up.count.last"));
        metrics.add(new Metric("cluster-controller.cluster-state-change.count"));

        metrics.add(new Metric("cluster-controller.busy-tick-time-ms.last"));
        metrics.add(new Metric("cluster-controller.busy-tick-time-ms.max"));
        metrics.add(new Metric("cluster-controller.busy-tick-time-ms.sum"));
        metrics.add(new Metric("cluster-controller.busy-tick-time-ms.count"));
        metrics.add(new Metric("cluster-controller.idle-tick-time-ms.last"));
        metrics.add(new Metric("cluster-controller.idle-tick-time-ms.max"));
        metrics.add(new Metric("cluster-controller.idle-tick-time-ms.sum"));
        metrics.add(new Metric("cluster-controller.idle-tick-time-ms.count"));
        metrics.add(new Metric("cluster-controller.work-ms.last"));
        metrics.add(new Metric("cluster-controller.work-ms.sum"));
        metrics.add(new Metric("cluster-controller.work-ms.count"));

        metrics.add(new Metric("cluster-controller.is-master.last"));
        metrics.add(new Metric("cluster-controller.remote-task-queue.size.last"));
        metrics.add(new Metric("cluster-controller.node-event.count"));

        // Content node resource usage, aggregated by the cluster controller.
        metrics.add(new Metric("cluster-controller.resource_usage.nodes_above_limit.last"));
        metrics.add(new Metric("cluster-controller.resource_usage.nodes_above_limit.max"));
        metrics.add(new Metric("cluster-controller.resource_usage.max_memory_utilization.last"));
        metrics.add(new Metric("cluster-controller.resource_usage.max_memory_utilization.max"));
        metrics.add(new Metric("cluster-controller.resource_usage.max_disk_utilization.last"));
        metrics.add(new Metric("cluster-controller.resource_usage.max_disk_utilization.max"));
        metrics.add(new Metric("cluster-controller.resource_usage.disk_limit.last"));
        metrics.add(new Metric("cluster-controller.resource_usage.memory_limit.last"));

        metrics.add(new Metric("reindexing.progress.last"));

        return metrics;
    }

    /** Document processing (docproc) metrics. */
    private static Set<Metric> getDocprocMetrics() {
        Set<Metric> metrics = new LinkedHashSet<>();
        metrics.add(new Metric("documents_processed.rate"));
        return metrics;
    }

    /** Query serving (qrserver) metrics: query/feed latency, hit counts, relevance and error rates. */
    private static Set<Metric> getQrserverMetrics() {
        Set<Metric> metrics = new LinkedHashSet<>();

        metrics.add(new Metric("peak_qps.max"));
        metrics.add(new Metric("search_connections.max"));
        metrics.add(new Metric("search_connections.sum"));
        metrics.add(new Metric("search_connections.count"));
        metrics.add(new Metric("search_connections.average"));
        metrics.add(new Metric("feed.latency.max"));
        metrics.add(new Metric("feed.latency.sum"));
        metrics.add(new Metric("feed.latency.count"));
        metrics.add(new Metric("feed.latency.average"));
        metrics.add(new Metric("feed.http-requests.count"));
        metrics.add(new Metric("feed.http-requests.rate"));
        metrics.add(new Metric("queries.rate"));
        metrics.add(new Metric("query_container_latency.max"));
        metrics.add(new Metric("query_container_latency.sum"));
        metrics.add(new Metric("query_container_latency.count"));
        metrics.add(new Metric("query_container_latency.average"));
        metrics.add(new Metric("query_latency.max"));
        metrics.add(new Metric("query_latency.sum"));
        metrics.add(new Metric("query_latency.count"));
        metrics.add(new Metric("query_latency.average"));
        metrics.add(new Metric("query_latency.95percentile"));
        metrics.add(new Metric("query_latency.99percentile"));
        metrics.add(new Metric("failed_queries.rate"));
        metrics.add(new Metric("degraded_queries.rate"));
        metrics.add(new Metric("hits_per_query.max"));
        metrics.add(new Metric("hits_per_query.sum"));
        metrics.add(new Metric("hits_per_query.count"));
        metrics.add(new Metric("hits_per_query.average"));
        metrics.add(new Metric("hits_per_query.95percentile"));
        metrics.add(new Metric("hits_per_query.99percentile"));
        metrics.add(new Metric("query_hit_offset.max"));
        metrics.add(new Metric("query_hit_offset.sum"));
        metrics.add(new Metric("query_hit_offset.count"));
        metrics.add(new Metric("documents_covered.count"));
        metrics.add(new Metric("documents_total.count"));
        metrics.add(new Metric("dispatch_internal.rate"));
        metrics.add(new Metric("dispatch_fdispatch.rate"));

        addMetric(metrics, "jdisc.render.latency", Set.of("min", "max", "count", "sum", "last", "average"));

        metrics.add(new Metric("totalhits_per_query.max"));
        metrics.add(new Metric("totalhits_per_query.sum"));
        metrics.add(new Metric("totalhits_per_query.count"));
        metrics.add(new Metric("totalhits_per_query.average"));
        metrics.add(new Metric("totalhits_per_query.95percentile"));
        metrics.add(new Metric("totalhits_per_query.99percentile"));
        metrics.add(new Metric("empty_results.rate"));
        metrics.add(new Metric("requestsOverQuota.rate"));
        metrics.add(new Metric("requestsOverQuota.count"));

        metrics.add(new Metric("relevance.at_1.sum"));
        metrics.add(new Metric("relevance.at_1.count"));
        metrics.add(new Metric("relevance.at_1.average"));
        metrics.add(new Metric("relevance.at_3.sum"));
        metrics.add(new Metric("relevance.at_3.count"));
        metrics.add(new Metric("relevance.at_3.average"));
        metrics.add(new Metric("relevance.at_10.sum"));
        metrics.add(new Metric("relevance.at_10.count"));
        metrics.add(new Metric("relevance.at_10.average"));

        // Per-error-class rates for failed queries.
        metrics.add(new Metric("error.timeout.rate"));
        metrics.add(new Metric("error.backends_oos.rate"));
        metrics.add(new Metric("error.plugin_failure.rate"));
        metrics.add(new Metric("error.backend_communication_error.rate"));
        metrics.add(new Metric("error.empty_document_summaries.rate"));
        metrics.add(new Metric("error.invalid_query_parameter.rate"));
        metrics.add(new Metric("error.internal_server_error.rate"));
        metrics.add(new Metric("error.misconfigured_server.rate"));
        metrics.add(new Metric("error.invalid_query_transformation.rate"));
        metrics.add(new Metric("error.result_with_errors.rate"));
        metrics.add(new Metric("error.unspecified.rate"));
        metrics.add(new Metric("error.unhandled_exception.rate"));

        return metrics;
    }

    /** Adds the standard executor/thread-pool metrics for the given search node executor prefix. */
    private static void addSearchNodeExecutorMetrics(Set<Metric> metrics, String prefix) {
        metrics.add(new Metric(prefix + ".queuesize.max"));
        metrics.add(new Metric(prefix + ".queuesize.sum"));
        metrics.add(new Metric(prefix + ".queuesize.count"));
        metrics.add(new Metric(prefix + ".maxpending.last"));
        metrics.add(new Metric(prefix + ".accepted.rate"));
        metrics.add(new Metric(prefix + ".wakeups.rate"));
        metrics.add(new Metric(prefix + ".utilization.max"));
        metrics.add(new Metric(prefix + ".utilization.sum"));
        metrics.add(new Metric(prefix + ".utilization.count"));
    }

    /** Search node (proton) metrics: document counts, executors, document stores, attributes and matching. */
    private static Set<Metric> getSearchNodeMetrics() {
        Set<Metric> metrics = new LinkedHashSet<>();

        metrics.add(new Metric("content.proton.documentdb.documents.total.last"));
        metrics.add(new Metric("content.proton.documentdb.documents.ready.last"));
        metrics.add(new Metric("content.proton.documentdb.documents.active.last"));
        metrics.add(new Metric("content.proton.documentdb.documents.removed.last"));
        metrics.add(new Metric("content.proton.documentdb.index.docs_in_memory.last"));
        metrics.add(new Metric("content.proton.documentdb.disk_usage.last"));
        metrics.add(new Metric("content.proton.documentdb.memory_usage.allocated_bytes.max"));
        metrics.add(new Metric("content.proton.documentdb.heart_beat_age.last"));
        metrics.add(new Metric("content.proton.transport.query.count.rate"));
        metrics.add(new Metric("content.proton.docsum.docs.rate"))
;
        metrics.add(new Metric("content.proton.docsum.latency.max"));
        metrics.add(new Metric("content.proton.docsum.latency.sum"));
        metrics.add(new Metric("content.proton.docsum.latency.count"));
        metrics.add(new Metric("content.proton.docsum.latency.average"));
        metrics.add(new Metric("content.proton.transport.query.latency.max"));
        metrics.add(new Metric("content.proton.transport.query.latency.sum"));
        metrics.add(new Metric("content.proton.transport.query.latency.count"));
        metrics.add(new Metric("content.proton.transport.query.latency.average"));

        // Search protocol query/docsum latency and message sizes.
        metrics.add(new Metric("content.proton.search_protocol.query.latency.max"));
        metrics.add(new Metric("content.proton.search_protocol.query.latency.sum"));
        metrics.add(new Metric("content.proton.search_protocol.query.latency.count"));
        metrics.add(new Metric("content.proton.search_protocol.query.request_size.max"));
        metrics.add(new Metric("content.proton.search_protocol.query.request_size.sum"));
        metrics.add(new Metric("content.proton.search_protocol.query.request_size.count"));
        metrics.add(new Metric("content.proton.search_protocol.query.reply_size.max"));
        metrics.add(new Metric("content.proton.search_protocol.query.reply_size.sum"));
        metrics.add(new Metric("content.proton.search_protocol.query.reply_size.count"));
        metrics.add(new Metric("content.proton.search_protocol.docsum.latency.max"));
        metrics.add(new Metric("content.proton.search_protocol.docsum.latency.sum"));
        metrics.add(new Metric("content.proton.search_protocol.docsum.latency.count"));
        metrics.add(new Metric("content.proton.search_protocol.docsum.request_size.max"));
        metrics.add(new Metric("content.proton.search_protocol.docsum.request_size.sum"));
        metrics.add(new Metric("content.proton.search_protocol.docsum.request_size.count"));
        metrics.add(new Metric("content.proton.search_protocol.docsum.reply_size.max"));
        metrics.add(new Metric("content.proton.search_protocol.docsum.reply_size.sum"));
        metrics.add(new Metric("content.proton.search_protocol.docsum.reply_size.count"));
        metrics.add(new Metric("content.proton.search_protocol.docsum.requested_documents.count"));

        // Executor metrics for each of proton's thread pools.
        addSearchNodeExecutorMetrics(metrics, "content.proton.executor.proton");
        addSearchNodeExecutorMetrics(metrics, "content.proton.executor.flush");
        addSearchNodeExecutorMetrics(metrics, "content.proton.executor.match");
        addSearchNodeExecutorMetrics(metrics, "content.proton.executor.docsum");
        addSearchNodeExecutorMetrics(metrics, "content.proton.executor.shared");
        addSearchNodeExecutorMetrics(metrics, "content.proton.executor.warmup");
        addSearchNodeExecutorMetrics(metrics, "content.proton.executor.field_writer");

        // Background job load per job type.
        metrics.add(new Metric("content.proton.documentdb.job.total.average"));
        metrics.add(new Metric("content.proton.documentdb.job.attribute_flush.average"));
        metrics.add(new Metric("content.proton.documentdb.job.memory_index_flush.average"));
        metrics.add(new Metric("content.proton.documentdb.job.disk_index_fusion.average"));
        metrics.add(new Metric("content.proton.documentdb.job.document_store_flush.average"));
        metrics.add(new Metric("content.proton.documentdb.job.document_store_compact.average"));
        metrics.add(new Metric("content.proton.documentdb.job.bucket_move.average"));
        metrics.add(new Metric("content.proton.documentdb.job.lid_space_compact.average"));
        metrics.add(new Metric("content.proton.documentdb.job.removed_documents_prune.average"));

        // Per-document-db threading service executors.
        addSearchNodeExecutorMetrics(metrics, "content.proton.documentdb.threading_service.master");
        addSearchNodeExecutorMetrics(metrics, "content.proton.documentdb.threading_service.index");
        addSearchNodeExecutorMetrics(metrics, "content.proton.documentdb.threading_service.summary");
        addSearchNodeExecutorMetrics(metrics, "content.proton.documentdb.threading_service.index_field_inverter");
        addSearchNodeExecutorMetrics(metrics, "content.proton.documentdb.threading_service.index_field_writer");
        addSearchNodeExecutorMetrics(metrics, "content.proton.documentdb.threading_service.attribute_field_writer");

        // Local document id (lid) space metrics for the ready/notready/removed sub-dbs.
        metrics.add(new Metric("content.proton.documentdb.ready.lid_space.lid_bloat_factor.average"));
        metrics.add(new Metric("content.proton.documentdb.notready.lid_space.lid_bloat_factor.average"));
        metrics.add(new Metric("content.proton.documentdb.removed.lid_space.lid_bloat_factor.average"));
        metrics.add(new Metric("content.proton.documentdb.ready.lid_space.lid_fragmentation_factor.average"));
        metrics.add(new Metric("content.proton.documentdb.notready.lid_space.lid_fragmentation_factor.average"));
        metrics.add(new Metric("content.proton.documentdb.removed.lid_space.lid_fragmentation_factor.average"));
        metrics.add(new Metric("content.proton.documentdb.ready.lid_space.lid_limit.last"));
        metrics.add(new Metric("content.proton.documentdb.notready.lid_space.lid_limit.last"));
        metrics.add(new Metric("content.proton.documentdb.removed.lid_space.lid_limit.last"));
        metrics.add(new Metric("content.proton.documentdb.ready.lid_space.highest_used_lid.last"));
        metrics.add(new Metric("content.proton.documentdb.notready.lid_space.highest_used_lid.last"));
        metrics.add(new Metric("content.proton.documentdb.removed.lid_space.highest_used_lid.last"));
        metrics.add(new Metric("content.proton.documentdb.ready.lid_space.used_lids.last"));
        metrics.add(new Metric("content.proton.documentdb.notready.lid_space.used_lids.last"));
        metrics.add(new Metric("content.proton.documentdb.removed.lid_space.used_lids.last"));

        metrics.add(new Metric("content.proton.documentdb.bucket_move.buckets_pending.last"));

        // Resource usage on the proton node.
        metrics.add(new Metric("content.proton.resource_usage.disk.average"));
        metrics.add(new Metric("content.proton.resource_usage.disk_utilization.average"));
        metrics.add(new Metric("content.proton.resource_usage.memory.average"));
        metrics.add(new Metric("content.proton.resource_usage.memory_utilization.average"));
        metrics.add(new Metric("content.proton.resource_usage.transient_memory.average"));
        metrics.add(new Metric("content.proton.resource_usage.transient_disk.average"));
        metrics.add(new Metric("content.proton.resource_usage.memory_mappings.max"));
        metrics.add(new Metric("content.proton.resource_usage.open_file_descriptors.max"));
        metrics.add(new Metric("content.proton.resource_usage.feeding_blocked.max"));
        metrics.add(new Metric("content.proton.resource_usage.malloc_arena.max"));
        metrics.add(new Metric("content.proton.documentdb.attribute.resource_usage.address_space.max"));
        metrics.add(new Metric("content.proton.documentdb.attribute.resource_usage.feeding_blocked.last"));
        metrics.add(new Metric("content.proton.documentdb.attribute.resource_usage.feeding_blocked.max"));

        // Transaction log metrics.
        metrics.add(new Metric("content.proton.transactionlog.entries.average"));
        metrics.add(new Metric("content.proton.transactionlog.disk_usage.average"));
        metrics.add(new Metric("content.proton.transactionlog.replay_time.last"));

        // Document store metrics, per sub-db (ready, notready, removed).
        metrics.add(new Metric("content.proton.documentdb.ready.document_store.disk_usage.average"));
        metrics.add(new Metric("content.proton.documentdb.ready.document_store.disk_bloat.average"));
        metrics.add(new Metric("content.proton.documentdb.ready.document_store.max_bucket_spread.average"));
        metrics.add(new Metric("content.proton.documentdb.ready.document_store.memory_usage.allocated_bytes.average"));
        metrics.add(new Metric("content.proton.documentdb.ready.document_store.memory_usage.used_bytes.average"));
        metrics.add(new Metric("content.proton.documentdb.ready.document_store.memory_usage.dead_bytes.average"));
        metrics.add(new Metric("content.proton.documentdb.ready.document_store.memory_usage.onhold_bytes.average"));
        metrics.add(new Metric("content.proton.documentdb.notready.document_store.disk_usage.average"));
        metrics.add(new Metric("content.proton.documentdb.notready.document_store.disk_bloat.average"));
        metrics.add(new Metric("content.proton.documentdb.notready.document_store.max_bucket_spread.average"));
        metrics.add(new Metric("content.proton.documentdb.notready.document_store.memory_usage.allocated_bytes.average"));
        metrics.add(new Metric("content.proton.documentdb.notready.document_store.memory_usage.used_bytes.average"));
        metrics.add(new Metric("content.proton.documentdb.notready.document_store.memory_usage.dead_bytes.average"));
        metrics.add(new Metric("content.proton.documentdb.notready.document_store.memory_usage.onhold_bytes.average"));
        metrics.add(new Metric("content.proton.documentdb.removed.document_store.disk_usage.average"));
        metrics.add(new Metric("content.proton.documentdb.removed.document_store.disk_bloat.average"));
        metrics.add(new Metric("content.proton.documentdb.removed.document_store.max_bucket_spread.average"));
        metrics.add(new Metric("content.proton.documentdb.removed.document_store.memory_usage.allocated_bytes.average"));
        metrics.add(new Metric("content.proton.documentdb.removed.document_store.memory_usage.used_bytes.average"));
        metrics.add(new Metric("content.proton.documentdb.removed.document_store.memory_usage.dead_bytes.average"));
        metrics.add(new Metric("content.proton.documentdb.removed.document_store.memory_usage.onhold_bytes.average"));

        // Document store cache metrics.
        metrics.add(new Metric("content.proton.documentdb.ready.document_store.cache.memory_usage.average"));
        metrics.add(new Metric("content.proton.documentdb.ready.document_store.cache.hit_rate.average"));
        metrics.add(new Metric("content.proton.documentdb.ready.document_store.cache.lookups.rate"));
        metrics.add(new Metric("content.proton.documentdb.ready.document_store.cache.invalidations.rate"));
        metrics.add(new Metric("content.proton.documentdb.notready.document_store.cache.memory_usage.average"));
        metrics.add(new Metric("content.proton.documentdb.notready.document_store.cache.hit_rate.average"));
        metrics.add(new Metric("content.proton.documentdb.notready.document_store.cache.lookups.rate"));
        metrics.add(new Metric("content.proton.documentdb.notready.document_store.cache.invalidations.rate"));

        // Attribute vector memory usage, per sub-db.
        metrics.add(new Metric("content.proton.documentdb.ready.attribute.memory_usage.allocated_bytes.average"));
        metrics.add(new Metric("content.proton.documentdb.ready.attribute.memory_usage.used_bytes.average"));
        metrics.add(new Metric("content.proton.documentdb.ready.attribute.memory_usage.dead_bytes.average"));
        metrics.add(new Metric("content.proton.documentdb.ready.attribute.memory_usage.onhold_bytes.average"));
        metrics.add(new Metric("content.proton.documentdb.notready.attribute.memory_usage.allocated_bytes.average"));
        metrics.add(new Metric("content.proton.documentdb.notready.attribute.memory_usage.used_bytes.average"));
        metrics.add(new Metric("content.proton.documentdb.notready.attribute.memory_usage.dead_bytes.average"));
        metrics.add(new Metric("content.proton.documentdb.notready.attribute.memory_usage.onhold_bytes.average"));

        // Index memory usage.
        metrics.add(new Metric("content.proton.documentdb.index.memory_usage.allocated_bytes.average"));
        metrics.add(new Metric("content.proton.documentdb.index.memory_usage.used_bytes.average"));
        metrics.add(new Metric("content.proton.documentdb.index.memory_usage.dead_bytes.average"));
        metrics.add(new Metric("content.proton.documentdb.index.memory_usage.onhold_bytes.average"));

        // Matching metrics (per document db).
        metrics.add(new Metric("content.proton.documentdb.matching.queries.rate"));
        metrics.add(new Metric("content.proton.documentdb.matching.soft_doomed_queries.rate"));
        metrics.add(new Metric("content.proton.documentdb.matching.query_latency.max"));
        metrics.add(new Metric("content.proton.documentdb.matching.query_latency.sum"));
        metrics.add(new Metric("content.proton.documentdb.matching.query_latency.count"));
        metrics.add(new Metric("content.proton.documentdb.matching.query_latency.average"));
        metrics.add(new Metric("content.proton.documentdb.matching.query_collateral_time.max"));
        metrics.add(new Metric("content.proton.documentdb.matching.query_collateral_time.sum"));
        metrics.add(new Metric("content.proton.documentdb.matching.query_collateral_time.count"));
        metrics.add(new Metric("content.proton.documentdb.matching.query_collateral_time.average"));
        metrics.add(new Metric("content.proton.documentdb.matching.query_setup_time.max"));
        metrics.add(new Metric("content.proton.documentdb.matching.query_setup_time.sum"));
        metrics.add(new Metric("content.proton.documentdb.matching.query_setup_time.count"));
        metrics.add(new Metric("content.proton.documentdb.matching.docs_matched.rate"));
        metrics.add(new Metric("content.proton.documentdb.matching.docs_matched.max"));
        metrics.add(new Metric("content.proton.documentdb.matching.docs_matched.sum"));
        metrics.add(new Metric("content.proton.documentdb.matching.docs_matched.count"));

        // Matching metrics, broken down per rank profile.
        metrics.add(new Metric("content.proton.documentdb.matching.rank_profile.queries.rate"));
        metrics.add(new Metric("content.proton.documentdb.matching.rank_profile.soft_doomed_queries.rate"));
        metrics.add(new Metric("content.proton.documentdb.matching.rank_profile.soft_doom_factor.min"));
        metrics.add(new Metric("content.proton.documentdb.matching.rank_profile.soft_doom_factor.max"));
        metrics.add(new Metric("content.proton.documentdb.matching.rank_profile.soft_doom_factor.sum"));
        metrics.add(new Metric("content.proton.documentdb.matching.rank_profile.soft_doom_factor.count"));
        metrics.add(new Metric("content.proton.documentdb.matching.rank_profile.query_latency.max"));
        metrics.add(new Metric("content.proton.documentdb.matching.rank_profile.query_latency.sum"));
        metrics.add(new Metric("content.proton.documentdb.matching.rank_profile.query_latency.count"));
        metrics.add(new Metric("content.proton.documentdb.matching.rank_profile.query_latency.average"));
        metrics.add(new Metric("content.proton.documentdb.matching.rank_profile.query_collateral_time.max"));
        metrics.add(new Metric("content.proton.documentdb.matching.rank_profile.query_collateral_time.sum"));
        metrics.add(new Metric("content.proton.documentdb.matching.rank_profile.query_collateral_time.count"));
        metrics.add(new Metric("content.proton.documentdb.matching.rank_profile.query_collateral_time.average"));
        metrics.add(new Metric("content.proton.documentdb.matching.rank_profile.query_setup_time.max"));
        metrics.add(new Metric("content.proton.documentdb.matching.rank_profile.query_setup_time.sum"));
        metrics.add(new Metric("content.proton.documentdb.matching.rank_profile.query_setup_time.count"));
        metrics.add(new Metric("content.proton.documentdb.matching.rank_profile.rerank_time.max"));
        metrics.add(new Metric("content.proton.documentdb.matching.rank_profile.rerank_time.sum"));
        metrics.add(new Metric("content.proton.documentdb.matching.rank_profile.rerank_time.count"));
        metrics.add(new Metric("content.proton.documentdb.matching.rank_profile.rerank_time.average"));
        metrics.add(new Metric("content.proton.documentdb.matching.rank_profile.docs_matched.rate"));
        metrics.add(new Metric("content.proton.documentdb.matching.rank_profile.docs_matched.max"));
        metrics.add(new Metric("content.proton.documentdb.matching.rank_profile.docs_matched.sum"));
        metrics.add(new Metric("content.proton.documentdb.matching.rank_profile.docs_matched.count"));
        metrics.add(new Metric("content.proton.documentdb.matching.rank_profile.limited_queries.rate"));

        // Feeding/commit metrics.
        metrics.add(new Metric("content.proton.documentdb.feeding.commit.operations.max"));
        metrics.add(new Metric("content.proton.documentdb.feeding.commit.operations.sum"));
        metrics.add(new Metric("content.proton.documentdb.feeding.commit.operations.count"));
        metrics.add(new Metric("content.proton.documentdb.feeding.commit.operations.rate"));
        metrics.add(new Metric("content.proton.documentdb.feeding.commit.latency.max"));
        metrics.add(new Metric("content.proton.documentdb.feeding.commit.latency.sum"));
        metrics.add(new Metric("content.proton.documentdb.feeding.commit.latency.count"));

        return metrics;
    }

    /**
     * Adds one metric per aggregate suffix, named {@code metricName + "." + suffix}.
     * (Parameter name keeps the original spelling "aggregateSuffices".)
     */
    private static void addMetric(Set<Metric> metrics, String metricName, Iterable<String> aggregateSuffices) {
        for (String suffix : aggregateSuffices) {
            metrics.add(new Metric(metricName + "." + suffix));
        }
    }

}
Consider adding a test case for the default value of 1 (where maxCompactBuffers is not specified in TestProperties).
// Verifies that the proton max-compact-buffers config follows the value set via TestProperties.maxCompactBuffers().
// NOTE(review): only explicit overrides (2 and 7) are exercised here; consider also asserting the behavior when
// maxCompactBuffers is NOT specified in TestProperties (presumably a default of 1 -- TODO confirm against ProtonConfig).
public void default_max_compact_buffers_config_controlled_by_properties() { assertEquals(2, resolveMaxCompactBuffers(2)); assertEquals(7, resolveMaxCompactBuffers(7)); }
assertEquals(2, resolveMaxCompactBuffers(2));
// Verifies resolution of the proton max-compact-buffers config: when no override is given
// (OptionalInt.empty()) the resolved value is 1, while explicit overrides (2, 7) are passed through unchanged.
public void default_max_compact_buffers_config_controlled_by_properties() { assertEquals(1, resolveMaxCompactBuffers(OptionalInt.empty())); assertEquals(2, resolveMaxCompactBuffers(OptionalInt.of(2))); assertEquals(7, resolveMaxCompactBuffers(OptionalInt.of(7))); }
class ContentClusterTest extends ContentBaseTest { private final static String HOSTS = "<admin version='2.0'><adminserver hostalias='mockhost' /></admin>"; @Rule public ExpectedException expectedException = ExpectedException.none(); ContentCluster parse(String xml) { xml = HOSTS + xml; TestRoot root = new TestDriver().buildModel(xml); return root.getConfigModels(Content.class).get(0).getCluster(); } @Test public void testHierarchicRedundancy() { ContentCluster cc = parse("" + "<content version=\"1.0\" id=\"storage\">\n" + " <documents/>" + " <engine>" + " <proton>" + " <searchable-copies>3</searchable-copies>" + " </proton>" + " </engine>" + " <redundancy>15</redundancy>\n" + " <group name='root' distribution-key='0'>" + " <distribution partitions='1|1|*'/>" + " <group name='g-1' distribution-key='0'>" + " <node hostalias='mockhost' distribution-key='0'/>" + " <node hostalias='mockhost' distribution-key='1'/>" + " <node hostalias='mockhost' distribution-key='2'/>" + " <node hostalias='mockhost' distribution-key='3'/>" + " <node hostalias='mockhost' distribution-key='4'/>" + " </group>" + " <group name='g-2' distribution-key='1'>" + " <node hostalias='mockhost' distribution-key='5'/>" + " <node hostalias='mockhost' distribution-key='6'/>" + " <node hostalias='mockhost' distribution-key='7'/>" + " <node hostalias='mockhost' distribution-key='8'/>" + " <node hostalias='mockhost' distribution-key='9'/>" + " </group>" + " <group name='g-3' distribution-key='1'>" + " <node hostalias='mockhost' distribution-key='10'/>" + " <node hostalias='mockhost' distribution-key='11'/>" + " <node hostalias='mockhost' distribution-key='12'/>" + " <node hostalias='mockhost' distribution-key='13'/>" + " <node hostalias='mockhost' distribution-key='14'/>" + " </group>" + " </group>" + "</content>" ); DistributionConfig.Builder distributionBuilder = new DistributionConfig.Builder(); cc.getConfig(distributionBuilder); DistributionConfig distributionConfig = distributionBuilder.build(); 
assertEquals(3, distributionConfig.cluster("storage").ready_copies()); assertEquals(15, distributionConfig.cluster("storage").initial_redundancy()); assertEquals(15, distributionConfig.cluster("storage").redundancy()); assertEquals(4, distributionConfig.cluster("storage").group().size()); assertEquals(1, distributionConfig.cluster().size()); StorDistributionConfig.Builder storBuilder = new StorDistributionConfig.Builder(); cc.getConfig(storBuilder); StorDistributionConfig storConfig = new StorDistributionConfig(storBuilder); assertEquals(15, storConfig.initial_redundancy()); assertEquals(15, storConfig.redundancy()); assertEquals(3, storConfig.ready_copies()); ProtonConfig.Builder protonBuilder = new ProtonConfig.Builder(); cc.getSearch().getConfig(protonBuilder); ProtonConfig protonConfig = new ProtonConfig(protonBuilder); assertEquals(1, protonConfig.distribution().searchablecopies()); assertEquals(5, protonConfig.distribution().redundancy()); } @Test public void testRedundancy() { ContentCluster cc = parse("" + "<content version=\"1.0\" id=\"storage\">\n" + " <documents/>" + " <engine>" + " <proton>" + " <searchable-copies>3</searchable-copies>" + " </proton>" + " </engine>" + " <redundancy reply-after='4'>5</redundancy>\n" + " <group>" + " <node hostalias='mockhost' distribution-key='0'/>" + " <node hostalias='mockhost' distribution-key='1'/>" + " <node hostalias='mockhost' distribution-key='2'/>" + " <node hostalias='mockhost' distribution-key='3'/>" + " <node hostalias='mockhost' distribution-key='4'/>" + " </group>" + "</content>" ); DistributionConfig.Builder distributionBuilder = new DistributionConfig.Builder(); cc.getConfig(distributionBuilder); DistributionConfig distributionConfig = distributionBuilder.build(); assertEquals(3, distributionConfig.cluster("storage").ready_copies()); assertEquals(4, distributionConfig.cluster("storage").initial_redundancy()); assertEquals(5, distributionConfig.cluster("storage").redundancy()); 
StorDistributionConfig.Builder storBuilder = new StorDistributionConfig.Builder(); cc.getConfig(storBuilder); StorDistributionConfig storConfig = new StorDistributionConfig(storBuilder); assertEquals(4, storConfig.initial_redundancy()); assertEquals(5, storConfig.redundancy()); assertEquals(3, storConfig.ready_copies()); ProtonConfig.Builder protonBuilder = new ProtonConfig.Builder(); cc.getSearch().getConfig(protonBuilder); ProtonConfig protonConfig = new ProtonConfig(protonBuilder); assertEquals(3, protonConfig.distribution().searchablecopies()); assertEquals(5, protonConfig.distribution().redundancy()); } @Test public void testNoId() { ContentCluster c = parse( "<content version=\"1.0\">\n" + " <redundancy>1</redundancy>\n" + " <documents/>" + " <redundancy reply-after=\"4\">5</redundancy>\n" + " <group>" + " <node hostalias=\"mockhost\" distribution-key=\"0\"/>\"" + " </group>" + "</content>" ); assertEquals("content", c.getName()); } @Test public void testRedundancyDefaults() { ContentCluster cc = parse( "<content version=\"1.0\" id=\"storage\">\n" + " <documents/>" + " <group>" + " <node hostalias=\"mockhost\" distribution-key=\"0\"/>\"" + " <node hostalias=\"mockhost\" distribution-key=\"1\"/>\"" + " <node hostalias=\"mockhost\" distribution-key=\"2\"/>\"" + " </group>" + "</content>" ); DistributionConfig.Builder distributionBuilder = new DistributionConfig.Builder(); cc.getConfig(distributionBuilder); DistributionConfig distributionConfig = distributionBuilder.build(); assertEquals(3, distributionConfig.cluster("storage").redundancy()); StorDistributionConfig.Builder builder = new StorDistributionConfig.Builder(); cc.getConfig(builder); StorDistributionConfig config = new StorDistributionConfig(builder); assertEquals(2, config.initial_redundancy()); assertEquals(3, config.redundancy()); assertEquals(2, config.ready_copies()); } @Test public void testEndToEnd() { String xml = "<?xml version=\"1.0\" encoding=\"utf-8\" ?>\n" + "<services>\n" + "\n" + " <admin 
version=\"2.0\">\n" + " <adminserver hostalias=\"configserver\" />\n" + " <logserver hostalias=\"logserver\" />\n" + " <slobroks>\n" + " <slobrok hostalias=\"configserver\" />\n" + " <slobrok hostalias=\"logserver\" />\n" + " </slobroks>\n" + " <cluster-controllers>\n" + " <cluster-controller hostalias=\"configserver\"/>" + " <cluster-controller hostalias=\"configserver2\"/>" + " <cluster-controller hostalias=\"configserver3\"/>" + " </cluster-controllers>\n" + " </admin>\n" + " <content version='1.0' id='bar'>" + " <redundancy>1</redundancy>\n" + " <documents>" + " <document type=\"type1\" mode=\"index\"/>\n" + " <document type=\"type2\" mode=\"index\"/>\n" + " </documents>\n" + " <group>" + " <node hostalias='node0' distribution-key='0' />" + " </group>" + " <tuning>" + " <cluster-controller>\n" + " <init-progress-time>34567</init-progress-time>" + " </cluster-controller>" + " </tuning>" + " </content>" + "\n" + "</services>"; List<String> sds = ApplicationPackageUtils.generateSchemas("type1", "type2"); VespaModel model = new VespaModelCreatorWithMockPkg(null, xml, sds).create(); assertEquals(2, model.getContentClusters().get("bar").getDocumentDefinitions().size()); ContainerCluster<?> cluster = model.getAdmin().getClusterControllers(); assertEquals(3, cluster.getContainers().size()); } VespaModel createEnd2EndOneNode(ModelContext.Properties properties) { String services = "<?xml version='1.0' encoding='UTF-8' ?>" + "<services version='1.0'>" + " <admin version='2.0'>" + " <adminserver hostalias='node1'/>" + " </admin>" + " <container id='default' version='1.0'>" + " <search/>" + " <nodes>" + " <node hostalias='node1'/>" + " </nodes>" + " </container>" + " <content id='storage' version='1.0'>" + " <redundancy>2</redundancy>" + " <group>" + " <node distribution-key='0' hostalias='node1'/>" + " <node distribution-key='1' hostalias='node1'/>" + " </group>" + " <tuning>" + " <cluster-controller>" + " <transition-time>0</transition-time>" + " </cluster-controller>" + 
" </tuning>" + " <documents>" + " <document mode='index' type='type1'/>" + " </documents>" + " <engine>" + " <proton/>" + " </engine>" + " </content>" + " </services>"; return createEnd2EndOneNode(properties, services); } VespaModel createEnd2EndOneNode(ModelContext.Properties properties, String services) { DeployState.Builder deployStateBuilder = new DeployState.Builder().properties(properties); List<String> sds = ApplicationPackageUtils.generateSchemas("type1"); return (new VespaModelCreatorWithMockPkg(null, services, sds)).create(deployStateBuilder); } @Test public void testEndToEndOneNode() { VespaModel model = createEnd2EndOneNode(new TestProperties()); assertEquals(1, model.getContentClusters().get("storage").getDocumentDefinitions().size()); ContainerCluster<?> cluster = model.getAdmin().getClusterControllers(); assertEquals(1, cluster.getContainers().size()); } @Test public void testSearchTuning() { String xml = "<?xml version=\"1.0\" encoding=\"utf-8\" ?>\n" + "<services>\n" + "\n" + " <admin version=\"2.0\">\n" + " <adminserver hostalias=\"node0\" />\n" + " <cluster-controllers>\n" + " <cluster-controller hostalias=\"node0\"/>" + " </cluster-controllers>\n" + " </admin>\n" + " <content version='1.0' id='bar'>" + " <redundancy>1</redundancy>\n" + " <documents>" + " <document type=\"type1\" mode='index'/>\n" + " <document type=\"type2\" mode='index'/>\n" + " </documents>\n" + " <group>" + " <node hostalias='node0' distribution-key='0'/>" + " </group>" + " <tuning>\n" + " <cluster-controller>" + " <init-progress-time>34567</init-progress-time>" + " </cluster-controller>" + " </tuning>" + " </content>" + "\n" + "</services>"; List<String> sds = ApplicationPackageUtils.generateSchemas("type1", "type2"); VespaModel model = new VespaModelCreatorWithMockPkg(getHosts(), xml, sds).create(); assertTrue(model.getContentClusters().get("bar").getPersistence() instanceof ProtonEngine.Factory); { StorDistributormanagerConfig.Builder builder = new 
StorDistributormanagerConfig.Builder(); model.getConfig(builder, "bar/distributor/0"); StorDistributormanagerConfig config = new StorDistributormanagerConfig(builder); assertFalse(config.inlinebucketsplitting()); } { StorFilestorConfig.Builder builder = new StorFilestorConfig.Builder(); model.getConfig(builder, "bar/storage/0"); StorFilestorConfig config = new StorFilestorConfig(builder); assertFalse(config.enable_multibit_split_optimalization()); } } @Test public void testRedundancyRequired() { String xml = "<?xml version=\"1.0\" encoding=\"utf-8\" ?>\n" + "<services>\n" + "\n" + " <admin version=\"2.0\">\n" + " <adminserver hostalias=\"node0\" />\n" + " </admin>\n" + " <content version='1.0' id='bar'>" + " <documents>" + " <document type=\"type1\" mode='index'/>\n" + " </documents>\n" + " <group>\n" + " <node hostalias='node0' distribution-key='0'/>\n" + " </group>\n" + " </content>\n" + "</services>\n"; List<String> sds = ApplicationPackageUtils.generateSchemas("type1", "type2"); try{ new VespaModelCreatorWithMockPkg(getHosts(), xml, sds).create(); fail("Deploying without redundancy should fail"); } catch (IllegalArgumentException e) { assertTrue(e.getMessage(), e.getMessage().contains("missing required element \"redundancy\"")); } } @Test public void testRedundancyFinalLessThanInitial() { try { parse( "<content version=\"1.0\" id=\"storage\">\n" + " <redundancy reply-after=\"4\">2</redundancy>\n" + " <group>" + " <node hostalias='node0' distribution-key='0' />" + " </group>" + "</content>" ); fail("no exception thrown"); } catch (Exception e) { /* ignore */ } } @Test public void testReadyTooHigh() { try { parse( "<content version=\"1.0\" id=\"storage\">\n" + " <engine>" + " <proton>" + " <searchable-copies>3</searchable-copies>" + " </proton>" + " </engine>" + " <redundancy>2</redundancy>\n" + " <group>" + " <node hostalias='node0' distribution-key='0' />" + " </group>" + "</content>" ); fail("no exception thrown"); } catch (Exception e) { /* ignore */ } } 
FleetcontrollerConfig getFleetControllerConfig(String xml) { ContentCluster cluster = parse(xml); FleetcontrollerConfig.Builder builder = new FleetcontrollerConfig.Builder(); cluster.getConfig(builder); cluster.getClusterControllerConfig().getConfig(builder); return new FleetcontrollerConfig(builder); } @Test public void testFleetControllerOverride() { { FleetcontrollerConfig config = getFleetControllerConfig( "<content version=\"1.0\" id=\"storage\">\n" + " <documents/>" + " <group>\n" + " <node distribution-key=\"0\" hostalias=\"mockhost\"/>\n" + " </group>\n" + "</content>" ); assertEquals(0, config.min_storage_up_ratio(), 0.01); assertEquals(0, config.min_distributor_up_ratio(), 0.01); assertEquals(1, config.min_storage_up_count()); assertEquals(1, config.min_distributors_up_count()); } { FleetcontrollerConfig config = getFleetControllerConfig( "<content version=\"1.0\" id=\"storage\">\n" + " <documents/>" + " <group>\n" + " <node distribution-key=\"0\" hostalias=\"mockhost\"/>\n" + " <node distribution-key=\"1\" hostalias=\"mockhost\"/>\n" + " <node distribution-key=\"2\" hostalias=\"mockhost\"/>\n" + " <node distribution-key=\"3\" hostalias=\"mockhost\"/>\n" + " <node distribution-key=\"4\" hostalias=\"mockhost\"/>\n" + " <node distribution-key=\"5\" hostalias=\"mockhost\"/>\n" + " </group>\n" + "</content>" ); assertNotSame(0, config.min_storage_up_ratio()); } } @Test public void testImplicitDistributionBits() { ContentCluster cluster = parse( "<content version=\"1.0\" id=\"storage\">\n" + " <documents/>" + " <group>\n" + " <node distribution-key=\"0\" hostalias=\"mockhost\"/>\n" + " </group>\n" + "</content>" ); assertDistributionBitsInConfig(cluster, 8); cluster = parse( "<content version=\"1.0\" id=\"storage\">\n" + " <documents/>" + " <group>\n" + " <node distribution-key=\"0\" hostalias=\"mockhost\"/>\n" + " </group>\n" + "</content>" ); assertDistributionBitsInConfig(cluster, 8); } @Test public void testExplicitDistributionBits() { ContentCluster 
cluster = parse( "<content version=\"1.0\" id=\"storage\">\n" + " <documents/>" + " <group>\n" + " <node distribution-key=\"0\" hostalias=\"mockhost\"/>\n" + " </group>\n" + " <tuning>\n" + " <distribution type=\"strict\"/>\n" + " </tuning>\n" + "</content>" ); assertDistributionBitsInConfig(cluster, 8); cluster = parse( "<content version=\"1.0\" id=\"storage\">\n" + " <documents/>" + " <group>\n" + " <node distribution-key=\"0\" hostalias=\"mockhost\"/>\n" + " </group>\n" + " <tuning>\n" + " <distribution type=\"loose\"/>\n" + " </tuning>\n" + "</content>" ); assertDistributionBitsInConfig(cluster, 8); } @Test public void testZoneDependentDistributionBits() throws Exception { String xml = new ContentClusterBuilder().docTypes("test").getXml(); ContentCluster prodWith16Bits = createWithZone(xml, new Zone(Environment.prod, RegionName.from("us-east-3"))); assertDistributionBitsInConfig(prodWith16Bits, 16); ContentCluster stagingNot16Bits = createWithZone(xml, new Zone(Environment.staging, RegionName.from("us-east-3"))); assertDistributionBitsInConfig(stagingNot16Bits, 8); } @Test public void testGenerateSearchNodes() { ContentCluster cluster = parse( "<content version=\"1.0\" id=\"storage\">\n" + " <documents/>" + " <engine>" + " <proton/>" + " </engine>" + " <group>\n" + " <node distribution-key=\"0\" hostalias=\"mockhost\"/>\n" + " <node distribution-key=\"1\" hostalias=\"mockhost\"/>\n" + " </group>\n" + "</content>" ); { StorServerConfig.Builder builder = new StorServerConfig.Builder(); cluster.getStorageCluster().getConfig(builder); cluster.getStorageCluster().getChildren().get("0").getConfig(builder); StorServerConfig config = new StorServerConfig(builder); } { StorServerConfig.Builder builder = new StorServerConfig.Builder(); cluster.getStorageCluster().getConfig(builder); cluster.getStorageCluster().getChildren().get("1").getConfig(builder); StorServerConfig config = new StorServerConfig(builder); } } @Test public void testAlternativeNodeSyntax() { 
ContentCluster cluster = parse( "<content version=\"1.0\" id=\"test\">\n" + " <documents/>" + " <engine>" + " <proton/>" + " </engine>" + " <nodes>\n" + " <node distribution-key=\"0\" hostalias=\"mockhost\"/>\n" + " <node distribution-key=\"1\" hostalias=\"mockhost\"/>\n" + " </nodes>\n" + "</content>" ); DistributionConfig.Builder bob = new DistributionConfig.Builder(); cluster.getConfig(bob); DistributionConfig.Cluster.Group group = bob.build().cluster("test").group(0); assertEquals("invalid", group.name()); assertEquals("invalid", group.index()); assertEquals(2, group.nodes().size()); StorDistributionConfig.Builder builder = new StorDistributionConfig.Builder(); cluster.getConfig(builder); StorDistributionConfig config = new StorDistributionConfig(builder); assertEquals("invalid", config.group(0).name()); assertEquals("invalid", config.group(0).index()); assertEquals(2, config.group(0).nodes().size()); } @Test public void testReadyWhenInitialOne() { StorDistributionConfig.Builder builder = new StorDistributionConfig.Builder(); parse( "<content version=\"1.0\" id=\"storage\">\n" + " <documents/>" + " <redundancy>1</redundancy>\n" + " <group>\n" + " <node distribution-key=\"0\" hostalias=\"mockhost\"/>" + " </group>" + "</content>" ).getConfig(builder); StorDistributionConfig config = new StorDistributionConfig(builder); assertEquals(1, config.initial_redundancy()); assertEquals(1, config.redundancy()); assertEquals(1, config.ready_copies()); } public void testProvider(String tagName, StorServerConfig.Persistence_provider.Type.Enum type) { ContentCluster cluster = parse( "<content version=\"1.0\" id=\"storage\">\n" + " <documents/>" + " <redundancy>3</redundancy>" + " <engine>\n" + " <" + tagName + "/>\n" + " </engine>\n" + " <group>\n" + " <node distribution-key=\"0\" hostalias=\"mockhost\"/>" + " </group>" + "</content>" ); { StorServerConfig.Builder builder = new StorServerConfig.Builder(); cluster.getStorageCluster().getConfig(builder); 
cluster.getStorageCluster().getChildren().get("0").getConfig(builder); StorServerConfig config = new StorServerConfig(builder); assertEquals(type, config.persistence_provider().type()); } { StorServerConfig.Builder builder = new StorServerConfig.Builder(); cluster.getDistributorNodes().getConfig(builder); cluster.getDistributorNodes().getChildren().get("0").getConfig(builder); StorServerConfig config = new StorServerConfig(builder); assertEquals(type, config.persistence_provider().type()); } } @Test public void testProviders() { testProvider("proton", StorServerConfig.Persistence_provider.Type.RPC); testProvider("dummy", StorServerConfig.Persistence_provider.Type.DUMMY); } @Test public void testMetrics() { MetricsmanagerConfig.Builder builder = new MetricsmanagerConfig.Builder(); ContentCluster cluster = parse("<content version=\"1.0\" id=\"storage\">\n" + " <documents/>" + " <group>\n" + " <node distribution-key=\"0\" hostalias=\"mockhost\"/>\n" + " </group>\n" + "</content>" ); cluster.getConfig(builder); MetricsmanagerConfig config = new MetricsmanagerConfig(builder); assertEquals(6, config.consumer().size()); assertEquals("status", config.consumer(0).name()); assertEquals("*", config.consumer(0).addedmetrics(0)); assertEquals("partofsum", config.consumer(0).removedtags(0)); assertEquals("log", config.consumer(1).name()); assertEquals("logdefault", config.consumer(1).tags().get(0)); assertEquals("loadtype", config.consumer(1).removedtags(0)); assertEquals("yamas", config.consumer(2).name()); assertEquals("yamasdefault", config.consumer(2).tags().get(0)); assertEquals("loadtype", config.consumer(2).removedtags(0)); assertEquals("health", config.consumer(3).name()); assertEquals("statereporter", config.consumer(5).name()); assertEquals("*", config.consumer(5).addedmetrics(0)); assertEquals("thread", config.consumer(5).removedtags(0)); assertEquals("partofsum", config.consumer(5).removedtags(1)); assertEquals(0, config.consumer(5).tags().size()); 
cluster.getStorageCluster().getConfig(builder); config = new MetricsmanagerConfig(builder); assertEquals(6, config.consumer().size()); assertEquals("fleetcontroller", config.consumer(4).name()); assertEquals(4, config.consumer(4).addedmetrics().size()); assertEquals("vds.datastored.alldisks.docs", config.consumer(4).addedmetrics(0)); assertEquals("vds.datastored.alldisks.bytes", config.consumer(4).addedmetrics(1)); assertEquals("vds.datastored.alldisks.buckets", config.consumer(4).addedmetrics(2)); assertEquals("vds.datastored.bucket_space.buckets_total", config.consumer(4).addedmetrics(3)); } public MetricsmanagerConfig.Consumer getConsumer(String consumer, MetricsmanagerConfig config) { for (MetricsmanagerConfig.Consumer c : config.consumer()) { if (c.name().equals(consumer)) { return c; } } return null; } @Test public void testConfiguredMetrics() { String xml = "" + "<services>" + "<content version=\"1.0\" id=\"storage\">\n" + " <redundancy>1</redundancy>\n" + " <documents>" + " <document type=\"type1\" mode='index'/>\n" + " <document type=\"type2\" mode='index'/>\n" + " </documents>" + " <group>\n" + " <node distribution-key=\"0\" hostalias=\"node0\"/>\n" + " </group>\n" + "</content>" + "<admin version=\"2.0\">" + " <logserver hostalias=\"node0\"/>" + " <adminserver hostalias=\"node0\"/>" + "</admin>" + "</services>"; List<String> sds = ApplicationPackageUtils.generateSchemas("type1", "type2"); VespaModel model = new VespaModelCreatorWithMockPkg(getHosts(), xml, sds).create(); { MetricsmanagerConfig.Builder builder = new MetricsmanagerConfig.Builder(); model.getConfig(builder, "storage/storage/0"); MetricsmanagerConfig config = new MetricsmanagerConfig(builder); String expected = "[vds.filestor.alldisks.allthreads.put.sum\n" + "vds.filestor.alldisks.allthreads.get.sum\n" + "vds.filestor.alldisks.allthreads.remove.sum\n" + "vds.filestor.alldisks.allthreads.update.sum\n" + "vds.datastored.alldisks.docs\n" + "vds.datastored.alldisks.bytes\n" + 
"vds.filestor.alldisks.queuesize\n" + "vds.filestor.alldisks.averagequeuewait.sum\n" + "vds.visitor.cv_queuewaittime\n" + "vds.visitor.allthreads.averagequeuewait\n" + "vds.visitor.allthreads.averagevisitorlifetime\n" + "vds.visitor.allthreads.created.sum]"; String actual = getConsumer("log", config).addedmetrics().toString().replaceAll(", ", "\n"); assertEquals(expected, actual); assertEquals("[logdefault]", getConsumer("log", config).tags().toString()); expected = "[vds.datastored.alldisks.docs\n" + "vds.datastored.alldisks.bytes\n" + "vds.datastored.alldisks.buckets\n" + "vds.datastored.bucket_space.buckets_total]"; actual = getConsumer("fleetcontroller", config).addedmetrics().toString().replaceAll(", ", "\n"); assertEquals(expected, actual); } { MetricsmanagerConfig.Builder builder = new MetricsmanagerConfig.Builder(); model.getConfig(builder, "storage/distributor/0"); MetricsmanagerConfig config = new MetricsmanagerConfig(builder); assertEquals("[logdefault]", getConsumer("log", config).tags().toString()); } } @Test public void flush_on_shutdown_is_default_on_for_non_hosted() throws Exception { assertPrepareRestartCommand(createOneNodeCluster(false)); } @Test public void flush_on_shutdown_can_be_turned_off_for_non_hosted() throws Exception { assertNoPreShutdownCommand(createClusterWithFlushOnShutdownOverride(false, false)); } @Test public void flush_on_shutdown_is_default_on_for_hosted() throws Exception { assertPrepareRestartCommand(createOneNodeCluster(true)); } @Test public void flush_on_shutdown_can_be_turned_on_for_hosted() throws Exception { assertPrepareRestartCommand(createClusterWithFlushOnShutdownOverride(true, true)); } private static String oneNodeClusterXml() { return "<content version=\"1.0\" id=\"mockcluster\">" + " <documents/>" + " <group>" + " <node distribution-key=\"0\" hostalias=\"mockhost\"/>" + " </group>" + "</content>"; } private static ContentCluster createOneNodeCluster(boolean isHostedVespa) throws Exception { return 
createOneNodeCluster(oneNodeClusterXml(), new TestProperties().setHostedVespa(isHostedVespa)); } private static ContentCluster createOneNodeCluster(TestProperties props) throws Exception { return createOneNodeCluster(oneNodeClusterXml(), props); } private static ContentCluster createOneNodeCluster(TestProperties props, Optional<Flavor> flavor) throws Exception { return createOneNodeCluster(oneNodeClusterXml(), props, flavor); } private static ContentCluster createClusterWithFlushOnShutdownOverride(boolean flushOnShutdown, boolean isHostedVespa) throws Exception { return createOneNodeCluster("<content version=\"1.0\" id=\"mockcluster\">" + " <documents/>" + " <engine>" + " <proton>" + " <flush-on-shutdown>" + flushOnShutdown + "</flush-on-shutdown>" + " </proton>" + " </engine>" + " <group>" + " <node distribution-key=\"0\" hostalias=\"mockhost\"/>" + " </group>" + "</content>", new TestProperties().setHostedVespa(isHostedVespa)); } private static ContentCluster createOneNodeCluster(String clusterXml, TestProperties props) throws Exception { return createOneNodeCluster(clusterXml, props, Optional.empty()); } private static ContentCluster createOneNodeCluster(String clusterXml, TestProperties props, Optional<Flavor> flavor) throws Exception { DeployState.Builder deployStateBuilder = new DeployState.Builder() .properties(props); MockRoot root = flavor.isPresent() ? 
ContentClusterUtils.createMockRoot(new SingleNodeProvisioner(flavor.get()), Collections.emptyList(), deployStateBuilder) : ContentClusterUtils.createMockRoot(Collections.emptyList(), deployStateBuilder); ContentCluster cluster = ContentClusterUtils.createCluster(clusterXml, root); root.freezeModelTopology(); cluster.validate(); return cluster; } private static void assertPrepareRestartCommand(ContentCluster cluster) { Optional<String> command = cluster.getSearch().getSearchNodes().get(0).getPreShutdownCommand(); assertTrue(command.isPresent()); assertTrue(command.get().matches(".*vespa-proton-cmd [0-9]+ prepareRestart")); } private static void assertNoPreShutdownCommand(ContentCluster cluster) { Optional<String> command = cluster.getSearch().getSearchNodes().get(0).getPreShutdownCommand(); assertFalse(command.isPresent()); } @Test public void reserved_document_name_throws_exception() { expectedException.expect(IllegalArgumentException.class); expectedException.expectMessage("The following document types conflict with reserved keyword names: 'true'."); String xml = "<content version=\"1.0\" id=\"storage\">" + " <redundancy>1</redundancy>" + " <documents>" + " <document type=\"true\" mode=\"index\"/>" + " </documents>" + " <group>" + " <node distribution-key=\"0\" hostalias=\"mockhost\"/>" + " </group>" + "</content>"; List<String> sds = ApplicationPackageUtils.generateSchemas("true"); new VespaModelCreatorWithMockPkg(null, xml, sds).create(); } private void assertClusterHasBucketSpaceMappings(AllClustersBucketSpacesConfig config, String clusterId, List<String> defaultSpaceTypes, List<String> globalSpaceTypes) { AllClustersBucketSpacesConfig.Cluster cluster = config.cluster(clusterId); assertNotNull(cluster); assertEquals(defaultSpaceTypes.size() + globalSpaceTypes.size(), cluster.documentType().size()); assertClusterHasTypesInBucketSpace(cluster, "default", defaultSpaceTypes); assertClusterHasTypesInBucketSpace(cluster, "global", globalSpaceTypes); } private void 
assertClusterHasTypesInBucketSpace(AllClustersBucketSpacesConfig.Cluster cluster, String bucketSpace, List<String> expectedTypes) { for (String type : expectedTypes) { assertNotNull(cluster.documentType(type)); assertEquals(bucketSpace, cluster.documentType(type).bucketSpace()); } } private VespaModel createDualContentCluster() { String xml = "<services>" + "<admin version=\"2.0\">" + " <adminserver hostalias=\"node0\"/>" + "</admin>" + "<content version=\"1.0\" id=\"foo_c\">" + " <redundancy>1</redundancy>" + " <documents>" + " <document type=\"bunnies\" mode=\"index\"/>" + " <document type=\"hares\" mode=\"index\"/>" + " </documents>" + " <group>" + " <node distribution-key=\"0\" hostalias=\"node0\"/>" + " </group>" + "</content>" + "<content version=\"1.0\" id=\"bar_c\">" + " <redundancy>1</redundancy>" + " <documents>" + " <document type=\"rabbits\" mode=\"index\" global=\"true\"/>" + " </documents>" + " <group>" + " <node distribution-key=\"0\" hostalias=\"node0\"/>" + " </group>" + "</content>" + "</services>"; List<String> sds = ApplicationPackageUtils.generateSchemas("bunnies", "hares", "rabbits"); return new VespaModelCreatorWithMockPkg(getHosts(), xml, sds).create(); } @Test public void all_clusters_bucket_spaces_config_contains_mappings_across_all_clusters() { VespaModel model = createDualContentCluster(); AllClustersBucketSpacesConfig.Builder builder = new AllClustersBucketSpacesConfig.Builder(); model.getConfig(builder, "client"); AllClustersBucketSpacesConfig config = builder.build(); assertEquals(2, config.cluster().size()); assertClusterHasBucketSpaceMappings(config, "foo_c", Arrays.asList("bunnies", "hares"), Collections.emptyList()); assertClusterHasBucketSpaceMappings(config, "bar_c", Collections.emptyList(), Collections.singletonList("rabbits")); } @Test public void test_routing_with_multiple_clusters() { VespaModel model = createDualContentCluster(); Routing routing = model.getRouting(); assertNotNull(routing); assertEquals("[]", 
routing.getErrors().toString());
        assertEquals(1, routing.getProtocols().size());
        DocumentProtocol protocol = (DocumentProtocol) routing.getProtocols().get(0);
        RoutingTableSpec spec = protocol.getRoutingTableSpec();
        assertEquals(3, spec.getNumHops());
        assertEquals("docproc/cluster.bar_c.indexing/chain.indexing", spec.getHop(0).getName());
        assertEquals("docproc/cluster.foo_c.indexing/chain.indexing", spec.getHop(1).getName());
        assertEquals("indexing", spec.getHop(2).getName());
        assertEquals(10, spec.getNumRoutes());
        assertRoute(spec.getRoute(0), "bar_c", "[MessageType:bar_c]");
        assertRoute(spec.getRoute(1), "bar_c-direct", "[Content:cluster=bar_c]");
        assertRoute(spec.getRoute(2), "bar_c-index", "docproc/cluster.bar_c.indexing/chain.indexing", "[Content:cluster=bar_c]");
        assertRoute(spec.getRoute(3), "default", "indexing");
        assertRoute(spec.getRoute(4), "default-get", "indexing");
        assertRoute(spec.getRoute(5), "foo_c", "[MessageType:foo_c]");
        assertRoute(spec.getRoute(6), "foo_c-direct", "[Content:cluster=foo_c]");
        assertRoute(spec.getRoute(7), "foo_c-index", "docproc/cluster.foo_c.indexing/chain.indexing", "[Content:cluster=foo_c]");
        assertRoute(spec.getRoute(8), "storage/cluster.bar_c", "route:bar_c");
        assertRoute(spec.getRoute(9), "storage/cluster.foo_c", "route:foo_c");
    }

    /** Builds and validates a content cluster from the given XML, deployed (hosted) in the given zone. */
    private ContentCluster createWithZone(String clusterXml, Zone zone) throws Exception {
        DeployState.Builder deployStateBuilder = new DeployState.Builder()
                .zone(zone)
                .properties(new TestProperties().setHostedVespa(true));
        List<String> schemas = SchemaBuilder.createSchemas("test");
        MockRoot root = ContentClusterUtils.createMockRoot(schemas, deployStateBuilder);
        ContentCluster cluster = ContentClusterUtils.createCluster(clusterXml, root);
        root.freezeModelTopology();
        cluster.validate();
        return cluster;
    }

    /**
     * Asserts that the distribution bit count is propagated to both the fleetcontroller config
     * (ideal_distribution_bits) and the distributor manager config (minsplitcount).
     */
    private void assertDistributionBitsInConfig(ContentCluster cluster, int distributionBits) {
        FleetcontrollerConfig.Builder builder = new FleetcontrollerConfig.Builder();
        cluster.getConfig(builder);
        cluster.getClusterControllerConfig().getConfig(builder);
        FleetcontrollerConfig config = new FleetcontrollerConfig(builder);
        assertEquals(distributionBits, config.ideal_distribution_bits());

        StorDistributormanagerConfig.Builder sdBuilder = new StorDistributormanagerConfig.Builder();
        cluster.getConfig(sdBuilder);
        StorDistributormanagerConfig storDistributormanagerConfig = new StorDistributormanagerConfig(sdBuilder);
        assertEquals(distributionBits, storDistributormanagerConfig.minsplitcount());
    }

    /** Verifies the default dispatch top-k probability produced without any property overrides. */
    private void verifyTopKProbabilityPropertiesControl() {
        VespaModel model = createEnd2EndOneNode(new TestProperties());
        ContentCluster cc = model.getContentClusters().get("storage");
        DispatchConfig.Builder builder = new DispatchConfig.Builder();
        cc.getSearch().getConfig(builder);
        DispatchConfig cfg = new DispatchConfig(builder);
        assertEquals(0.9999, cfg.topKProbability(), 0.0);
    }

    @Test
    public void default_topKprobability_controlled_by_properties() {
        verifyTopKProbabilityPropertiesControl();
    }

    /** Returns the resolved three-phase-update config value for the given feature flag setting. */
    private boolean resolveThreePhaseUpdateConfigWithFeatureFlag(boolean flagEnableThreePhase) {
        VespaModel model = createEnd2EndOneNode(new TestProperties().setUseThreePhaseUpdates(flagEnableThreePhase));
        ContentCluster cc = model.getContentClusters().get("storage");
        var builder = new StorDistributormanagerConfig.Builder();
        cc.getDistributorNodes().getConfig(builder);
        return (new StorDistributormanagerConfig(builder)).enable_metadata_only_fetch_phase_for_inconsistent_updates();
    }

    @Test
    public void default_distributor_three_phase_update_config_controlled_by_properties() {
        assertFalse(resolveThreePhaseUpdateConfigWithFeatureFlag(false));
        assertTrue(resolveThreePhaseUpdateConfigWithFeatureFlag(true));
    }

    /** Returns the max_compact_buffers value that the model produces for the given property. */
    private int resolveMaxCompactBuffers(int maxCompactBuffers) {
        VespaModel model = createEnd2EndOneNode(new TestProperties().maxCompactBuffers(maxCompactBuffers));
        ContentCluster cc = model.getContentClusters().get("storage");
        ProtonConfig.Builder protonBuilder = new ProtonConfig.Builder();
        cc.getSearch().getConfig(protonBuilder);
        ProtonConfig protonConfig = new ProtonConfig(protonBuilder);
        assertEquals(1, protonConfig.documentdb().size());
        return protonConfig.documentdb(0).allocation().max_compact_buffers();
    }

    /**
     * Asserts that every cluster controller container has exactly one component with the
     * given class id configured.
     *
     * NOTE: the spurious @Test annotation has been removed — this is a parameterized helper
     * invoked from testDedicatedClusterControllers, and JUnit 4 rejects @Test methods that
     * declare parameters ("Method ... should have no parameters"), which made the class
     * report a bogus failure.
     */
    void assertZookeeperServerImplementation(String expectedClassName,
                                             ClusterControllerContainerCluster clusterControllerCluster) {
        for (ClusterControllerContainer c : clusterControllerCluster.getContainers()) {
            var builder = new ComponentsConfig.Builder();
            c.getConfig(builder);
            assertEquals(1, new ComponentsConfig(builder).components().stream()
                            .filter(component -> component.classId().equals(expectedClassName))
                            .count());
        }
    }

    /** Builds a one-node cluster with the given properties and returns its distributor manager config. */
    private StorDistributormanagerConfig resolveStorDistributormanagerConfig(TestProperties props) throws Exception {
        var cc = createOneNodeCluster(props);
        var builder = new StorDistributormanagerConfig.Builder();
        cc.getDistributorNodes().getConfig(builder);
        return (new StorDistributormanagerConfig(builder));
    }

    private int resolveMaxInhibitedGroupsConfigWithFeatureFlag(int maxGroups) throws Exception {
        var cfg = resolveStorDistributormanagerConfig(new TestProperties().maxActivationInhibitedOutOfSyncGroups(maxGroups));
        return cfg.max_activation_inhibited_out_of_sync_groups();
    }

    @Test
    public void default_distributor_max_inhibited_group_activation_config_controlled_by_properties() throws Exception {
        assertEquals(0, resolveMaxInhibitedGroupsConfigWithFeatureFlag(0));
        assertEquals(2, resolveMaxInhibitedGroupsConfigWithFeatureFlag(2));
    }

    /** Returns num_distributor_stripes for node "0", optionally provisioned with the given flavor. */
    private int resolveNumDistributorStripesConfig(Optional<Flavor> flavor) throws Exception {
        var cc = createOneNodeCluster(new TestProperties(), flavor);
        var builder = new StorDistributormanagerConfig.Builder();
        cc.getDistributorNodes().getChildren().get("0").getConfig(builder);
        return (new StorDistributormanagerConfig(builder)).num_distributor_stripes();
    }

    /** Resolves stripe count for a synthetic flavor with the given CPU core count. */
    private int resolveTunedNumDistributorStripesConfig(int numCpuCores) throws Exception {
        var flavor = new Flavor(new FlavorsConfig.Flavor(new FlavorsConfig.Flavor.Builder().name("test").minCpuCores(numCpuCores)));
        return resolveNumDistributorStripesConfig(Optional.of(flavor));
    }

    @Test
    public void num_distributor_stripes_config_defaults_to_zero() throws Exception {
        assertEquals(0, resolveNumDistributorStripesConfig(Optional.empty()));
    }

    @Test
    public void num_distributor_stripes_config_tuned_by_flavor() throws Exception {
        // Stripe count steps up at 17 and 65 cores.
        assertEquals(1, resolveTunedNumDistributorStripesConfig(1));
        assertEquals(1, resolveTunedNumDistributorStripesConfig(16));
        assertEquals(2, resolveTunedNumDistributorStripesConfig(17));
        assertEquals(2, resolveTunedNumDistributorStripesConfig(64));
        assertEquals(4, resolveTunedNumDistributorStripesConfig(65));
    }

    @Test
    public void distributor_merge_busy_wait_controlled_by_properties() throws Exception {
        assertEquals(10, resolveDistributorMergeBusyWaitConfig(Optional.empty()));
        assertEquals(1, resolveDistributorMergeBusyWaitConfig(Optional.of(1)));
    }

    /** Resolves inhibit_merge_sending_on_busy_node_duration_sec; empty means "use the default". */
    private int resolveDistributorMergeBusyWaitConfig(Optional<Integer> mergeBusyWait) throws Exception {
        var props = new TestProperties();
        if (mergeBusyWait.isPresent()) {
            props.setDistributorMergeBusyWait(mergeBusyWait.get());
        }
        var cluster = createOneNodeCluster(props);
        var builder = new StorDistributormanagerConfig.Builder();
        cluster.getDistributorNodes().getConfig(builder);
        return (new StorDistributormanagerConfig(builder)).inhibit_merge_sending_on_busy_node_duration_sec();
    }

    @Test
    public void distributor_enhanced_maintenance_scheduling_controlled_by_properties() throws Exception {
        assertFalse(resolveDistributorEnhancedSchedulingConfig(false));
        assertTrue(resolveDistributorEnhancedSchedulingConfig(true));
    }

    private boolean resolveDistributorEnhancedSchedulingConfig(boolean enhancedScheduling) throws Exception {
        var props = new TestProperties();
        if (enhancedScheduling) {
            props.distributorEnhancedMaintenanceScheduling(enhancedScheduling);
        }
        var cluster = createOneNodeCluster(props);
        var builder = new StorDistributormanagerConfig.Builder();
        cluster.getDistributorNodes().getConfig(builder);
        return (new StorDistributormanagerConfig(builder)).implicitly_clear_bucket_priority_on_schedule();
    }

    @Test
    public void unordered_merge_chaining_config_controlled_by_properties() throws Exception {
        assertFalse(resolveUnorderedMergeChainingConfig(false));
        assertTrue(resolveUnorderedMergeChainingConfig(true));
    }

    private boolean resolveUnorderedMergeChainingConfig(boolean unorderedMergeChaining) throws Exception {
        var props = new TestProperties();
        if (unorderedMergeChaining) {
            props.setUnorderedMergeChaining(true);
        }
        var cluster = createOneNodeCluster(props);
        var builder = new StorDistributormanagerConfig.Builder();
        cluster.getDistributorNodes().getConfig(builder);
        return (new StorDistributormanagerConfig(builder)).use_unordered_merge_chaining();
    }

    @Test
    public void testDedicatedClusterControllers() {
        // No content cluster -> no cluster controller cluster at all.
        VespaModel noContentModel = createEnd2EndOneNode(new TestProperties().setHostedVespa(true)
                                                                            .setMultitenant(true),
                                                         "<?xml version='1.0' encoding='UTF-8' ?>" +
                                                         "<services version='1.0'>" +
                                                         "  <container id='default' version='1.0' />" +
                                                         " </services>");
        assertEquals(Map.of(), noContentModel.getContentClusters());
        assertNull("No cluster controller without content", noContentModel.getAdmin().getClusterControllers());

        // One content cluster -> a shared (standalone) cluster controller cluster exists.
        VespaModel oneContentModel = createEnd2EndOneNode(new TestProperties().setHostedVespa(true)
                                                                              .setMultitenant(true),
                                                          "<?xml version='1.0' encoding='UTF-8' ?>" +
                                                          "<services version='1.0'>" +
                                                          "  <container id='default' version='1.0' />" +
                                                          "  <content id='storage' version='1.0'>" +
                                                          "    <redundancy>1</redundancy>" +
                                                          "    <documents>" +
                                                          "      <document mode='index' type='type1' />" +
                                                          "    </documents>" +
                                                          "  </content>" +
                                                          " </services>");
        assertNotNull("Shared cluster controller with content", oneContentModel.getAdmin().getClusterControllers());

        // Two content clusters share one controller cluster, but keep per-cluster tuning.
        String twoContentServices = "<?xml version='1.0' encoding='UTF-8' ?>" +
                                    "<services version='1.0'>" +
                                    "  <container id='default' version='1.0' />" +
                                    "  <content id='storage' version='1.0'>" +
                                    "    <redundancy>1</redundancy>" +
                                    "    <documents>" +
                                    "      <document mode='index' type='type1' />" +
                                    "    </documents>" +
                                    "    <tuning>" +
                                    "      <cluster-controller>" +
                                    "        <min-distributor-up-ratio>0.618</min-distributor-up-ratio>" +
                                    "      </cluster-controller>" +
                                    "    </tuning>" +
                                    "  </content>" +
                                    "  <content id='dev-null' version='1.0'>" +
                                    "    <redundancy>1</redundancy>" +
                                    "    <documents>" +
                                    "      <document mode='index' type='type1' />" +
                                    "    </documents>" +
                                    "    <tuning>" +
                                    "      <cluster-controller>" +
                                    "        <min-distributor-up-ratio>0.418</min-distributor-up-ratio>" +
                                    "      </cluster-controller>" +
                                    "    </tuning>" +
                                    "  </content>" +
                                    " </services>";
        VespaModel twoContentModel = createEnd2EndOneNode(new TestProperties().setHostedVespa(true)
                                                                              .setMultitenant(true),
                                                          twoContentServices);
        assertNotNull("Shared cluster controller with content", twoContentModel.getAdmin().getClusterControllers());

        ClusterControllerContainerCluster clusterControllers = twoContentModel.getAdmin().getClusterControllers();

        assertEquals(1, clusterControllers.reindexingContext().documentTypesForCluster("storage").size());
        assertEquals(1, clusterControllers.reindexingContext().documentTypesForCluster("dev-null").size());

        // Each content cluster's tuning ends up in its own fleetcontroller configurer.
        var storageBuilder = new FleetcontrollerConfig.Builder();
        var devNullBuilder = new FleetcontrollerConfig.Builder();
        twoContentModel.getConfig(storageBuilder, "admin/standalone/cluster-controllers/0/components/clustercontroller-storage-configurer");
        twoContentModel.getConfig(devNullBuilder, "admin/standalone/cluster-controllers/0/components/clustercontroller-dev-null-configurer");
        assertEquals(0.618, storageBuilder.build().min_distributor_up_ratio(), 1e-9);
        assertEquals(0.418, devNullBuilder.build().min_distributor_up_ratio(), 1e-9);

        assertZookeeperServerImplementation("com.yahoo.vespa.zookeeper.ReconfigurableVespaZooKeeperServer",
                                            clusterControllers);
        assertZookeeperServerImplementation("com.yahoo.vespa.zookeeper.Reconfigurer",
                                            clusterControllers);
        assertZookeeperServerImplementation("com.yahoo.vespa.zookeeper.VespaZooKeeperAdminImpl",
                                            clusterControllers);
    }

}
/**
 * Tests that content-cluster services XML is turned into the expected distribution,
 * proton and storage configs. JUnit 4 style (@Rule, message-first asserts).
 */
class ContentClusterTest extends ContentBaseTest {

    // Minimal admin section prepended to every cluster XML snippet by parse().
    private final static String HOSTS = "<admin version='2.0'><adminserver hostalias='mockhost' /></admin>";

    @Rule
    public ExpectedException expectedException = ExpectedException.none();

    // Builds a full model from HOSTS + the given content XML and returns its (single) content cluster.
    ContentCluster parse(String xml) {
        xml = HOSTS + xml;
        TestRoot root = new TestDriver().buildModel(xml);
        return root.getConfigModels(Content.class).get(0).getCluster();
    }

    @Test
    public void testHierarchicRedundancy() {
        // 3 leaf groups of 5 nodes each under a 1|1|* partitioned root group.
        ContentCluster cc = parse("" +
            "<content version=\"1.0\" id=\"storage\">\n" +
            "  <documents/>" +
            "  <engine>" +
            "    <proton>" +
            "      <searchable-copies>3</searchable-copies>" +
            "    </proton>" +
            "  </engine>" +
            "  <redundancy>15</redundancy>\n" +
            "  <group name='root' distribution-key='0'>" +
            "    <distribution partitions='1|1|*'/>" +
            "    <group name='g-1' distribution-key='0'>" +
            "      <node hostalias='mockhost' distribution-key='0'/>" +
            "      <node hostalias='mockhost' distribution-key='1'/>" +
            "      <node hostalias='mockhost' distribution-key='2'/>" +
            "      <node hostalias='mockhost' distribution-key='3'/>" +
            "      <node hostalias='mockhost' distribution-key='4'/>" +
            "    </group>" +
            "    <group name='g-2' distribution-key='1'>" +
            "      <node hostalias='mockhost' distribution-key='5'/>" +
            "      <node hostalias='mockhost' distribution-key='6'/>" +
            "      <node hostalias='mockhost' distribution-key='7'/>" +
            "      <node hostalias='mockhost' distribution-key='8'/>" +
            "      <node hostalias='mockhost' distribution-key='9'/>" +
            "    </group>" +
            "    <group name='g-3' distribution-key='1'>" +
            "      <node hostalias='mockhost' distribution-key='10'/>" +
            "      <node hostalias='mockhost' distribution-key='11'/>" +
            "      <node hostalias='mockhost' distribution-key='12'/>" +
            "      <node hostalias='mockhost' distribution-key='13'/>" +
            "      <node hostalias='mockhost' distribution-key='14'/>" +
            "    </group>" +
            "  </group>" +
            "</content>"
        );
        DistributionConfig.Builder distributionBuilder = new DistributionConfig.Builder();
        cc.getConfig(distributionBuilder);
        DistributionConfig distributionConfig = distributionBuilder.build();
        assertEquals(3, distributionConfig.cluster("storage").ready_copies());
        assertEquals(15, distributionConfig.cluster("storage").initial_redundancy());
        assertEquals(15, distributionConfig.cluster("storage").redundancy());
        // Root group plus the three leaf groups.
        assertEquals(4, distributionConfig.cluster("storage").group().size());
        assertEquals(1, distributionConfig.cluster().size());

        StorDistributionConfig.Builder storBuilder = new StorDistributionConfig.Builder();
        cc.getConfig(storBuilder);
        StorDistributionConfig storConfig = new StorDistributionConfig(storBuilder);
        assertEquals(15, storConfig.initial_redundancy());
        assertEquals(15, storConfig.redundancy());
        assertEquals(3, storConfig.ready_copies());

        // Proton sees per-leaf-group values: 15/3 groups = 5 copies, 3/3 = 1 searchable.
        ProtonConfig.Builder protonBuilder = new ProtonConfig.Builder();
        cc.getSearch().getConfig(protonBuilder);
        ProtonConfig protonConfig = new ProtonConfig(protonBuilder);
        assertEquals(1, protonConfig.distribution().searchablecopies());
        assertEquals(5, protonConfig.distribution().redundancy());
    }

    @Test
    public void testRedundancy() {
        // Flat group; reply-after gives a lower initial redundancy than the final one.
        ContentCluster cc = parse("" +
            "<content version=\"1.0\" id=\"storage\">\n" +
            "  <documents/>" +
            "  <engine>" +
            "    <proton>" +
            "      <searchable-copies>3</searchable-copies>" +
            "    </proton>" +
            "  </engine>" +
            "  <redundancy reply-after='4'>5</redundancy>\n" +
            "  <group>" +
            "    <node hostalias='mockhost' distribution-key='0'/>" +
            "    <node hostalias='mockhost' distribution-key='1'/>" +
            "    <node hostalias='mockhost' distribution-key='2'/>" +
            "    <node hostalias='mockhost' distribution-key='3'/>" +
            "    <node hostalias='mockhost' distribution-key='4'/>" +
            "  </group>" +
            "</content>"
        );
        DistributionConfig.Builder distributionBuilder = new DistributionConfig.Builder();
        cc.getConfig(distributionBuilder);
        DistributionConfig distributionConfig = distributionBuilder.build();
        assertEquals(3, distributionConfig.cluster("storage").ready_copies());
        assertEquals(4, distributionConfig.cluster("storage").initial_redundancy());
        assertEquals(5, distributionConfig.cluster("storage").redundancy());

        StorDistributionConfig.Builder storBuilder = new StorDistributionConfig.Builder();
        cc.getConfig(storBuilder);
        StorDistributionConfig storConfig = new StorDistributionConfig(storBuilder);
        assertEquals(4, storConfig.initial_redundancy());
        assertEquals(5, storConfig.redundancy());
        assertEquals(3, storConfig.ready_copies());

        ProtonConfig.Builder protonBuilder = new ProtonConfig.Builder();
        cc.getSearch().getConfig(protonBuilder);
        ProtonConfig protonConfig = new ProtonConfig(protonBuilder);
        assertEquals(3, protonConfig.distribution().searchablecopies());
        assertEquals(5, protonConfig.distribution().redundancy());
    }

    @Test
    public void testNoId() {
        // A content element without an id gets the default name "content".
        ContentCluster c = parse(
            "<content version=\"1.0\">\n" +
            "  <redundancy>1</redundancy>\n" +
            "  <documents/>" +
            "  <redundancy reply-after=\"4\">5</redundancy>\n" +
            "  <group>" +
            "    <node hostalias=\"mockhost\" distribution-key=\"0\"/>\"" +
            "  </group>" +
            "</content>"
        );

        assertEquals("content", c.getName());
    }

    @Test
    public void testRedundancyDefaults() {
        // No explicit redundancy: defaults derived from the 3-node group.
        ContentCluster cc = parse(
            "<content version=\"1.0\" id=\"storage\">\n" +
            "  <documents/>" +
            "  <group>" +
            "    <node hostalias=\"mockhost\" distribution-key=\"0\"/>\"" +
            "    <node hostalias=\"mockhost\" distribution-key=\"1\"/>\"" +
            "    <node hostalias=\"mockhost\" distribution-key=\"2\"/>\"" +
            "  </group>" +
            "</content>"
        );
        DistributionConfig.Builder distributionBuilder = new DistributionConfig.Builder();
        cc.getConfig(distributionBuilder);
        DistributionConfig distributionConfig = distributionBuilder.build();
        assertEquals(3, distributionConfig.cluster("storage").redundancy());

        StorDistributionConfig.Builder builder = new StorDistributionConfig.Builder();
        cc.getConfig(builder);
        StorDistributionConfig config = new StorDistributionConfig(builder);
        assertEquals(2, config.initial_redundancy());
        assertEquals(3, config.redundancy());
        assertEquals(2, config.ready_copies());
    }

    @Test
    public void testEndToEnd() {
        // Full services XML with explicit admin, slobroks and three cluster controllers.
        String xml =
            "<?xml version=\"1.0\" encoding=\"utf-8\" ?>\n" +
            "<services>\n" +
            "\n" +
            "  <admin version=\"2.0\">\n" +
            "    <adminserver hostalias=\"configserver\" />\n" +
            "    <logserver hostalias=\"logserver\" />\n" +
            "    <slobroks>\n" +
            "      <slobrok hostalias=\"configserver\" />\n" +
            "      <slobrok hostalias=\"logserver\" />\n" +
            "    </slobroks>\n" +
            "    <cluster-controllers>\n" +
            "      <cluster-controller hostalias=\"configserver\"/>" +
            "      <cluster-controller hostalias=\"configserver2\"/>" +
            "      <cluster-controller hostalias=\"configserver3\"/>" +
            "    </cluster-controllers>\n" +
            "  </admin>\n" +
            "  <content version='1.0' id='bar'>" +
            "     <redundancy>1</redundancy>\n" +
            "     <documents>" +
            "       <document type=\"type1\" mode=\"index\"/>\n" +
            "       <document type=\"type2\" mode=\"index\"/>\n" +
            "     </documents>\n" +
            "     <group>" +
            "       <node hostalias='node0' distribution-key='0' />" +
            "     </group>" +
            "    <tuning>" +
            "      <cluster-controller>\n" +
            "        <init-progress-time>34567</init-progress-time>" +
            "      </cluster-controller>" +
            "    </tuning>" +
            "  </content>" +
            "\n" +
            "</services>";

        List<String> sds = ApplicationPackageUtils.generateSchemas("type1", "type2");
        VespaModel model = new VespaModelCreatorWithMockPkg(null, xml, sds).create();
        assertEquals(2, model.getContentClusters().get("bar").getDocumentDefinitions().size());
        ContainerCluster<?> cluster = model.getAdmin().getClusterControllers();
        assertEquals(3, cluster.getContainers().size());
    }

    // Builds a single-host end-to-end model (container + 2-node content cluster) with default services.
    VespaModel createEnd2EndOneNode(ModelContext.Properties properties) {
        String services =
                "<?xml version='1.0' encoding='UTF-8' ?>" +
                "<services version='1.0'>" +
                "  <admin version='2.0'>" +
                "    <adminserver hostalias='node1'/>" +
                "  </admin>"  +
                "  <container id='default' version='1.0'>" +
                "    <search/>" +
                "    <nodes>" +
                "      <node hostalias='node1'/>" +
                "    </nodes>" +
                "  </container>" +
                "  <content id='storage' version='1.0'>" +
                "    <redundancy>2</redundancy>" +
                "    <group>" +
                "      <node distribution-key='0' hostalias='node1'/>" +
                "      <node distribution-key='1' hostalias='node1'/>" +
                "    </group>" +
                "    <tuning>" +
                "      <cluster-controller>" +
                "        <transition-time>0</transition-time>" +
                "      </cluster-controller>" +
                "    </tuning>" +
                "    <documents>" +
                "      <document mode='index' type='type1'/>" +
                "    </documents>" +
                "    <engine>" +
                "      <proton/>" +
                "    </engine>" +
                "  </content>" +
                " </services>";
        return createEnd2EndOneNode(properties, services);
    }

    // Builds a model from explicit services XML using the mock application package and a "type1" schema.
    VespaModel createEnd2EndOneNode(ModelContext.Properties properties, String services) {
        DeployState.Builder deployStateBuilder = new DeployState.Builder().properties(properties);
        List<String> sds = ApplicationPackageUtils.generateSchemas("type1");
        return (new VespaModelCreatorWithMockPkg(null, services, sds)).create(deployStateBuilder);
    }

    @Test
    public void testEndToEndOneNode() {
        VespaModel model = createEnd2EndOneNode(new TestProperties());

        assertEquals(1, model.getContentClusters().get("storage").getDocumentDefinitions().size());
        ContainerCluster<?> cluster = model.getAdmin().getClusterControllers();
        assertEquals(1, cluster.getContainers().size());
    }

    @Test
    public void testSearchTuning() {
        String xml =
            "<?xml version=\"1.0\" encoding=\"utf-8\" ?>\n" +
            "<services>\n" +
            "\n" +
            "  <admin version=\"2.0\">\n" +
            "    <adminserver hostalias=\"node0\" />\n" +
            "    <cluster-controllers>\n" +
            "      <cluster-controller hostalias=\"node0\"/>" +
            "    </cluster-controllers>\n" +
            "  </admin>\n" +
            "  <content version='1.0' id='bar'>" +
            "    <redundancy>1</redundancy>\n" +
            "     <documents>" +
            "       <document type=\"type1\" mode='index'/>\n" +
            "       <document type=\"type2\" mode='index'/>\n" +
            "     </documents>\n" +
            "     <group>" +
            "       <node hostalias='node0' distribution-key='0'/>" +
            "     </group>" +
            "    <tuning>\n" +
            "      <cluster-controller>" +
            "        <init-progress-time>34567</init-progress-time>" +
            "      </cluster-controller>" +
            "    </tuning>" +
            "  </content>" +
            "\n" +
            "</services>";

        List<String> sds = ApplicationPackageUtils.generateSchemas("type1", "type2");
        VespaModel model = new VespaModelCreatorWithMockPkg(getHosts(), xml, sds).create();

        // Proton engine is the default persistence for indexed documents.
        assertTrue(model.getContentClusters().get("bar").getPersistence() instanceof ProtonEngine.Factory);

        {
            StorDistributormanagerConfig.Builder builder = new StorDistributormanagerConfig.Builder();
            model.getConfig(builder, "bar/distributor/0");
            StorDistributormanagerConfig config = new StorDistributormanagerConfig(builder);
            assertFalse(config.inlinebucketsplitting());
        }

        {
            StorFilestorConfig.Builder builder = new StorFilestorConfig.Builder();
            model.getConfig(builder, "bar/storage/0");
            StorFilestorConfig config = new StorFilestorConfig(builder);
            assertFalse(config.enable_multibit_split_optimalization());
        }
    }

    @Test
    public void testRedundancyRequired() {
        // Omitting <redundancy> must make deployment fail with a clear message.
        String xml =
            "<?xml version=\"1.0\" encoding=\"utf-8\" ?>\n" +
            "<services>\n" +
            "\n" +
            "  <admin version=\"2.0\">\n" +
            "    <adminserver hostalias=\"node0\" />\n" +
            "  </admin>\n" +
            "  <content version='1.0' id='bar'>" +
            "     <documents>" +
            "       <document type=\"type1\" mode='index'/>\n" +
            "     </documents>\n" +
            "     <group>\n" +
            "       <node hostalias='node0' distribution-key='0'/>\n" +
            "     </group>\n" +
            "  </content>\n" +
            "</services>\n";

        List<String> sds = ApplicationPackageUtils.generateSchemas("type1", "type2");
        try{
            new VespaModelCreatorWithMockPkg(getHosts(), xml, sds).create();
            fail("Deploying without redundancy should fail");
        } catch (IllegalArgumentException e) {
            assertTrue(e.getMessage(), e.getMessage().contains("missing required element \"redundancy\""));
        }
    }

    @Test
    public void testRedundancyFinalLessThanInitial() {
        // reply-after (4) greater than final redundancy (2) is invalid.
        // Note: fail() throws AssertionError, which the Exception catch does not swallow.
        try {
            parse(
                "<content version=\"1.0\" id=\"storage\">\n" +
                "  <redundancy reply-after=\"4\">2</redundancy>\n" +
                "     <group>" +
                "       <node hostalias='node0' distribution-key='0' />" +
                "     </group>" +
                "</content>"
            );
            fail("no exception thrown");
        } catch (Exception e) { /* ignore */
        }
    }

    @Test
    public void testReadyTooHigh() {
        // searchable-copies (3) greater than redundancy (2) is invalid.
        try {
            parse(
                "<content version=\"1.0\" id=\"storage\">\n" +
                "  <engine>" +
                "     <proton>" +
                "       <searchable-copies>3</searchable-copies>" +
                "     </proton>" +
                "  </engine>" +
                "  <redundancy>2</redundancy>\n" +
                "     <group>" +
                "       <node hostalias='node0' distribution-key='0' />" +
                "     </group>" +
                "</content>"
            );
            fail("no exception thrown");
        } catch (Exception e) { /* ignore */
        }
    }
    // Builds the merged fleetcontroller config (cluster + cluster-controller contributions) for the XML.
    FleetcontrollerConfig getFleetControllerConfig(String xml) {
        ContentCluster cluster = parse(xml);

        FleetcontrollerConfig.Builder builder = new FleetcontrollerConfig.Builder();
        cluster.getConfig(builder);
        cluster.getClusterControllerConfig().getConfig(builder);
        return new FleetcontrollerConfig(builder);
    }

    @Test
    public void testFleetControllerOverride() {
        {
            // Single-node cluster: up-ratios are zeroed, absolute counts take over.
            FleetcontrollerConfig config = getFleetControllerConfig(
                "<content version=\"1.0\" id=\"storage\">\n" +
                "  <documents/>" +
                "  <group>\n" +
                "    <node distribution-key=\"0\" hostalias=\"mockhost\"/>\n" +
                "  </group>\n" +
                "</content>"
            );

            assertEquals(0, config.min_storage_up_ratio(), 0.01);
            assertEquals(0, config.min_distributor_up_ratio(), 0.01);
            assertEquals(1, config.min_storage_up_count());
            assertEquals(1, config.min_distributors_up_count());
        }

        {
            FleetcontrollerConfig config = getFleetControllerConfig(
                "<content version=\"1.0\" id=\"storage\">\n" +
                "  <documents/>" +
                "  <group>\n" +
                "    <node distribution-key=\"0\" hostalias=\"mockhost\"/>\n" +
                "    <node distribution-key=\"1\" hostalias=\"mockhost\"/>\n" +
                "    <node distribution-key=\"2\" hostalias=\"mockhost\"/>\n" +
                "    <node distribution-key=\"3\" hostalias=\"mockhost\"/>\n" +
                "    <node distribution-key=\"4\" hostalias=\"mockhost\"/>\n" +
                "    <node distribution-key=\"5\" hostalias=\"mockhost\"/>\n" +
                "  </group>\n" +
                "</content>"
            );

            // NOTE(review): assertNotSame compares references, so boxed int 0 vs boxed double
            // can never be the same object and this assertion is vacuous — presumably
            // assertNotEquals(0.0, ..., delta) was intended; verify before changing.
            assertNotSame(0, config.min_storage_up_ratio());
        }
    }

    @Test
    public void testImplicitDistributionBits() {
        ContentCluster cluster = parse(
            "<content version=\"1.0\" id=\"storage\">\n" +
            "  <documents/>" +
            "  <group>\n" +
            "    <node distribution-key=\"0\" hostalias=\"mockhost\"/>\n" +
            "  </group>\n" +
            "</content>"
        );

        assertDistributionBitsInConfig(cluster, 8);

        cluster = parse(
            "<content version=\"1.0\" id=\"storage\">\n" +
            "  <documents/>" +
            "  <group>\n" +
            "    <node distribution-key=\"0\" hostalias=\"mockhost\"/>\n" +
            "  </group>\n" +
            "</content>"
        );

        assertDistributionBitsInConfig(cluster, 8);
    }

    @Test
    public void testExplicitDistributionBits() {
        // Both strict and loose distribution tuning resolve to 8 bits here.
        ContentCluster cluster = parse(
            "<content version=\"1.0\" id=\"storage\">\n" +
            "  <documents/>" +
            "  <group>\n" +
            "    <node distribution-key=\"0\" hostalias=\"mockhost\"/>\n" +
            "  </group>\n" +
            "  <tuning>\n" +
            "    <distribution type=\"strict\"/>\n" +
            "  </tuning>\n" +
            "</content>"
        );

        assertDistributionBitsInConfig(cluster, 8);

        cluster = parse(
            "<content version=\"1.0\" id=\"storage\">\n" +
            "  <documents/>" +
            "  <group>\n" +
            "    <node distribution-key=\"0\" hostalias=\"mockhost\"/>\n" +
            "  </group>\n" +
            "  <tuning>\n" +
            "    <distribution type=\"loose\"/>\n" +
            "  </tuning>\n" +
            "</content>"
        );

        assertDistributionBitsInConfig(cluster, 8);
    }

    @Test
    public void testZoneDependentDistributionBits() throws Exception {
        // Prod zones get 16 distribution bits; staging keeps 8.
        String xml = new ContentClusterBuilder().docTypes("test").getXml();

        ContentCluster prodWith16Bits = createWithZone(xml, new Zone(Environment.prod, RegionName.from("us-east-3")));
        assertDistributionBitsInConfig(prodWith16Bits, 16);

        ContentCluster stagingNot16Bits = createWithZone(xml, new Zone(Environment.staging, RegionName.from("us-east-3")));
        assertDistributionBitsInConfig(stagingNot16Bits, 8);
    }

    @Test
    public void testGenerateSearchNodes() {
        ContentCluster cluster = parse(
            "<content version=\"1.0\" id=\"storage\">\n" +
            "  <documents/>" +
            "  <engine>" +
            "    <proton/>" +
            "  </engine>" +
            "  <group>\n" +
            "    <node distribution-key=\"0\" hostalias=\"mockhost\"/>\n" +
            "    <node distribution-key=\"1\" hostalias=\"mockhost\"/>\n" +
            "  </group>\n" +
            "</content>"
        );

        {
            StorServerConfig.Builder builder = new StorServerConfig.Builder();
            cluster.getStorageCluster().getConfig(builder);
            cluster.getStorageCluster().getChildren().get("0").getConfig(builder);
            // The config instance is not asserted on; constructing it checks the builder is complete.
            StorServerConfig config = new StorServerConfig(builder);
        }

        {
            StorServerConfig.Builder builder = new StorServerConfig.Builder();
            cluster.getStorageCluster().getConfig(builder);
            cluster.getStorageCluster().getChildren().get("1").getConfig(builder);
            StorServerConfig config = new StorServerConfig(builder);
        }
    }

    @Test
    public void testAlternativeNodeSyntax() {
        // A flat <nodes> list produces a single placeholder group named "invalid".
        ContentCluster cluster = parse(
            "<content version=\"1.0\" id=\"test\">\n" +
            "  <documents/>" +
            "  <engine>" +
            "    <proton/>" +
            "  </engine>" +
            "  <nodes>\n" +
            "    <node distribution-key=\"0\" hostalias=\"mockhost\"/>\n" +
            "    <node distribution-key=\"1\" hostalias=\"mockhost\"/>\n" +
            "  </nodes>\n" +
            "</content>"
        );

        DistributionConfig.Builder bob = new DistributionConfig.Builder();
        cluster.getConfig(bob);
        DistributionConfig.Cluster.Group group = bob.build().cluster("test").group(0);
        assertEquals("invalid", group.name());
        assertEquals("invalid", group.index());
        assertEquals(2, group.nodes().size());

        StorDistributionConfig.Builder builder = new StorDistributionConfig.Builder();
        cluster.getConfig(builder);
        StorDistributionConfig config = new StorDistributionConfig(builder);
        assertEquals("invalid", config.group(0).name());
        assertEquals("invalid", config.group(0).index());
        assertEquals(2, config.group(0).nodes().size());
    }

    @Test
    public void testReadyWhenInitialOne() {
        StorDistributionConfig.Builder builder = new StorDistributionConfig.Builder();
        parse(
            "<content version=\"1.0\" id=\"storage\">\n" +
            "  <documents/>" +
            "  <redundancy>1</redundancy>\n" +
            "  <group>\n" +
            "    <node distribution-key=\"0\" hostalias=\"mockhost\"/>" +
            "  </group>" +
            "</content>"
        ).getConfig(builder);

        StorDistributionConfig config = new StorDistributionConfig(builder);
        assertEquals(1, config.initial_redundancy());
        assertEquals(1, config.redundancy());
        assertEquals(1, config.ready_copies());
    }

    // Parses a cluster with the given engine tag and asserts the expected persistence provider
    // type on both a storage node and a distributor node.
    public void testProvider(String tagName, StorServerConfig.Persistence_provider.Type.Enum type) {
        ContentCluster cluster = parse(
            "<content version=\"1.0\" id=\"storage\">\n" +
            "  <documents/>" +
            "  <redundancy>3</redundancy>" +
            "  <engine>\n" +
            "    <" + tagName + "/>\n" +
            "  </engine>\n" +
            "  <group>\n" +
            "    <node distribution-key=\"0\" hostalias=\"mockhost\"/>" +
            "  </group>" +
            "</content>"
        );

        {
            StorServerConfig.Builder builder = new StorServerConfig.Builder();
            cluster.getStorageCluster().getConfig(builder);
            cluster.getStorageCluster().getChildren().get("0").getConfig(builder);
            StorServerConfig config = new StorServerConfig(builder);
            assertEquals(type, config.persistence_provider().type());
        }

        {
            StorServerConfig.Builder builder = new StorServerConfig.Builder();
            cluster.getDistributorNodes().getConfig(builder);
            cluster.getDistributorNodes().getChildren().get("0").getConfig(builder);
            StorServerConfig config = new StorServerConfig(builder);
            assertEquals(type, config.persistence_provider().type());
        }
    }

    @Test
    public void testProviders() {
        testProvider("proton", StorServerConfig.Persistence_provider.Type.RPC);
        testProvider("dummy", StorServerConfig.Persistence_provider.Type.DUMMY);
    }

    @Test
    public void testMetrics() {
        MetricsmanagerConfig.Builder builder = new MetricsmanagerConfig.Builder();

        ContentCluster cluster = parse("<content version=\"1.0\" id=\"storage\">\n" +
            "  <documents/>" +
            "  <group>\n" +
            "    <node distribution-key=\"0\" hostalias=\"mockhost\"/>\n" +
            "  </group>\n" +
            "</content>"
        );
        cluster.getConfig(builder);

        // Cluster-level consumers, in fixed order: status, log, yamas, health, (fleetcontroller), statereporter.
        MetricsmanagerConfig config = new MetricsmanagerConfig(builder);
        assertEquals(6, config.consumer().size());
        assertEquals("status", config.consumer(0).name());
        assertEquals("*", config.consumer(0).addedmetrics(0));
        assertEquals("partofsum", config.consumer(0).removedtags(0));

        assertEquals("log", config.consumer(1).name());
        assertEquals("logdefault", config.consumer(1).tags().get(0));
        assertEquals("loadtype", config.consumer(1).removedtags(0));

        assertEquals("yamas", config.consumer(2).name());
        assertEquals("yamasdefault", config.consumer(2).tags().get(0));
        assertEquals("loadtype", config.consumer(2).removedtags(0));

        assertEquals("health", config.consumer(3).name());

        assertEquals("statereporter", config.consumer(5).name());
        assertEquals("*", config.consumer(5).addedmetrics(0));
        assertEquals("thread", config.consumer(5).removedtags(0));
        assertEquals("partofsum", config.consumer(5).removedtags(1));
        assertEquals(0, config.consumer(5).tags().size());

        // The storage cluster adds the fleetcontroller consumer's metric list.
        cluster.getStorageCluster().getConfig(builder);
        config = new MetricsmanagerConfig(builder);
        assertEquals(6, config.consumer().size());
        assertEquals("fleetcontroller", config.consumer(4).name());
        assertEquals(4, config.consumer(4).addedmetrics().size());
        assertEquals("vds.datastored.alldisks.docs", config.consumer(4).addedmetrics(0));
        assertEquals("vds.datastored.alldisks.bytes", config.consumer(4).addedmetrics(1));
        assertEquals("vds.datastored.alldisks.buckets", config.consumer(4).addedmetrics(2));
        assertEquals("vds.datastored.bucket_space.buckets_total", config.consumer(4).addedmetrics(3));
    }

    // Returns the consumer with the given name, or null if not configured.
    public MetricsmanagerConfig.Consumer getConsumer(String consumer, MetricsmanagerConfig config) {
        for (MetricsmanagerConfig.Consumer c : config.consumer()) {
            if (c.name().equals(consumer)) {
                return c;
            }
        }

        return null;
    }

    @Test
    public void testConfiguredMetrics() {
        String xml = "" +
            "<services>" +
            "<content version=\"1.0\" id=\"storage\">\n" +
            "  <redundancy>1</redundancy>\n" +
            "  <documents>" +
            "   <document type=\"type1\" mode='index'/>\n" +
            "   <document type=\"type2\" mode='index'/>\n" +
            "  </documents>" +
            "  <group>\n" +
            "    <node distribution-key=\"0\" hostalias=\"node0\"/>\n" +
            "  </group>\n" +
            "</content>" +
            "<admin version=\"2.0\">" +
            "  <logserver hostalias=\"node0\"/>" +
            "  <adminserver hostalias=\"node0\"/>" +
            "</admin>" +
            "</services>";

        List<String> sds = ApplicationPackageUtils.generateSchemas("type1", "type2");
        VespaModel model = new VespaModelCreatorWithMockPkg(getHosts(), xml, sds).create();

        {
            // Storage node: verify the exact metric sets for the log and fleetcontroller consumers.
            MetricsmanagerConfig.Builder builder = new MetricsmanagerConfig.Builder();
            model.getConfig(builder, "storage/storage/0");
            MetricsmanagerConfig config = new MetricsmanagerConfig(builder);

            String expected =
                "[vds.filestor.alldisks.allthreads.put.sum\n" +
                "vds.filestor.alldisks.allthreads.get.sum\n" +
                "vds.filestor.alldisks.allthreads.remove.sum\n" +
                "vds.filestor.alldisks.allthreads.update.sum\n" +
                "vds.datastored.alldisks.docs\n" +
                "vds.datastored.alldisks.bytes\n" +
                "vds.filestor.alldisks.queuesize\n" +
                "vds.filestor.alldisks.averagequeuewait.sum\n" +
                "vds.visitor.cv_queuewaittime\n" +
                "vds.visitor.allthreads.averagequeuewait\n" +
                "vds.visitor.allthreads.averagevisitorlifetime\n" +
                "vds.visitor.allthreads.created.sum]";
            String actual = getConsumer("log", config).addedmetrics().toString().replaceAll(", ", "\n");
            assertEquals(expected, actual);
            assertEquals("[logdefault]", getConsumer("log", config).tags().toString());
            expected =
                "[vds.datastored.alldisks.docs\n" +
                "vds.datastored.alldisks.bytes\n" +
                "vds.datastored.alldisks.buckets\n" +
                "vds.datastored.bucket_space.buckets_total]";
            actual = getConsumer("fleetcontroller", config).addedmetrics().toString().replaceAll(", ", "\n");
            assertEquals(expected, actual);
        }

        {
            // Distributor node: log consumer keeps its default tag.
            MetricsmanagerConfig.Builder builder = new MetricsmanagerConfig.Builder();
            model.getConfig(builder, "storage/distributor/0");
            MetricsmanagerConfig config = new MetricsmanagerConfig(builder);
            assertEquals("[logdefault]", getConsumer("log", config).tags().toString());
        }
    }

    @Test
    public void flush_on_shutdown_is_default_on_for_non_hosted() throws Exception {
        assertPrepareRestartCommand(createOneNodeCluster(false));
    }

    @Test
    public void flush_on_shutdown_can_be_turned_off_for_non_hosted() throws Exception {
        assertNoPreShutdownCommand(createClusterWithFlushOnShutdownOverride(false, false));
    }

    @Test
    public void flush_on_shutdown_is_default_on_for_hosted() throws Exception {
        assertPrepareRestartCommand(createOneNodeCluster(true));
    }

    @Test
    public void flush_on_shutdown_can_be_turned_on_for_hosted() throws Exception {
        assertPrepareRestartCommand(createClusterWithFlushOnShutdownOverride(true, true));
    }

    // Shared minimal one-node cluster XML used by the factory helpers below.
    private static String oneNodeClusterXml() {
        return "<content version=\"1.0\" id=\"mockcluster\">" +
                "  <documents/>" +
                "  <group>" +
                "    <node distribution-key=\"0\" hostalias=\"mockhost\"/>" +
                "  </group>" +
                "</content>";
    }

    private static ContentCluster createOneNodeCluster(boolean isHostedVespa) throws Exception {
        return createOneNodeCluster(oneNodeClusterXml(), new TestProperties().setHostedVespa(isHostedVespa));
    }

    private static ContentCluster createOneNodeCluster(TestProperties props) throws Exception {
        return createOneNodeCluster(oneNodeClusterXml(), props);
    }

    private static ContentCluster createOneNodeCluster(TestProperties props, Optional<Flavor> flavor) throws Exception {
        return createOneNodeCluster(oneNodeClusterXml(), props, flavor);
    }

    private static ContentCluster createClusterWithFlushOnShutdownOverride(boolean flushOnShutdown, boolean isHostedVespa) throws Exception {
        return createOneNodeCluster("<content version=\"1.0\" id=\"mockcluster\">" +
                "  <documents/>" +
                "  <engine>" +
                "    <proton>" +
                "      <flush-on-shutdown>" + flushOnShutdown + "</flush-on-shutdown>" +
                "    </proton>" +
                "  </engine>" +
                "  <group>" +
                "    <node distribution-key=\"0\" hostalias=\"mockhost\"/>" +
                "  </group>" +
                "</content>", new TestProperties().setHostedVespa(isHostedVespa));
    }

    private static ContentCluster createOneNodeCluster(String clusterXml, TestProperties props) throws Exception {
        return createOneNodeCluster(clusterXml, props, Optional.empty());
    }

    // Builds and validates a cluster; a flavor, when given, selects a single-node provisioner with it.
    private static ContentCluster createOneNodeCluster(String clusterXml, TestProperties props, Optional<Flavor> flavor) throws Exception {
        DeployState.Builder deployStateBuilder = new DeployState.Builder()
                .properties(props);
        MockRoot root = flavor.isPresent() ?
                ContentClusterUtils.createMockRoot(new SingleNodeProvisioner(flavor.get()),
                        Collections.emptyList(), deployStateBuilder) :
                ContentClusterUtils.createMockRoot(Collections.emptyList(), deployStateBuilder);
        ContentCluster cluster = ContentClusterUtils.createCluster(clusterXml, root);
        root.freezeModelTopology();
        cluster.validate();
        return cluster;
    }

    private static void assertPrepareRestartCommand(ContentCluster cluster) {
        Optional<String> command = cluster.getSearch().getSearchNodes().get(0).getPreShutdownCommand();
        assertTrue(command.isPresent());
        assertTrue(command.get().matches(".*vespa-proton-cmd [0-9]+ prepareRestart"));
    }

    private static void assertNoPreShutdownCommand(ContentCluster cluster) {
        Optional<String> command = cluster.getSearch().getSearchNodes().get(0).getPreShutdownCommand();
        assertFalse(command.isPresent());
    }

    @Test
    public void reserved_document_name_throws_exception() {
        // "true" is a reserved keyword and must be rejected as a document type name.
        expectedException.expect(IllegalArgumentException.class);
        expectedException.expectMessage("The following document types conflict with reserved keyword names: 'true'.");

        String xml = "<content version=\"1.0\" id=\"storage\">" +
              "  <redundancy>1</redundancy>" +
              "  <documents>" +
              "    <document type=\"true\" mode=\"index\"/>" +
              "  </documents>" +
              "  <group>" +
              "    <node distribution-key=\"0\" hostalias=\"mockhost\"/>" +
              "  </group>" +
              "</content>";

        List<String> sds = ApplicationPackageUtils.generateSchemas("true");
        new VespaModelCreatorWithMockPkg(null, xml, sds).create();
    }

    // Asserts a cluster maps exactly the given document types to the default and global bucket spaces.
    private void assertClusterHasBucketSpaceMappings(AllClustersBucketSpacesConfig config, String clusterId,
                                                     List<String> defaultSpaceTypes, List<String> globalSpaceTypes) {
        AllClustersBucketSpacesConfig.Cluster cluster = config.cluster(clusterId);
        assertNotNull(cluster);
        assertEquals(defaultSpaceTypes.size() + globalSpaceTypes.size(), cluster.documentType().size());
        assertClusterHasTypesInBucketSpace(cluster, "default", defaultSpaceTypes);
        assertClusterHasTypesInBucketSpace(cluster, "global", globalSpaceTypes);
    }

    private void assertClusterHasTypesInBucketSpace(AllClustersBucketSpacesConfig.Cluster cluster,
                                                    String bucketSpace, List<String> expectedTypes) {
        for (String type : expectedTypes) {
            assertNotNull(cluster.documentType(type));
            assertEquals(bucketSpace, cluster.documentType(type).bucketSpace());
        }
    }

    // Two content clusters on one host: foo_c (bunnies, hares) and bar_c (rabbits, global).
    private VespaModel createDualContentCluster() {
        String xml =
                "<services>" +
                "<admin version=\"2.0\">" +
                "  <adminserver hostalias=\"node0\"/>" +
                "</admin>" +
                "<content version=\"1.0\" id=\"foo_c\">" +
                "  <redundancy>1</redundancy>" +
                "  <documents>" +
                "    <document type=\"bunnies\" mode=\"index\"/>" +
                "    <document type=\"hares\" mode=\"index\"/>" +
                "  </documents>" +
                "  <group>" +
                "    <node distribution-key=\"0\" hostalias=\"node0\"/>" +
                "  </group>" +
                "</content>" +
                "<content version=\"1.0\" id=\"bar_c\">" +
                "  <redundancy>1</redundancy>" +
                "  <documents>" +
                "    <document type=\"rabbits\" mode=\"index\" global=\"true\"/>" +
                "  </documents>" +
                "  <group>" +
                "    <node distribution-key=\"0\" hostalias=\"node0\"/>" +
                "  </group>" +
                "</content>" +
                "</services>";
        List<String> sds = ApplicationPackageUtils.generateSchemas("bunnies", "hares", "rabbits");
        return new VespaModelCreatorWithMockPkg(getHosts(), xml, sds).create();
    }

    @Test
    public void all_clusters_bucket_spaces_config_contains_mappings_across_all_clusters() {
        VespaModel model = createDualContentCluster();
        AllClustersBucketSpacesConfig.Builder builder = new AllClustersBucketSpacesConfig.Builder();
        model.getConfig(builder, "client");
        AllClustersBucketSpacesConfig config = builder.build();

        assertEquals(2, config.cluster().size());

        assertClusterHasBucketSpaceMappings(config, "foo_c", Arrays.asList("bunnies", "hares"), Collections.emptyList());
        assertClusterHasBucketSpaceMappings(config, "bar_c", Collections.emptyList(), Collections.singletonList("rabbits"));
    }

    @Test
    public void test_routing_with_multiple_clusters() {
        VespaModel model = createDualContentCluster();
        Routing routing = model.getRouting();
        assertNotNull(routing);
        assertEquals("[]",
routing.getErrors().toString()); assertEquals(1, routing.getProtocols().size()); DocumentProtocol protocol = (DocumentProtocol) routing.getProtocols().get(0); RoutingTableSpec spec = protocol.getRoutingTableSpec(); assertEquals(3, spec.getNumHops()); assertEquals("docproc/cluster.bar_c.indexing/chain.indexing", spec.getHop(0).getName()); assertEquals("docproc/cluster.foo_c.indexing/chain.indexing", spec.getHop(1).getName()); assertEquals("indexing", spec.getHop(2).getName()); assertEquals(10, spec.getNumRoutes()); assertRoute(spec.getRoute(0), "bar_c", "[MessageType:bar_c]"); assertRoute(spec.getRoute(1), "bar_c-direct", "[Content:cluster=bar_c]"); assertRoute(spec.getRoute(2), "bar_c-index", "docproc/cluster.bar_c.indexing/chain.indexing", "[Content:cluster=bar_c]"); assertRoute(spec.getRoute(3), "default", "indexing"); assertRoute(spec.getRoute(4), "default-get", "indexing"); assertRoute(spec.getRoute(5), "foo_c", "[MessageType:foo_c]"); assertRoute(spec.getRoute(6), "foo_c-direct", "[Content:cluster=foo_c]"); assertRoute(spec.getRoute(7), "foo_c-index", "docproc/cluster.foo_c.indexing/chain.indexing", "[Content:cluster=foo_c]"); assertRoute(spec.getRoute(8), "storage/cluster.bar_c", "route:bar_c"); assertRoute(spec.getRoute(9), "storage/cluster.foo_c", "route:foo_c"); } private ContentCluster createWithZone(String clusterXml, Zone zone) throws Exception { DeployState.Builder deployStateBuilder = new DeployState.Builder() .zone(zone) .properties(new TestProperties().setHostedVespa(true)); List<String> schemas = SchemaBuilder.createSchemas("test"); MockRoot root = ContentClusterUtils.createMockRoot(schemas, deployStateBuilder); ContentCluster cluster = ContentClusterUtils.createCluster(clusterXml, root); root.freezeModelTopology(); cluster.validate(); return cluster; } private void assertDistributionBitsInConfig(ContentCluster cluster, int distributionBits) { FleetcontrollerConfig.Builder builder = new FleetcontrollerConfig.Builder(); cluster.getConfig(builder); 
cluster.getClusterControllerConfig().getConfig(builder); FleetcontrollerConfig config = new FleetcontrollerConfig(builder); assertEquals(distributionBits, config.ideal_distribution_bits()); StorDistributormanagerConfig.Builder sdBuilder = new StorDistributormanagerConfig.Builder(); cluster.getConfig(sdBuilder); StorDistributormanagerConfig storDistributormanagerConfig = new StorDistributormanagerConfig(sdBuilder); assertEquals(distributionBits, storDistributormanagerConfig.minsplitcount()); } private void verifyTopKProbabilityPropertiesControl() { VespaModel model = createEnd2EndOneNode(new TestProperties()); ContentCluster cc = model.getContentClusters().get("storage"); DispatchConfig.Builder builder = new DispatchConfig.Builder(); cc.getSearch().getConfig(builder); DispatchConfig cfg = new DispatchConfig(builder); assertEquals(0.9999, cfg.topKProbability(), 0.0); } @Test public void default_topKprobability_controlled_by_properties() { verifyTopKProbabilityPropertiesControl(); } private boolean resolveThreePhaseUpdateConfigWithFeatureFlag(boolean flagEnableThreePhase) { VespaModel model = createEnd2EndOneNode(new TestProperties().setUseThreePhaseUpdates(flagEnableThreePhase)); ContentCluster cc = model.getContentClusters().get("storage"); var builder = new StorDistributormanagerConfig.Builder(); cc.getDistributorNodes().getConfig(builder); return (new StorDistributormanagerConfig(builder)).enable_metadata_only_fetch_phase_for_inconsistent_updates(); } @Test public void default_distributor_three_phase_update_config_controlled_by_properties() { assertFalse(resolveThreePhaseUpdateConfigWithFeatureFlag(false)); assertTrue(resolveThreePhaseUpdateConfigWithFeatureFlag(true)); } private int resolveMaxCompactBuffers(OptionalInt maxCompactBuffers) { TestProperties testProperties = new TestProperties(); if (maxCompactBuffers.isPresent()) { testProperties.maxCompactBuffers(maxCompactBuffers.getAsInt()); } VespaModel model = createEnd2EndOneNode(testProperties); 
ContentCluster cc = model.getContentClusters().get("storage"); ProtonConfig.Builder protonBuilder = new ProtonConfig.Builder(); cc.getSearch().getConfig(protonBuilder); ProtonConfig protonConfig = new ProtonConfig(protonBuilder); assertEquals(1, protonConfig.documentdb().size()); return protonConfig.documentdb(0).allocation().max_compact_buffers(); } @Test void assertZookeeperServerImplementation(String expectedClassName, ClusterControllerContainerCluster clusterControllerCluster) { for (ClusterControllerContainer c : clusterControllerCluster.getContainers()) { var builder = new ComponentsConfig.Builder(); c.getConfig(builder); assertEquals(1, new ComponentsConfig(builder).components().stream() .filter(component -> component.classId().equals(expectedClassName)) .count()); } } private StorDistributormanagerConfig resolveStorDistributormanagerConfig(TestProperties props) throws Exception { var cc = createOneNodeCluster(props); var builder = new StorDistributormanagerConfig.Builder(); cc.getDistributorNodes().getConfig(builder); return (new StorDistributormanagerConfig(builder)); } private int resolveMaxInhibitedGroupsConfigWithFeatureFlag(int maxGroups) throws Exception { var cfg = resolveStorDistributormanagerConfig(new TestProperties().maxActivationInhibitedOutOfSyncGroups(maxGroups)); return cfg.max_activation_inhibited_out_of_sync_groups(); } @Test public void default_distributor_max_inhibited_group_activation_config_controlled_by_properties() throws Exception { assertEquals(0, resolveMaxInhibitedGroupsConfigWithFeatureFlag(0)); assertEquals(2, resolveMaxInhibitedGroupsConfigWithFeatureFlag(2)); } private int resolveNumDistributorStripesConfig(Optional<Flavor> flavor) throws Exception { var cc = createOneNodeCluster(new TestProperties(), flavor); var builder = new StorDistributormanagerConfig.Builder(); cc.getDistributorNodes().getChildren().get("0").getConfig(builder); return (new StorDistributormanagerConfig(builder)).num_distributor_stripes(); } private int 
resolveTunedNumDistributorStripesConfig(int numCpuCores) throws Exception { var flavor = new Flavor(new FlavorsConfig.Flavor(new FlavorsConfig.Flavor.Builder().name("test").minCpuCores(numCpuCores))); return resolveNumDistributorStripesConfig(Optional.of(flavor)); } @Test public void num_distributor_stripes_config_defaults_to_zero() throws Exception { assertEquals(0, resolveNumDistributorStripesConfig(Optional.empty())); } @Test public void num_distributor_stripes_config_tuned_by_flavor() throws Exception { assertEquals(1, resolveTunedNumDistributorStripesConfig(1)); assertEquals(1, resolveTunedNumDistributorStripesConfig(16)); assertEquals(2, resolveTunedNumDistributorStripesConfig(17)); assertEquals(2, resolveTunedNumDistributorStripesConfig(64)); assertEquals(4, resolveTunedNumDistributorStripesConfig(65)); } @Test public void distributor_merge_busy_wait_controlled_by_properties() throws Exception { assertEquals(10, resolveDistributorMergeBusyWaitConfig(Optional.empty())); assertEquals(1, resolveDistributorMergeBusyWaitConfig(Optional.of(1))); } private int resolveDistributorMergeBusyWaitConfig(Optional<Integer> mergeBusyWait) throws Exception { var props = new TestProperties(); if (mergeBusyWait.isPresent()) { props.setDistributorMergeBusyWait(mergeBusyWait.get()); } var cluster = createOneNodeCluster(props); var builder = new StorDistributormanagerConfig.Builder(); cluster.getDistributorNodes().getConfig(builder); return (new StorDistributormanagerConfig(builder)).inhibit_merge_sending_on_busy_node_duration_sec(); } @Test public void distributor_enhanced_maintenance_scheduling_controlled_by_properties() throws Exception { assertFalse(resolveDistributorEnhancedSchedulingConfig(false)); assertTrue(resolveDistributorEnhancedSchedulingConfig(true)); } private boolean resolveDistributorEnhancedSchedulingConfig(boolean enhancedScheduling) throws Exception { var props = new TestProperties(); if (enhancedScheduling) { 
props.distributorEnhancedMaintenanceScheduling(enhancedScheduling); } var cluster = createOneNodeCluster(props); var builder = new StorDistributormanagerConfig.Builder(); cluster.getDistributorNodes().getConfig(builder); return (new StorDistributormanagerConfig(builder)).implicitly_clear_bucket_priority_on_schedule(); } @Test public void unordered_merge_chaining_config_controlled_by_properties() throws Exception { assertFalse(resolveUnorderedMergeChainingConfig(false)); assertTrue(resolveUnorderedMergeChainingConfig(true)); } private boolean resolveUnorderedMergeChainingConfig(boolean unorderedMergeChaining) throws Exception { var props = new TestProperties(); if (unorderedMergeChaining) { props.setUnorderedMergeChaining(true); } var cluster = createOneNodeCluster(props); var builder = new StorDistributormanagerConfig.Builder(); cluster.getDistributorNodes().getConfig(builder); return (new StorDistributormanagerConfig(builder)).use_unordered_merge_chaining(); } @Test public void testDedicatedClusterControllers() { VespaModel noContentModel = createEnd2EndOneNode(new TestProperties().setHostedVespa(true) .setMultitenant(true), "<?xml version='1.0' encoding='UTF-8' ?>" + "<services version='1.0'>" + " <container id='default' version='1.0' />" + " </services>"); assertEquals(Map.of(), noContentModel.getContentClusters()); assertNull("No cluster controller without content", noContentModel.getAdmin().getClusterControllers()); VespaModel oneContentModel = createEnd2EndOneNode(new TestProperties().setHostedVespa(true) .setMultitenant(true), "<?xml version='1.0' encoding='UTF-8' ?>" + "<services version='1.0'>" + " <container id='default' version='1.0' />" + " <content id='storage' version='1.0'>" + " <redundancy>1</redundancy>" + " <documents>" + " <document mode='index' type='type1' />" + " </documents>" + " </content>" + " </services>"); assertNotNull("Shared cluster controller with content", oneContentModel.getAdmin().getClusterControllers()); String twoContentServices 
= "<?xml version='1.0' encoding='UTF-8' ?>" + "<services version='1.0'>" + " <container id='default' version='1.0' />" + " <content id='storage' version='1.0'>" + " <redundancy>1</redundancy>" + " <documents>" + " <document mode='index' type='type1' />" + " </documents>" + " <tuning>" + " <cluster-controller>" + " <min-distributor-up-ratio>0.618</min-distributor-up-ratio>" + " </cluster-controller>" + " </tuning>" + " </content>" + " <content id='dev-null' version='1.0'>" + " <redundancy>1</redundancy>" + " <documents>" + " <document mode='index' type='type1' />" + " </documents>" + " <tuning>" + " <cluster-controller>" + " <min-distributor-up-ratio>0.418</min-distributor-up-ratio>" + " </cluster-controller>" + " </tuning>" + " </content>" + " </services>"; VespaModel twoContentModel = createEnd2EndOneNode(new TestProperties().setHostedVespa(true) .setMultitenant(true), twoContentServices); assertNotNull("Shared cluster controller with content", twoContentModel.getAdmin().getClusterControllers()); ClusterControllerContainerCluster clusterControllers = twoContentModel.getAdmin().getClusterControllers(); assertEquals(1, clusterControllers.reindexingContext().documentTypesForCluster("storage").size()); assertEquals(1, clusterControllers.reindexingContext().documentTypesForCluster("dev-null").size()); var storageBuilder = new FleetcontrollerConfig.Builder(); var devNullBuilder = new FleetcontrollerConfig.Builder(); twoContentModel.getConfig(storageBuilder, "admin/standalone/cluster-controllers/0/components/clustercontroller-storage-configurer"); twoContentModel.getConfig(devNullBuilder, "admin/standalone/cluster-controllers/0/components/clustercontroller-dev-null-configurer"); assertEquals(0.618, storageBuilder.build().min_distributor_up_ratio(), 1e-9); assertEquals(0.418, devNullBuilder.build().min_distributor_up_ratio(), 1e-9); assertZookeeperServerImplementation("com.yahoo.vespa.zookeeper.ReconfigurableVespaZooKeeperServer", clusterControllers); 
assertZookeeperServerImplementation("com.yahoo.vespa.zookeeper.Reconfigurer", clusterControllers); assertZookeeperServerImplementation("com.yahoo.vespa.zookeeper.VespaZooKeeperAdminImpl", clusterControllers); } }
The `oldTaskRun` local variable is unnecessary: return `run` directly from inside the loop and `null` after it.
/**
 * Returns the queued TaskRun that is {@code equals} to the given probe, or {@code null}
 * when the queue contains no such element.
 *
 * @param taskRuns pending queue to scan (iterated in iterator order, not priority order)
 * @param taskRun  probe used for the equality comparison
 * @return the queue's own matching instance, or null when absent
 */
private TaskRun getTaskRun(PriorityBlockingQueue<TaskRun> taskRuns, TaskRun taskRun) {
    // Return the match directly; the former "oldTaskRun" holder variable added nothing.
    for (TaskRun run : taskRuns) {
        if (run.equals(taskRun)) {
            return run;
        }
    }
    return null;
}
oldTaskRun = run;
/**
 * Returns the queued TaskRun that is {@code equals} to the given probe, or {@code null}
 * when the queue contains no such element.
 *
 * @param taskRuns pending queue to scan (iterated in iterator order, not priority order)
 * @param taskRun  probe used for the equality comparison
 * @return the queue's own matching instance, or null when absent
 */
private TaskRun getTaskRun(PriorityBlockingQueue<TaskRun> taskRuns, TaskRun taskRun) {
    // Return the match directly; the former "oldTaskRun" holder variable added nothing.
    for (TaskRun run : taskRuns) {
        if (run.equals(taskRun)) {
            return run;
        }
    }
    return null;
}
class TaskRunManager { private static final Logger LOG = LogManager.getLogger(TaskRunManager.class); private final Map<Long, PriorityBlockingQueue<TaskRun>> pendingTaskRunMap = Maps.newConcurrentMap(); private final Map<Long, TaskRun> runningTaskRunMap = Maps.newConcurrentMap(); private final TaskRunHistory taskRunHistory = new TaskRunHistory(); private final TaskRunExecutor taskRunExecutor = new TaskRunExecutor(); private final QueryableReentrantLock taskRunLock = new QueryableReentrantLock(true); public SubmitResult submitTaskRun(TaskRun taskRun, ExecuteOption option) { if (taskRun.getStatus() != null) { return new SubmitResult(taskRun.getStatus().getQueryId(), SubmitResult.SubmitStatus.FAILED); } int validPendingCount = 0; for (Long taskId : pendingTaskRunMap.keySet()) { if (!pendingTaskRunMap.get(taskId).isEmpty()) { validPendingCount++; } } if (validPendingCount >= Config.task_runs_queue_length) { LOG.warn("pending TaskRun exceeds task_runs_queue_length:{}, reject the submit.", Config.task_runs_queue_length); return new SubmitResult(null, SubmitResult.SubmitStatus.REJECTED); } String queryId = UUIDUtil.genUUID().toString(); TaskRunStatus status = taskRun.initStatus(queryId, System.currentTimeMillis()); status.setPriority(option.getPriority()); status.setMergeRedundant(option.isMergeRedundant()); GlobalStateMgr.getCurrentState().getEditLog().logTaskRunCreateStatus(status); arrangeTaskRun(taskRun, option.isMergeRedundant()); return new SubmitResult(queryId, SubmitResult.SubmitStatus.SUBMITTED); } public void arrangeTaskRun(TaskRun taskRun, boolean mergeRedundant) { if (!tryTaskRunLock()) { return; } try { long taskId = taskRun.getTaskId(); PriorityBlockingQueue<TaskRun> taskRuns = pendingTaskRunMap.computeIfAbsent(taskId, u -> Queues.newPriorityBlockingQueue()); if (mergeRedundant) { TaskRun oldTaskRun = getTaskRun(taskRuns, taskRun); if (oldTaskRun != null) { boolean isRemove = taskRuns.remove(taskRun); if (!isRemove) { LOG.warn("failed to remove TaskRun 
definition is [{}]", taskRun.getStatus().getDefinition()); } if (oldTaskRun.getStatus().getPriority() > taskRun.getStatus().getPriority()) { taskRun.getStatus().setPriority(oldTaskRun.getStatus().getPriority()); } if (oldTaskRun.getStatus().getCreateTime() > taskRun.getStatus().getCreateTime()) { taskRun.getStatus().setCreateTime(oldTaskRun.getStatus().getCreateTime()); } } } taskRuns.offer(taskRun); } finally { taskRunUnlock(); } } @Nullable public void checkRunningTaskRun() { Iterator<Long> runningIterator = runningTaskRunMap.keySet().iterator(); while (runningIterator.hasNext()) { Long taskId = runningIterator.next(); TaskRun taskRun = runningTaskRunMap.get(taskId); if (taskRun == null) { LOG.warn("failed to get running TaskRun by taskId:{}", taskId); runningIterator.remove(); return; } Future<?> future = taskRun.getFuture(); if (future.isDone()) { runningIterator.remove(); taskRunHistory.addHistory(taskRun.getStatus()); TaskRunStatusChange statusChange = new TaskRunStatusChange(taskRun.getTaskId(), taskRun.getStatus(), Constants.TaskRunState.RUNNING, taskRun.getStatus().getState()); GlobalStateMgr.getCurrentState().getEditLog().logUpdateTaskRun(statusChange); } } } public void scheduledPendingTaskRun() { int currentRunning = runningTaskRunMap.size(); Iterator<Long> pendingIterator = pendingTaskRunMap.keySet().iterator(); while (pendingIterator.hasNext()) { Long taskId = pendingIterator.next(); TaskRun runningTaskRun = runningTaskRunMap.get(taskId); if (runningTaskRun == null) { Queue<TaskRun> taskRunQueue = pendingTaskRunMap.get(taskId); if (taskRunQueue.size() == 0) { pendingIterator.remove(); } else { if (currentRunning >= Config.task_runs_concurrency) { break; } TaskRun pendingTaskRun = taskRunQueue.poll(); taskRunExecutor.executeTaskRun(pendingTaskRun); runningTaskRunMap.put(taskId, pendingTaskRun); TaskRunStatusChange statusChange = new TaskRunStatusChange(taskId, pendingTaskRun.getStatus(), Constants.TaskRunState.PENDING, Constants.TaskRunState.RUNNING); 
GlobalStateMgr.getCurrentState().getEditLog().logUpdateTaskRun(statusChange); currentRunning++; } } } } public boolean tryTaskRunLock() { try { if (!taskRunLock.tryLock(5, TimeUnit.SECONDS)) { Thread owner = taskRunLock.getOwner(); if (owner != null) { LOG.warn("task run lock is held by: {}", () -> Util.dumpThread(owner, 50)); } else { LOG.warn("task run lock owner is null"); } return false; } return true; } catch (InterruptedException e) { LOG.warn("got exception while getting task run lock", e); Thread.currentThread().interrupt(); } return false; } public void taskRunUnlock() { this.taskRunLock.unlock(); } public Map<Long, PriorityBlockingQueue<TaskRun>> getPendingTaskRunMap() { return pendingTaskRunMap; } public Map<Long, TaskRun> getRunningTaskRunMap() { return runningTaskRunMap; } public TaskRunHistory getTaskRunHistory() { return taskRunHistory; } }
/**
 * Coordinates the lifecycle of {@code TaskRun}s: submission, per-task pending queues,
 * promotion to running (bounded by {@code Config.task_runs_concurrency}), and archiving
 * of finished runs into the history. State transitions are persisted through the
 * GlobalStateMgr edit log so they are recoverable after failover.
 */
class TaskRunManager {
    private static final Logger LOG = LogManager.getLogger(TaskRunManager.class);

    // taskId -> queue of runs waiting to execute for that task.
    // NOTE(review): element ordering comes from TaskRun's natural ordering — presumably
    // priority/create time; confirm against TaskRun's Comparable implementation.
    private final Map<Long, PriorityBlockingQueue<TaskRun>> pendingTaskRunMap = Maps.newConcurrentMap();

    // taskId -> the single run currently executing for that task.
    private final Map<Long, TaskRun> runningTaskRunMap = Maps.newConcurrentMap();

    private final TaskRunHistory taskRunHistory = new TaskRunHistory();

    private final TaskRunExecutor taskRunExecutor = new TaskRunExecutor();

    // Fair lock serializing structural updates to the pending queues.
    private final QueryableReentrantLock taskRunLock = new QueryableReentrantLock(true);

    /**
     * Validates and enqueues a new TaskRun.
     *
     * @param taskRun run to submit; must not have been submitted before (null status)
     * @param option  execution options (priority, redundant-merge behavior)
     * @return FAILED when already submitted, REJECTED when the pending backlog is full,
     *         otherwise SUBMITTED with the newly assigned query id
     */
    public SubmitResult submitTaskRun(TaskRun taskRun, ExecuteOption option) {
        // A non-null status means this run was already submitted once.
        if (taskRun.getStatus() != null) {
            return new SubmitResult(taskRun.getStatus().getQueryId(), SubmitResult.SubmitStatus.FAILED);
        }
        // Count only task ids that actually have pending work; empty queues linger in the
        // map until scheduledPendingTaskRun() prunes them.
        int validPendingCount = 0;
        for (Long taskId : pendingTaskRunMap.keySet()) {
            if (!pendingTaskRunMap.get(taskId).isEmpty()) {
                validPendingCount++;
            }
        }
        if (validPendingCount >= Config.task_runs_queue_length) {
            LOG.warn("pending TaskRun exceeds task_runs_queue_length:{}, reject the submit.",
                    Config.task_runs_queue_length);
            return new SubmitResult(null, SubmitResult.SubmitStatus.REJECTED);
        }
        String queryId = UUIDUtil.genUUID().toString();
        TaskRunStatus status = taskRun.initStatus(queryId, System.currentTimeMillis());
        status.setPriority(option.getPriority());
        status.setMergeRedundant(option.isMergeRedundant());
        // Persist creation before queuing so the run survives a leader failover.
        GlobalStateMgr.getCurrentState().getEditLog().logTaskRunCreateStatus(status);
        arrangeTaskRun(taskRun, option.isMergeRedundant());
        return new SubmitResult(queryId, SubmitResult.SubmitStatus.SUBMITTED);
    }

    /**
     * Places the run on its task's pending queue. When {@code mergeRedundant} is set, an
     * already-queued equal run is replaced, and the new run inherits the higher priority
     * and the earlier create time of the run it replaces.
     *
     * NOTE(review): if the lock cannot be acquired within the timeout the run is silently
     * dropped from scheduling (its creation was already logged) — confirm this is intended.
     */
    public void arrangeTaskRun(TaskRun taskRun, boolean mergeRedundant) {
        if (!tryTaskRunLock()) {
            return;
        }
        try {
            long taskId = taskRun.getTaskId();
            PriorityBlockingQueue<TaskRun> taskRuns = pendingTaskRunMap.computeIfAbsent(taskId,
                    u -> Queues.newPriorityBlockingQueue());
            if (mergeRedundant) {
                TaskRun oldTaskRun = getTaskRun(taskRuns, taskRun);
                if (oldTaskRun != null) {
                    boolean isRemove = taskRuns.remove(taskRun);
                    if (!isRemove) {
                        LOG.warn("failed to remove TaskRun definition is [{}]", taskRun.getStatus().getDefinition());
                    }
                    // Keep the strongest priority and the oldest create time of the merged pair.
                    if (oldTaskRun.getStatus().getPriority() > taskRun.getStatus().getPriority()) {
                        taskRun.getStatus().setPriority(oldTaskRun.getStatus().getPriority());
                    }
                    if (oldTaskRun.getStatus().getCreateTime() > taskRun.getStatus().getCreateTime()) {
                        taskRun.getStatus().setCreateTime(oldTaskRun.getStatus().getCreateTime());
                    }
                }
            }
            taskRuns.offer(taskRun);
        } finally {
            taskRunUnlock();
        }
    }

    /**
     * Sweeps the running map: completed runs are removed, archived into the history, and
     * their RUNNING -> final-state transition is logged.
     *
     * NOTE(review): {@code @Nullable} on a void method has no effect — likely a leftover;
     * consider removing. Also, on a null entry this returns instead of continuing,
     * abandoning the rest of the sweep — confirm whether "continue" was intended.
     */
    @Nullable
    public void checkRunningTaskRun() {
        Iterator<Long> runningIterator = runningTaskRunMap.keySet().iterator();
        while (runningIterator.hasNext()) {
            Long taskId = runningIterator.next();
            TaskRun taskRun = runningTaskRunMap.get(taskId);
            if (taskRun == null) {
                LOG.warn("failed to get running TaskRun by taskId:{}", taskId);
                runningIterator.remove();
                return;
            }
            Future<?> future = taskRun.getFuture();
            if (future.isDone()) {
                runningIterator.remove();
                taskRunHistory.addHistory(taskRun.getStatus());
                TaskRunStatusChange statusChange = new TaskRunStatusChange(taskRun.getTaskId(), taskRun.getStatus(),
                        Constants.TaskRunState.RUNNING, taskRun.getStatus().getState());
                GlobalStateMgr.getCurrentState().getEditLog().logUpdateTaskRun(statusChange);
            }
        }
    }

    /**
     * Promotes pending runs to running, at most one per task and never exceeding
     * {@code Config.task_runs_concurrency} concurrently running tasks. Empty pending
     * queues encountered along the way are pruned from the map.
     */
    public void scheduledPendingTaskRun() {
        int currentRunning = runningTaskRunMap.size();
        Iterator<Long> pendingIterator = pendingTaskRunMap.keySet().iterator();
        while (pendingIterator.hasNext()) {
            Long taskId = pendingIterator.next();
            TaskRun runningTaskRun = runningTaskRunMap.get(taskId);
            // Only one run per task may execute at a time.
            if (runningTaskRun == null) {
                Queue<TaskRun> taskRunQueue = pendingTaskRunMap.get(taskId);
                if (taskRunQueue.size() == 0) {
                    pendingIterator.remove();
                } else {
                    if (currentRunning >= Config.task_runs_concurrency) {
                        break;
                    }
                    TaskRun pendingTaskRun = taskRunQueue.poll();
                    taskRunExecutor.executeTaskRun(pendingTaskRun);
                    runningTaskRunMap.put(taskId, pendingTaskRun);
                    TaskRunStatusChange statusChange = new TaskRunStatusChange(taskId, pendingTaskRun.getStatus(),
                            Constants.TaskRunState.PENDING, Constants.TaskRunState.RUNNING);
                    GlobalStateMgr.getCurrentState().getEditLog().logUpdateTaskRun(statusChange);
                    currentRunning++;
                }
            }
        }
    }

    /**
     * Tries to take the task-run lock, waiting up to 5 seconds. On failure the current
     * holder (if identifiable) is logged. Restores the interrupt flag if interrupted.
     *
     * @return true iff the lock was acquired
     */
    public boolean tryTaskRunLock() {
        try {
            if (!taskRunLock.tryLock(5, TimeUnit.SECONDS)) {
                Thread owner = taskRunLock.getOwner();
                if (owner != null) {
                    LOG.warn("task run lock is held by: {}", () -> Util.dumpThread(owner, 50));
                } else {
                    LOG.warn("task run lock owner is null");
                }
                return false;
            }
            return true;
        } catch (InterruptedException e) {
            LOG.warn("got exception while getting task run lock", e);
            Thread.currentThread().interrupt();
        }
        return false;
    }

    public void taskRunUnlock() {
        this.taskRunLock.unlock();
    }

    public Map<Long, PriorityBlockingQueue<TaskRun>> getPendingTaskRunMap() {
        return pendingTaskRunMap;
    }

    public Map<Long, TaskRun> getRunningTaskRunMap() {
        return runningTaskRunMap;
    }

    public TaskRunHistory getTaskRunHistory() {
        return taskRunHistory;
    }
}
Fixed: the calls now pass `OptionalInt` to `resolveMaxCompactBuffers` (matching its signature), and the default case (`OptionalInt.empty()`) is asserted as well.
/**
 * Verifies that ProtonConfig's max_compact_buffers uses its default when no override is
 * given, and that explicit overrides set through TestProperties are propagated verbatim.
 *
 * Fixes: resolveMaxCompactBuffers takes an OptionalInt, so the raw-int calls did not
 * match its signature; the default (empty) case was also missing.
 */
public void default_max_compact_buffers_config_controlled_by_properties() {
    assertEquals(1, resolveMaxCompactBuffers(OptionalInt.empty())); // default when not overridden
    assertEquals(2, resolveMaxCompactBuffers(OptionalInt.of(2)));
    assertEquals(7, resolveMaxCompactBuffers(OptionalInt.of(7)));
}
assertEquals(2, resolveMaxCompactBuffers(2));
/**
 * Checks the wiring of max_compact_buffers: absent an override the model reports the
 * built-in default of 1, and values supplied via TestProperties come back unchanged.
 */
public void default_max_compact_buffers_config_controlled_by_properties() {
    // No override configured -> default value.
    assertEquals(1, resolveMaxCompactBuffers(OptionalInt.empty()));
    // Explicit overrides are propagated verbatim.
    for (int buffers : new int[] {2, 7}) {
        assertEquals(buffers, resolveMaxCompactBuffers(OptionalInt.of(buffers)));
    }
}
class ContentClusterTest extends ContentBaseTest { private final static String HOSTS = "<admin version='2.0'><adminserver hostalias='mockhost' /></admin>"; @Rule public ExpectedException expectedException = ExpectedException.none(); ContentCluster parse(String xml) { xml = HOSTS + xml; TestRoot root = new TestDriver().buildModel(xml); return root.getConfigModels(Content.class).get(0).getCluster(); } @Test public void testHierarchicRedundancy() { ContentCluster cc = parse("" + "<content version=\"1.0\" id=\"storage\">\n" + " <documents/>" + " <engine>" + " <proton>" + " <searchable-copies>3</searchable-copies>" + " </proton>" + " </engine>" + " <redundancy>15</redundancy>\n" + " <group name='root' distribution-key='0'>" + " <distribution partitions='1|1|*'/>" + " <group name='g-1' distribution-key='0'>" + " <node hostalias='mockhost' distribution-key='0'/>" + " <node hostalias='mockhost' distribution-key='1'/>" + " <node hostalias='mockhost' distribution-key='2'/>" + " <node hostalias='mockhost' distribution-key='3'/>" + " <node hostalias='mockhost' distribution-key='4'/>" + " </group>" + " <group name='g-2' distribution-key='1'>" + " <node hostalias='mockhost' distribution-key='5'/>" + " <node hostalias='mockhost' distribution-key='6'/>" + " <node hostalias='mockhost' distribution-key='7'/>" + " <node hostalias='mockhost' distribution-key='8'/>" + " <node hostalias='mockhost' distribution-key='9'/>" + " </group>" + " <group name='g-3' distribution-key='1'>" + " <node hostalias='mockhost' distribution-key='10'/>" + " <node hostalias='mockhost' distribution-key='11'/>" + " <node hostalias='mockhost' distribution-key='12'/>" + " <node hostalias='mockhost' distribution-key='13'/>" + " <node hostalias='mockhost' distribution-key='14'/>" + " </group>" + " </group>" + "</content>" ); DistributionConfig.Builder distributionBuilder = new DistributionConfig.Builder(); cc.getConfig(distributionBuilder); DistributionConfig distributionConfig = distributionBuilder.build(); 
assertEquals(3, distributionConfig.cluster("storage").ready_copies()); assertEquals(15, distributionConfig.cluster("storage").initial_redundancy()); assertEquals(15, distributionConfig.cluster("storage").redundancy()); assertEquals(4, distributionConfig.cluster("storage").group().size()); assertEquals(1, distributionConfig.cluster().size()); StorDistributionConfig.Builder storBuilder = new StorDistributionConfig.Builder(); cc.getConfig(storBuilder); StorDistributionConfig storConfig = new StorDistributionConfig(storBuilder); assertEquals(15, storConfig.initial_redundancy()); assertEquals(15, storConfig.redundancy()); assertEquals(3, storConfig.ready_copies()); ProtonConfig.Builder protonBuilder = new ProtonConfig.Builder(); cc.getSearch().getConfig(protonBuilder); ProtonConfig protonConfig = new ProtonConfig(protonBuilder); assertEquals(1, protonConfig.distribution().searchablecopies()); assertEquals(5, protonConfig.distribution().redundancy()); } @Test public void testRedundancy() { ContentCluster cc = parse("" + "<content version=\"1.0\" id=\"storage\">\n" + " <documents/>" + " <engine>" + " <proton>" + " <searchable-copies>3</searchable-copies>" + " </proton>" + " </engine>" + " <redundancy reply-after='4'>5</redundancy>\n" + " <group>" + " <node hostalias='mockhost' distribution-key='0'/>" + " <node hostalias='mockhost' distribution-key='1'/>" + " <node hostalias='mockhost' distribution-key='2'/>" + " <node hostalias='mockhost' distribution-key='3'/>" + " <node hostalias='mockhost' distribution-key='4'/>" + " </group>" + "</content>" ); DistributionConfig.Builder distributionBuilder = new DistributionConfig.Builder(); cc.getConfig(distributionBuilder); DistributionConfig distributionConfig = distributionBuilder.build(); assertEquals(3, distributionConfig.cluster("storage").ready_copies()); assertEquals(4, distributionConfig.cluster("storage").initial_redundancy()); assertEquals(5, distributionConfig.cluster("storage").redundancy()); 
StorDistributionConfig.Builder storBuilder = new StorDistributionConfig.Builder(); cc.getConfig(storBuilder); StorDistributionConfig storConfig = new StorDistributionConfig(storBuilder); assertEquals(4, storConfig.initial_redundancy()); assertEquals(5, storConfig.redundancy()); assertEquals(3, storConfig.ready_copies()); ProtonConfig.Builder protonBuilder = new ProtonConfig.Builder(); cc.getSearch().getConfig(protonBuilder); ProtonConfig protonConfig = new ProtonConfig(protonBuilder); assertEquals(3, protonConfig.distribution().searchablecopies()); assertEquals(5, protonConfig.distribution().redundancy()); } @Test public void testNoId() { ContentCluster c = parse( "<content version=\"1.0\">\n" + " <redundancy>1</redundancy>\n" + " <documents/>" + " <redundancy reply-after=\"4\">5</redundancy>\n" + " <group>" + " <node hostalias=\"mockhost\" distribution-key=\"0\"/>\"" + " </group>" + "</content>" ); assertEquals("content", c.getName()); } @Test public void testRedundancyDefaults() { ContentCluster cc = parse( "<content version=\"1.0\" id=\"storage\">\n" + " <documents/>" + " <group>" + " <node hostalias=\"mockhost\" distribution-key=\"0\"/>\"" + " <node hostalias=\"mockhost\" distribution-key=\"1\"/>\"" + " <node hostalias=\"mockhost\" distribution-key=\"2\"/>\"" + " </group>" + "</content>" ); DistributionConfig.Builder distributionBuilder = new DistributionConfig.Builder(); cc.getConfig(distributionBuilder); DistributionConfig distributionConfig = distributionBuilder.build(); assertEquals(3, distributionConfig.cluster("storage").redundancy()); StorDistributionConfig.Builder builder = new StorDistributionConfig.Builder(); cc.getConfig(builder); StorDistributionConfig config = new StorDistributionConfig(builder); assertEquals(2, config.initial_redundancy()); assertEquals(3, config.redundancy()); assertEquals(2, config.ready_copies()); } @Test public void testEndToEnd() { String xml = "<?xml version=\"1.0\" encoding=\"utf-8\" ?>\n" + "<services>\n" + "\n" + " <admin 
version=\"2.0\">\n" + " <adminserver hostalias=\"configserver\" />\n" + " <logserver hostalias=\"logserver\" />\n" + " <slobroks>\n" + " <slobrok hostalias=\"configserver\" />\n" + " <slobrok hostalias=\"logserver\" />\n" + " </slobroks>\n" + " <cluster-controllers>\n" + " <cluster-controller hostalias=\"configserver\"/>" + " <cluster-controller hostalias=\"configserver2\"/>" + " <cluster-controller hostalias=\"configserver3\"/>" + " </cluster-controllers>\n" + " </admin>\n" + " <content version='1.0' id='bar'>" + " <redundancy>1</redundancy>\n" + " <documents>" + " <document type=\"type1\" mode=\"index\"/>\n" + " <document type=\"type2\" mode=\"index\"/>\n" + " </documents>\n" + " <group>" + " <node hostalias='node0' distribution-key='0' />" + " </group>" + " <tuning>" + " <cluster-controller>\n" + " <init-progress-time>34567</init-progress-time>" + " </cluster-controller>" + " </tuning>" + " </content>" + "\n" + "</services>"; List<String> sds = ApplicationPackageUtils.generateSchemas("type1", "type2"); VespaModel model = new VespaModelCreatorWithMockPkg(null, xml, sds).create(); assertEquals(2, model.getContentClusters().get("bar").getDocumentDefinitions().size()); ContainerCluster<?> cluster = model.getAdmin().getClusterControllers(); assertEquals(3, cluster.getContainers().size()); } VespaModel createEnd2EndOneNode(ModelContext.Properties properties) { String services = "<?xml version='1.0' encoding='UTF-8' ?>" + "<services version='1.0'>" + " <admin version='2.0'>" + " <adminserver hostalias='node1'/>" + " </admin>" + " <container id='default' version='1.0'>" + " <search/>" + " <nodes>" + " <node hostalias='node1'/>" + " </nodes>" + " </container>" + " <content id='storage' version='1.0'>" + " <redundancy>2</redundancy>" + " <group>" + " <node distribution-key='0' hostalias='node1'/>" + " <node distribution-key='1' hostalias='node1'/>" + " </group>" + " <tuning>" + " <cluster-controller>" + " <transition-time>0</transition-time>" + " </cluster-controller>" + 
" </tuning>" + " <documents>" + " <document mode='index' type='type1'/>" + " </documents>" + " <engine>" + " <proton/>" + " </engine>" + " </content>" + " </services>"; return createEnd2EndOneNode(properties, services); } VespaModel createEnd2EndOneNode(ModelContext.Properties properties, String services) { DeployState.Builder deployStateBuilder = new DeployState.Builder().properties(properties); List<String> sds = ApplicationPackageUtils.generateSchemas("type1"); return (new VespaModelCreatorWithMockPkg(null, services, sds)).create(deployStateBuilder); } @Test public void testEndToEndOneNode() { VespaModel model = createEnd2EndOneNode(new TestProperties()); assertEquals(1, model.getContentClusters().get("storage").getDocumentDefinitions().size()); ContainerCluster<?> cluster = model.getAdmin().getClusterControllers(); assertEquals(1, cluster.getContainers().size()); } @Test public void testSearchTuning() { String xml = "<?xml version=\"1.0\" encoding=\"utf-8\" ?>\n" + "<services>\n" + "\n" + " <admin version=\"2.0\">\n" + " <adminserver hostalias=\"node0\" />\n" + " <cluster-controllers>\n" + " <cluster-controller hostalias=\"node0\"/>" + " </cluster-controllers>\n" + " </admin>\n" + " <content version='1.0' id='bar'>" + " <redundancy>1</redundancy>\n" + " <documents>" + " <document type=\"type1\" mode='index'/>\n" + " <document type=\"type2\" mode='index'/>\n" + " </documents>\n" + " <group>" + " <node hostalias='node0' distribution-key='0'/>" + " </group>" + " <tuning>\n" + " <cluster-controller>" + " <init-progress-time>34567</init-progress-time>" + " </cluster-controller>" + " </tuning>" + " </content>" + "\n" + "</services>"; List<String> sds = ApplicationPackageUtils.generateSchemas("type1", "type2"); VespaModel model = new VespaModelCreatorWithMockPkg(getHosts(), xml, sds).create(); assertTrue(model.getContentClusters().get("bar").getPersistence() instanceof ProtonEngine.Factory); { StorDistributormanagerConfig.Builder builder = new 
StorDistributormanagerConfig.Builder(); model.getConfig(builder, "bar/distributor/0"); StorDistributormanagerConfig config = new StorDistributormanagerConfig(builder); assertFalse(config.inlinebucketsplitting()); } { StorFilestorConfig.Builder builder = new StorFilestorConfig.Builder(); model.getConfig(builder, "bar/storage/0"); StorFilestorConfig config = new StorFilestorConfig(builder); assertFalse(config.enable_multibit_split_optimalization()); } } @Test public void testRedundancyRequired() { String xml = "<?xml version=\"1.0\" encoding=\"utf-8\" ?>\n" + "<services>\n" + "\n" + " <admin version=\"2.0\">\n" + " <adminserver hostalias=\"node0\" />\n" + " </admin>\n" + " <content version='1.0' id='bar'>" + " <documents>" + " <document type=\"type1\" mode='index'/>\n" + " </documents>\n" + " <group>\n" + " <node hostalias='node0' distribution-key='0'/>\n" + " </group>\n" + " </content>\n" + "</services>\n"; List<String> sds = ApplicationPackageUtils.generateSchemas("type1", "type2"); try{ new VespaModelCreatorWithMockPkg(getHosts(), xml, sds).create(); fail("Deploying without redundancy should fail"); } catch (IllegalArgumentException e) { assertTrue(e.getMessage(), e.getMessage().contains("missing required element \"redundancy\"")); } } @Test public void testRedundancyFinalLessThanInitial() { try { parse( "<content version=\"1.0\" id=\"storage\">\n" + " <redundancy reply-after=\"4\">2</redundancy>\n" + " <group>" + " <node hostalias='node0' distribution-key='0' />" + " </group>" + "</content>" ); fail("no exception thrown"); } catch (Exception e) { /* ignore */ } } @Test public void testReadyTooHigh() { try { parse( "<content version=\"1.0\" id=\"storage\">\n" + " <engine>" + " <proton>" + " <searchable-copies>3</searchable-copies>" + " </proton>" + " </engine>" + " <redundancy>2</redundancy>\n" + " <group>" + " <node hostalias='node0' distribution-key='0' />" + " </group>" + "</content>" ); fail("no exception thrown"); } catch (Exception e) { /* ignore */ } } 
FleetcontrollerConfig getFleetControllerConfig(String xml) { ContentCluster cluster = parse(xml); FleetcontrollerConfig.Builder builder = new FleetcontrollerConfig.Builder(); cluster.getConfig(builder); cluster.getClusterControllerConfig().getConfig(builder); return new FleetcontrollerConfig(builder); } @Test public void testFleetControllerOverride() { { FleetcontrollerConfig config = getFleetControllerConfig( "<content version=\"1.0\" id=\"storage\">\n" + " <documents/>" + " <group>\n" + " <node distribution-key=\"0\" hostalias=\"mockhost\"/>\n" + " </group>\n" + "</content>" ); assertEquals(0, config.min_storage_up_ratio(), 0.01); assertEquals(0, config.min_distributor_up_ratio(), 0.01); assertEquals(1, config.min_storage_up_count()); assertEquals(1, config.min_distributors_up_count()); } { FleetcontrollerConfig config = getFleetControllerConfig( "<content version=\"1.0\" id=\"storage\">\n" + " <documents/>" + " <group>\n" + " <node distribution-key=\"0\" hostalias=\"mockhost\"/>\n" + " <node distribution-key=\"1\" hostalias=\"mockhost\"/>\n" + " <node distribution-key=\"2\" hostalias=\"mockhost\"/>\n" + " <node distribution-key=\"3\" hostalias=\"mockhost\"/>\n" + " <node distribution-key=\"4\" hostalias=\"mockhost\"/>\n" + " <node distribution-key=\"5\" hostalias=\"mockhost\"/>\n" + " </group>\n" + "</content>" ); assertNotSame(0, config.min_storage_up_ratio()); } } @Test public void testImplicitDistributionBits() { ContentCluster cluster = parse( "<content version=\"1.0\" id=\"storage\">\n" + " <documents/>" + " <group>\n" + " <node distribution-key=\"0\" hostalias=\"mockhost\"/>\n" + " </group>\n" + "</content>" ); assertDistributionBitsInConfig(cluster, 8); cluster = parse( "<content version=\"1.0\" id=\"storage\">\n" + " <documents/>" + " <group>\n" + " <node distribution-key=\"0\" hostalias=\"mockhost\"/>\n" + " </group>\n" + "</content>" ); assertDistributionBitsInConfig(cluster, 8); } @Test public void testExplicitDistributionBits() { ContentCluster 
cluster = parse( "<content version=\"1.0\" id=\"storage\">\n" + " <documents/>" + " <group>\n" + " <node distribution-key=\"0\" hostalias=\"mockhost\"/>\n" + " </group>\n" + " <tuning>\n" + " <distribution type=\"strict\"/>\n" + " </tuning>\n" + "</content>" ); assertDistributionBitsInConfig(cluster, 8); cluster = parse( "<content version=\"1.0\" id=\"storage\">\n" + " <documents/>" + " <group>\n" + " <node distribution-key=\"0\" hostalias=\"mockhost\"/>\n" + " </group>\n" + " <tuning>\n" + " <distribution type=\"loose\"/>\n" + " </tuning>\n" + "</content>" ); assertDistributionBitsInConfig(cluster, 8); } @Test public void testZoneDependentDistributionBits() throws Exception { String xml = new ContentClusterBuilder().docTypes("test").getXml(); ContentCluster prodWith16Bits = createWithZone(xml, new Zone(Environment.prod, RegionName.from("us-east-3"))); assertDistributionBitsInConfig(prodWith16Bits, 16); ContentCluster stagingNot16Bits = createWithZone(xml, new Zone(Environment.staging, RegionName.from("us-east-3"))); assertDistributionBitsInConfig(stagingNot16Bits, 8); } @Test public void testGenerateSearchNodes() { ContentCluster cluster = parse( "<content version=\"1.0\" id=\"storage\">\n" + " <documents/>" + " <engine>" + " <proton/>" + " </engine>" + " <group>\n" + " <node distribution-key=\"0\" hostalias=\"mockhost\"/>\n" + " <node distribution-key=\"1\" hostalias=\"mockhost\"/>\n" + " </group>\n" + "</content>" ); { StorServerConfig.Builder builder = new StorServerConfig.Builder(); cluster.getStorageCluster().getConfig(builder); cluster.getStorageCluster().getChildren().get("0").getConfig(builder); StorServerConfig config = new StorServerConfig(builder); } { StorServerConfig.Builder builder = new StorServerConfig.Builder(); cluster.getStorageCluster().getConfig(builder); cluster.getStorageCluster().getChildren().get("1").getConfig(builder); StorServerConfig config = new StorServerConfig(builder); } } @Test public void testAlternativeNodeSyntax() { 
ContentCluster cluster = parse( "<content version=\"1.0\" id=\"test\">\n" + " <documents/>" + " <engine>" + " <proton/>" + " </engine>" + " <nodes>\n" + " <node distribution-key=\"0\" hostalias=\"mockhost\"/>\n" + " <node distribution-key=\"1\" hostalias=\"mockhost\"/>\n" + " </nodes>\n" + "</content>" ); DistributionConfig.Builder bob = new DistributionConfig.Builder(); cluster.getConfig(bob); DistributionConfig.Cluster.Group group = bob.build().cluster("test").group(0); assertEquals("invalid", group.name()); assertEquals("invalid", group.index()); assertEquals(2, group.nodes().size()); StorDistributionConfig.Builder builder = new StorDistributionConfig.Builder(); cluster.getConfig(builder); StorDistributionConfig config = new StorDistributionConfig(builder); assertEquals("invalid", config.group(0).name()); assertEquals("invalid", config.group(0).index()); assertEquals(2, config.group(0).nodes().size()); } @Test public void testReadyWhenInitialOne() { StorDistributionConfig.Builder builder = new StorDistributionConfig.Builder(); parse( "<content version=\"1.0\" id=\"storage\">\n" + " <documents/>" + " <redundancy>1</redundancy>\n" + " <group>\n" + " <node distribution-key=\"0\" hostalias=\"mockhost\"/>" + " </group>" + "</content>" ).getConfig(builder); StorDistributionConfig config = new StorDistributionConfig(builder); assertEquals(1, config.initial_redundancy()); assertEquals(1, config.redundancy()); assertEquals(1, config.ready_copies()); } public void testProvider(String tagName, StorServerConfig.Persistence_provider.Type.Enum type) { ContentCluster cluster = parse( "<content version=\"1.0\" id=\"storage\">\n" + " <documents/>" + " <redundancy>3</redundancy>" + " <engine>\n" + " <" + tagName + "/>\n" + " </engine>\n" + " <group>\n" + " <node distribution-key=\"0\" hostalias=\"mockhost\"/>" + " </group>" + "</content>" ); { StorServerConfig.Builder builder = new StorServerConfig.Builder(); cluster.getStorageCluster().getConfig(builder); 
cluster.getStorageCluster().getChildren().get("0").getConfig(builder); StorServerConfig config = new StorServerConfig(builder); assertEquals(type, config.persistence_provider().type()); } { StorServerConfig.Builder builder = new StorServerConfig.Builder(); cluster.getDistributorNodes().getConfig(builder); cluster.getDistributorNodes().getChildren().get("0").getConfig(builder); StorServerConfig config = new StorServerConfig(builder); assertEquals(type, config.persistence_provider().type()); } } @Test public void testProviders() { testProvider("proton", StorServerConfig.Persistence_provider.Type.RPC); testProvider("dummy", StorServerConfig.Persistence_provider.Type.DUMMY); } @Test public void testMetrics() { MetricsmanagerConfig.Builder builder = new MetricsmanagerConfig.Builder(); ContentCluster cluster = parse("<content version=\"1.0\" id=\"storage\">\n" + " <documents/>" + " <group>\n" + " <node distribution-key=\"0\" hostalias=\"mockhost\"/>\n" + " </group>\n" + "</content>" ); cluster.getConfig(builder); MetricsmanagerConfig config = new MetricsmanagerConfig(builder); assertEquals(6, config.consumer().size()); assertEquals("status", config.consumer(0).name()); assertEquals("*", config.consumer(0).addedmetrics(0)); assertEquals("partofsum", config.consumer(0).removedtags(0)); assertEquals("log", config.consumer(1).name()); assertEquals("logdefault", config.consumer(1).tags().get(0)); assertEquals("loadtype", config.consumer(1).removedtags(0)); assertEquals("yamas", config.consumer(2).name()); assertEquals("yamasdefault", config.consumer(2).tags().get(0)); assertEquals("loadtype", config.consumer(2).removedtags(0)); assertEquals("health", config.consumer(3).name()); assertEquals("statereporter", config.consumer(5).name()); assertEquals("*", config.consumer(5).addedmetrics(0)); assertEquals("thread", config.consumer(5).removedtags(0)); assertEquals("partofsum", config.consumer(5).removedtags(1)); assertEquals(0, config.consumer(5).tags().size()); 
cluster.getStorageCluster().getConfig(builder); config = new MetricsmanagerConfig(builder); assertEquals(6, config.consumer().size()); assertEquals("fleetcontroller", config.consumer(4).name()); assertEquals(4, config.consumer(4).addedmetrics().size()); assertEquals("vds.datastored.alldisks.docs", config.consumer(4).addedmetrics(0)); assertEquals("vds.datastored.alldisks.bytes", config.consumer(4).addedmetrics(1)); assertEquals("vds.datastored.alldisks.buckets", config.consumer(4).addedmetrics(2)); assertEquals("vds.datastored.bucket_space.buckets_total", config.consumer(4).addedmetrics(3)); } public MetricsmanagerConfig.Consumer getConsumer(String consumer, MetricsmanagerConfig config) { for (MetricsmanagerConfig.Consumer c : config.consumer()) { if (c.name().equals(consumer)) { return c; } } return null; } @Test public void testConfiguredMetrics() { String xml = "" + "<services>" + "<content version=\"1.0\" id=\"storage\">\n" + " <redundancy>1</redundancy>\n" + " <documents>" + " <document type=\"type1\" mode='index'/>\n" + " <document type=\"type2\" mode='index'/>\n" + " </documents>" + " <group>\n" + " <node distribution-key=\"0\" hostalias=\"node0\"/>\n" + " </group>\n" + "</content>" + "<admin version=\"2.0\">" + " <logserver hostalias=\"node0\"/>" + " <adminserver hostalias=\"node0\"/>" + "</admin>" + "</services>"; List<String> sds = ApplicationPackageUtils.generateSchemas("type1", "type2"); VespaModel model = new VespaModelCreatorWithMockPkg(getHosts(), xml, sds).create(); { MetricsmanagerConfig.Builder builder = new MetricsmanagerConfig.Builder(); model.getConfig(builder, "storage/storage/0"); MetricsmanagerConfig config = new MetricsmanagerConfig(builder); String expected = "[vds.filestor.alldisks.allthreads.put.sum\n" + "vds.filestor.alldisks.allthreads.get.sum\n" + "vds.filestor.alldisks.allthreads.remove.sum\n" + "vds.filestor.alldisks.allthreads.update.sum\n" + "vds.datastored.alldisks.docs\n" + "vds.datastored.alldisks.bytes\n" + 
"vds.filestor.alldisks.queuesize\n" + "vds.filestor.alldisks.averagequeuewait.sum\n" + "vds.visitor.cv_queuewaittime\n" + "vds.visitor.allthreads.averagequeuewait\n" + "vds.visitor.allthreads.averagevisitorlifetime\n" + "vds.visitor.allthreads.created.sum]"; String actual = getConsumer("log", config).addedmetrics().toString().replaceAll(", ", "\n"); assertEquals(expected, actual); assertEquals("[logdefault]", getConsumer("log", config).tags().toString()); expected = "[vds.datastored.alldisks.docs\n" + "vds.datastored.alldisks.bytes\n" + "vds.datastored.alldisks.buckets\n" + "vds.datastored.bucket_space.buckets_total]"; actual = getConsumer("fleetcontroller", config).addedmetrics().toString().replaceAll(", ", "\n"); assertEquals(expected, actual); } { MetricsmanagerConfig.Builder builder = new MetricsmanagerConfig.Builder(); model.getConfig(builder, "storage/distributor/0"); MetricsmanagerConfig config = new MetricsmanagerConfig(builder); assertEquals("[logdefault]", getConsumer("log", config).tags().toString()); } } @Test public void flush_on_shutdown_is_default_on_for_non_hosted() throws Exception { assertPrepareRestartCommand(createOneNodeCluster(false)); } @Test public void flush_on_shutdown_can_be_turned_off_for_non_hosted() throws Exception { assertNoPreShutdownCommand(createClusterWithFlushOnShutdownOverride(false, false)); } @Test public void flush_on_shutdown_is_default_on_for_hosted() throws Exception { assertPrepareRestartCommand(createOneNodeCluster(true)); } @Test public void flush_on_shutdown_can_be_turned_on_for_hosted() throws Exception { assertPrepareRestartCommand(createClusterWithFlushOnShutdownOverride(true, true)); } private static String oneNodeClusterXml() { return "<content version=\"1.0\" id=\"mockcluster\">" + " <documents/>" + " <group>" + " <node distribution-key=\"0\" hostalias=\"mockhost\"/>" + " </group>" + "</content>"; } private static ContentCluster createOneNodeCluster(boolean isHostedVespa) throws Exception { return 
createOneNodeCluster(oneNodeClusterXml(), new TestProperties().setHostedVespa(isHostedVespa)); } private static ContentCluster createOneNodeCluster(TestProperties props) throws Exception { return createOneNodeCluster(oneNodeClusterXml(), props); } private static ContentCluster createOneNodeCluster(TestProperties props, Optional<Flavor> flavor) throws Exception { return createOneNodeCluster(oneNodeClusterXml(), props, flavor); } private static ContentCluster createClusterWithFlushOnShutdownOverride(boolean flushOnShutdown, boolean isHostedVespa) throws Exception { return createOneNodeCluster("<content version=\"1.0\" id=\"mockcluster\">" + " <documents/>" + " <engine>" + " <proton>" + " <flush-on-shutdown>" + flushOnShutdown + "</flush-on-shutdown>" + " </proton>" + " </engine>" + " <group>" + " <node distribution-key=\"0\" hostalias=\"mockhost\"/>" + " </group>" + "</content>", new TestProperties().setHostedVespa(isHostedVespa)); } private static ContentCluster createOneNodeCluster(String clusterXml, TestProperties props) throws Exception { return createOneNodeCluster(clusterXml, props, Optional.empty()); } private static ContentCluster createOneNodeCluster(String clusterXml, TestProperties props, Optional<Flavor> flavor) throws Exception { DeployState.Builder deployStateBuilder = new DeployState.Builder() .properties(props); MockRoot root = flavor.isPresent() ? 
ContentClusterUtils.createMockRoot(new SingleNodeProvisioner(flavor.get()), Collections.emptyList(), deployStateBuilder) : ContentClusterUtils.createMockRoot(Collections.emptyList(), deployStateBuilder); ContentCluster cluster = ContentClusterUtils.createCluster(clusterXml, root); root.freezeModelTopology(); cluster.validate(); return cluster; } private static void assertPrepareRestartCommand(ContentCluster cluster) { Optional<String> command = cluster.getSearch().getSearchNodes().get(0).getPreShutdownCommand(); assertTrue(command.isPresent()); assertTrue(command.get().matches(".*vespa-proton-cmd [0-9]+ prepareRestart")); } private static void assertNoPreShutdownCommand(ContentCluster cluster) { Optional<String> command = cluster.getSearch().getSearchNodes().get(0).getPreShutdownCommand(); assertFalse(command.isPresent()); } @Test public void reserved_document_name_throws_exception() { expectedException.expect(IllegalArgumentException.class); expectedException.expectMessage("The following document types conflict with reserved keyword names: 'true'."); String xml = "<content version=\"1.0\" id=\"storage\">" + " <redundancy>1</redundancy>" + " <documents>" + " <document type=\"true\" mode=\"index\"/>" + " </documents>" + " <group>" + " <node distribution-key=\"0\" hostalias=\"mockhost\"/>" + " </group>" + "</content>"; List<String> sds = ApplicationPackageUtils.generateSchemas("true"); new VespaModelCreatorWithMockPkg(null, xml, sds).create(); } private void assertClusterHasBucketSpaceMappings(AllClustersBucketSpacesConfig config, String clusterId, List<String> defaultSpaceTypes, List<String> globalSpaceTypes) { AllClustersBucketSpacesConfig.Cluster cluster = config.cluster(clusterId); assertNotNull(cluster); assertEquals(defaultSpaceTypes.size() + globalSpaceTypes.size(), cluster.documentType().size()); assertClusterHasTypesInBucketSpace(cluster, "default", defaultSpaceTypes); assertClusterHasTypesInBucketSpace(cluster, "global", globalSpaceTypes); } private void 
assertClusterHasTypesInBucketSpace(AllClustersBucketSpacesConfig.Cluster cluster, String bucketSpace, List<String> expectedTypes) { for (String type : expectedTypes) { assertNotNull(cluster.documentType(type)); assertEquals(bucketSpace, cluster.documentType(type).bucketSpace()); } } private VespaModel createDualContentCluster() { String xml = "<services>" + "<admin version=\"2.0\">" + " <adminserver hostalias=\"node0\"/>" + "</admin>" + "<content version=\"1.0\" id=\"foo_c\">" + " <redundancy>1</redundancy>" + " <documents>" + " <document type=\"bunnies\" mode=\"index\"/>" + " <document type=\"hares\" mode=\"index\"/>" + " </documents>" + " <group>" + " <node distribution-key=\"0\" hostalias=\"node0\"/>" + " </group>" + "</content>" + "<content version=\"1.0\" id=\"bar_c\">" + " <redundancy>1</redundancy>" + " <documents>" + " <document type=\"rabbits\" mode=\"index\" global=\"true\"/>" + " </documents>" + " <group>" + " <node distribution-key=\"0\" hostalias=\"node0\"/>" + " </group>" + "</content>" + "</services>"; List<String> sds = ApplicationPackageUtils.generateSchemas("bunnies", "hares", "rabbits"); return new VespaModelCreatorWithMockPkg(getHosts(), xml, sds).create(); } @Test public void all_clusters_bucket_spaces_config_contains_mappings_across_all_clusters() { VespaModel model = createDualContentCluster(); AllClustersBucketSpacesConfig.Builder builder = new AllClustersBucketSpacesConfig.Builder(); model.getConfig(builder, "client"); AllClustersBucketSpacesConfig config = builder.build(); assertEquals(2, config.cluster().size()); assertClusterHasBucketSpaceMappings(config, "foo_c", Arrays.asList("bunnies", "hares"), Collections.emptyList()); assertClusterHasBucketSpaceMappings(config, "bar_c", Collections.emptyList(), Collections.singletonList("rabbits")); } @Test public void test_routing_with_multiple_clusters() { VespaModel model = createDualContentCluster(); Routing routing = model.getRouting(); assertNotNull(routing); assertEquals("[]", 
routing.getErrors().toString()); assertEquals(1, routing.getProtocols().size()); DocumentProtocol protocol = (DocumentProtocol) routing.getProtocols().get(0); RoutingTableSpec spec = protocol.getRoutingTableSpec(); assertEquals(3, spec.getNumHops()); assertEquals("docproc/cluster.bar_c.indexing/chain.indexing", spec.getHop(0).getName()); assertEquals("docproc/cluster.foo_c.indexing/chain.indexing", spec.getHop(1).getName()); assertEquals("indexing", spec.getHop(2).getName()); assertEquals(10, spec.getNumRoutes()); assertRoute(spec.getRoute(0), "bar_c", "[MessageType:bar_c]"); assertRoute(spec.getRoute(1), "bar_c-direct", "[Content:cluster=bar_c]"); assertRoute(spec.getRoute(2), "bar_c-index", "docproc/cluster.bar_c.indexing/chain.indexing", "[Content:cluster=bar_c]"); assertRoute(spec.getRoute(3), "default", "indexing"); assertRoute(spec.getRoute(4), "default-get", "indexing"); assertRoute(spec.getRoute(5), "foo_c", "[MessageType:foo_c]"); assertRoute(spec.getRoute(6), "foo_c-direct", "[Content:cluster=foo_c]"); assertRoute(spec.getRoute(7), "foo_c-index", "docproc/cluster.foo_c.indexing/chain.indexing", "[Content:cluster=foo_c]"); assertRoute(spec.getRoute(8), "storage/cluster.bar_c", "route:bar_c"); assertRoute(spec.getRoute(9), "storage/cluster.foo_c", "route:foo_c"); } private ContentCluster createWithZone(String clusterXml, Zone zone) throws Exception { DeployState.Builder deployStateBuilder = new DeployState.Builder() .zone(zone) .properties(new TestProperties().setHostedVespa(true)); List<String> schemas = SchemaBuilder.createSchemas("test"); MockRoot root = ContentClusterUtils.createMockRoot(schemas, deployStateBuilder); ContentCluster cluster = ContentClusterUtils.createCluster(clusterXml, root); root.freezeModelTopology(); cluster.validate(); return cluster; } private void assertDistributionBitsInConfig(ContentCluster cluster, int distributionBits) { FleetcontrollerConfig.Builder builder = new FleetcontrollerConfig.Builder(); cluster.getConfig(builder); 
cluster.getClusterControllerConfig().getConfig(builder); FleetcontrollerConfig config = new FleetcontrollerConfig(builder); assertEquals(distributionBits, config.ideal_distribution_bits()); StorDistributormanagerConfig.Builder sdBuilder = new StorDistributormanagerConfig.Builder(); cluster.getConfig(sdBuilder); StorDistributormanagerConfig storDistributormanagerConfig = new StorDistributormanagerConfig(sdBuilder); assertEquals(distributionBits, storDistributormanagerConfig.minsplitcount()); } private void verifyTopKProbabilityPropertiesControl() { VespaModel model = createEnd2EndOneNode(new TestProperties()); ContentCluster cc = model.getContentClusters().get("storage"); DispatchConfig.Builder builder = new DispatchConfig.Builder(); cc.getSearch().getConfig(builder); DispatchConfig cfg = new DispatchConfig(builder); assertEquals(0.9999, cfg.topKProbability(), 0.0); } @Test public void default_topKprobability_controlled_by_properties() { verifyTopKProbabilityPropertiesControl(); } private boolean resolveThreePhaseUpdateConfigWithFeatureFlag(boolean flagEnableThreePhase) { VespaModel model = createEnd2EndOneNode(new TestProperties().setUseThreePhaseUpdates(flagEnableThreePhase)); ContentCluster cc = model.getContentClusters().get("storage"); var builder = new StorDistributormanagerConfig.Builder(); cc.getDistributorNodes().getConfig(builder); return (new StorDistributormanagerConfig(builder)).enable_metadata_only_fetch_phase_for_inconsistent_updates(); } @Test public void default_distributor_three_phase_update_config_controlled_by_properties() { assertFalse(resolveThreePhaseUpdateConfigWithFeatureFlag(false)); assertTrue(resolveThreePhaseUpdateConfigWithFeatureFlag(true)); } private int resolveMaxCompactBuffers(int maxCompactBuffers) { VespaModel model = createEnd2EndOneNode(new TestProperties().maxCompactBuffers(maxCompactBuffers)); ContentCluster cc = model.getContentClusters().get("storage"); ProtonConfig.Builder protonBuilder = new ProtonConfig.Builder(); 
cc.getSearch().getConfig(protonBuilder); ProtonConfig protonConfig = new ProtonConfig(protonBuilder); assertEquals(1, protonConfig.documentdb().size()); return protonConfig.documentdb(0).allocation().max_compact_buffers(); } @Test void assertZookeeperServerImplementation(String expectedClassName, ClusterControllerContainerCluster clusterControllerCluster) { for (ClusterControllerContainer c : clusterControllerCluster.getContainers()) { var builder = new ComponentsConfig.Builder(); c.getConfig(builder); assertEquals(1, new ComponentsConfig(builder).components().stream() .filter(component -> component.classId().equals(expectedClassName)) .count()); } } private StorDistributormanagerConfig resolveStorDistributormanagerConfig(TestProperties props) throws Exception { var cc = createOneNodeCluster(props); var builder = new StorDistributormanagerConfig.Builder(); cc.getDistributorNodes().getConfig(builder); return (new StorDistributormanagerConfig(builder)); } private int resolveMaxInhibitedGroupsConfigWithFeatureFlag(int maxGroups) throws Exception { var cfg = resolveStorDistributormanagerConfig(new TestProperties().maxActivationInhibitedOutOfSyncGroups(maxGroups)); return cfg.max_activation_inhibited_out_of_sync_groups(); } @Test public void default_distributor_max_inhibited_group_activation_config_controlled_by_properties() throws Exception { assertEquals(0, resolveMaxInhibitedGroupsConfigWithFeatureFlag(0)); assertEquals(2, resolveMaxInhibitedGroupsConfigWithFeatureFlag(2)); } private int resolveNumDistributorStripesConfig(Optional<Flavor> flavor) throws Exception { var cc = createOneNodeCluster(new TestProperties(), flavor); var builder = new StorDistributormanagerConfig.Builder(); cc.getDistributorNodes().getChildren().get("0").getConfig(builder); return (new StorDistributormanagerConfig(builder)).num_distributor_stripes(); } private int resolveTunedNumDistributorStripesConfig(int numCpuCores) throws Exception { var flavor = new Flavor(new FlavorsConfig.Flavor(new 
FlavorsConfig.Flavor.Builder().name("test").minCpuCores(numCpuCores))); return resolveNumDistributorStripesConfig(Optional.of(flavor)); } @Test public void num_distributor_stripes_config_defaults_to_zero() throws Exception { assertEquals(0, resolveNumDistributorStripesConfig(Optional.empty())); } @Test public void num_distributor_stripes_config_tuned_by_flavor() throws Exception { assertEquals(1, resolveTunedNumDistributorStripesConfig(1)); assertEquals(1, resolveTunedNumDistributorStripesConfig(16)); assertEquals(2, resolveTunedNumDistributorStripesConfig(17)); assertEquals(2, resolveTunedNumDistributorStripesConfig(64)); assertEquals(4, resolveTunedNumDistributorStripesConfig(65)); } @Test public void distributor_merge_busy_wait_controlled_by_properties() throws Exception { assertEquals(10, resolveDistributorMergeBusyWaitConfig(Optional.empty())); assertEquals(1, resolveDistributorMergeBusyWaitConfig(Optional.of(1))); } private int resolveDistributorMergeBusyWaitConfig(Optional<Integer> mergeBusyWait) throws Exception { var props = new TestProperties(); if (mergeBusyWait.isPresent()) { props.setDistributorMergeBusyWait(mergeBusyWait.get()); } var cluster = createOneNodeCluster(props); var builder = new StorDistributormanagerConfig.Builder(); cluster.getDistributorNodes().getConfig(builder); return (new StorDistributormanagerConfig(builder)).inhibit_merge_sending_on_busy_node_duration_sec(); } @Test public void distributor_enhanced_maintenance_scheduling_controlled_by_properties() throws Exception { assertFalse(resolveDistributorEnhancedSchedulingConfig(false)); assertTrue(resolveDistributorEnhancedSchedulingConfig(true)); } private boolean resolveDistributorEnhancedSchedulingConfig(boolean enhancedScheduling) throws Exception { var props = new TestProperties(); if (enhancedScheduling) { props.distributorEnhancedMaintenanceScheduling(enhancedScheduling); } var cluster = createOneNodeCluster(props); var builder = new StorDistributormanagerConfig.Builder(); 
cluster.getDistributorNodes().getConfig(builder); return (new StorDistributormanagerConfig(builder)).implicitly_clear_bucket_priority_on_schedule(); } @Test public void unordered_merge_chaining_config_controlled_by_properties() throws Exception { assertFalse(resolveUnorderedMergeChainingConfig(false)); assertTrue(resolveUnorderedMergeChainingConfig(true)); } private boolean resolveUnorderedMergeChainingConfig(boolean unorderedMergeChaining) throws Exception { var props = new TestProperties(); if (unorderedMergeChaining) { props.setUnorderedMergeChaining(true); } var cluster = createOneNodeCluster(props); var builder = new StorDistributormanagerConfig.Builder(); cluster.getDistributorNodes().getConfig(builder); return (new StorDistributormanagerConfig(builder)).use_unordered_merge_chaining(); } @Test public void testDedicatedClusterControllers() { VespaModel noContentModel = createEnd2EndOneNode(new TestProperties().setHostedVespa(true) .setMultitenant(true), "<?xml version='1.0' encoding='UTF-8' ?>" + "<services version='1.0'>" + " <container id='default' version='1.0' />" + " </services>"); assertEquals(Map.of(), noContentModel.getContentClusters()); assertNull("No cluster controller without content", noContentModel.getAdmin().getClusterControllers()); VespaModel oneContentModel = createEnd2EndOneNode(new TestProperties().setHostedVespa(true) .setMultitenant(true), "<?xml version='1.0' encoding='UTF-8' ?>" + "<services version='1.0'>" + " <container id='default' version='1.0' />" + " <content id='storage' version='1.0'>" + " <redundancy>1</redundancy>" + " <documents>" + " <document mode='index' type='type1' />" + " </documents>" + " </content>" + " </services>"); assertNotNull("Shared cluster controller with content", oneContentModel.getAdmin().getClusterControllers()); String twoContentServices = "<?xml version='1.0' encoding='UTF-8' ?>" + "<services version='1.0'>" + " <container id='default' version='1.0' />" + " <content id='storage' version='1.0'>" + " 
<redundancy>1</redundancy>" + " <documents>" + " <document mode='index' type='type1' />" + " </documents>" + " <tuning>" + " <cluster-controller>" + " <min-distributor-up-ratio>0.618</min-distributor-up-ratio>" + " </cluster-controller>" + " </tuning>" + " </content>" + " <content id='dev-null' version='1.0'>" + " <redundancy>1</redundancy>" + " <documents>" + " <document mode='index' type='type1' />" + " </documents>" + " <tuning>" + " <cluster-controller>" + " <min-distributor-up-ratio>0.418</min-distributor-up-ratio>" + " </cluster-controller>" + " </tuning>" + " </content>" + " </services>"; VespaModel twoContentModel = createEnd2EndOneNode(new TestProperties().setHostedVespa(true) .setMultitenant(true), twoContentServices); assertNotNull("Shared cluster controller with content", twoContentModel.getAdmin().getClusterControllers()); ClusterControllerContainerCluster clusterControllers = twoContentModel.getAdmin().getClusterControllers(); assertEquals(1, clusterControllers.reindexingContext().documentTypesForCluster("storage").size()); assertEquals(1, clusterControllers.reindexingContext().documentTypesForCluster("dev-null").size()); var storageBuilder = new FleetcontrollerConfig.Builder(); var devNullBuilder = new FleetcontrollerConfig.Builder(); twoContentModel.getConfig(storageBuilder, "admin/standalone/cluster-controllers/0/components/clustercontroller-storage-configurer"); twoContentModel.getConfig(devNullBuilder, "admin/standalone/cluster-controllers/0/components/clustercontroller-dev-null-configurer"); assertEquals(0.618, storageBuilder.build().min_distributor_up_ratio(), 1e-9); assertEquals(0.418, devNullBuilder.build().min_distributor_up_ratio(), 1e-9); assertZookeeperServerImplementation("com.yahoo.vespa.zookeeper.ReconfigurableVespaZooKeeperServer", clusterControllers); assertZookeeperServerImplementation("com.yahoo.vespa.zookeeper.Reconfigurer", clusterControllers); 
assertZookeeperServerImplementation("com.yahoo.vespa.zookeeper.VespaZooKeeperAdminImpl", clusterControllers); } }
// Tests that content cluster model building produces the expected config:
// redundancy/ready-copies propagation (global vs. per-group), cluster naming
// defaults, cluster-controller provisioning, and rejection of invalid setups.
class ContentClusterTest extends ContentBaseTest {

    // Minimal admin section prepended to every cluster snippet handed to parse().
    private final static String HOSTS = "<admin version='2.0'><adminserver hostalias='mockhost' /></admin>";

    @Rule
    public ExpectedException expectedException = ExpectedException.none();

    // Builds a model from the given content-cluster XML (with HOSTS prepended)
    // and returns the single resulting ContentCluster.
    ContentCluster parse(String xml) {
        xml = HOSTS + xml;
        TestRoot root = new TestDriver().buildModel(xml);
        return root.getConfigModels(Content.class).get(0).getCluster();
    }

    // Redundancy 15 over a flat 3-group hierarchy of 5 nodes each:
    // distribution/stor-distribution carry the global figures (redundancy 15,
    // ready-copies 3, 4 groups incl. root), while proton is handed the
    // per-group share (redundancy 5, searchable-copies 1).
    @Test
    public void testHierarchicRedundancy() {
        ContentCluster cc = parse("" +
            "<content version=\"1.0\" id=\"storage\">\n" +
            " <documents/>" +
            " <engine>" +
            " <proton>" +
            " <searchable-copies>3</searchable-copies>" +
            " </proton>" +
            " </engine>" +
            " <redundancy>15</redundancy>\n" +
            " <group name='root' distribution-key='0'>" +
            " <distribution partitions='1|1|*'/>" +
            " <group name='g-1' distribution-key='0'>" +
            " <node hostalias='mockhost' distribution-key='0'/>" +
            " <node hostalias='mockhost' distribution-key='1'/>" +
            " <node hostalias='mockhost' distribution-key='2'/>" +
            " <node hostalias='mockhost' distribution-key='3'/>" +
            " <node hostalias='mockhost' distribution-key='4'/>" +
            " </group>" +
            " <group name='g-2' distribution-key='1'>" +
            " <node hostalias='mockhost' distribution-key='5'/>" +
            " <node hostalias='mockhost' distribution-key='6'/>" +
            " <node hostalias='mockhost' distribution-key='7'/>" +
            " <node hostalias='mockhost' distribution-key='8'/>" +
            " <node hostalias='mockhost' distribution-key='9'/>" +
            " </group>" +
            " <group name='g-3' distribution-key='1'>" +
            " <node hostalias='mockhost' distribution-key='10'/>" +
            " <node hostalias='mockhost' distribution-key='11'/>" +
            " <node hostalias='mockhost' distribution-key='12'/>" +
            " <node hostalias='mockhost' distribution-key='13'/>" +
            " <node hostalias='mockhost' distribution-key='14'/>" +
            " </group>" +
            " </group>" +
            "</content>"
        );

        DistributionConfig.Builder distributionBuilder = new DistributionConfig.Builder();
        cc.getConfig(distributionBuilder);
        DistributionConfig distributionConfig = distributionBuilder.build();
        assertEquals(3, distributionConfig.cluster("storage").ready_copies());
        assertEquals(15, distributionConfig.cluster("storage").initial_redundancy());
        assertEquals(15, distributionConfig.cluster("storage").redundancy());
        // 4 groups: the root group plus g-1..g-3.
        assertEquals(4, distributionConfig.cluster("storage").group().size());
        assertEquals(1, distributionConfig.cluster().size());

        StorDistributionConfig.Builder storBuilder = new StorDistributionConfig.Builder();
        cc.getConfig(storBuilder);
        StorDistributionConfig storConfig = new StorDistributionConfig(storBuilder);
        assertEquals(15, storConfig.initial_redundancy());
        assertEquals(15, storConfig.redundancy());
        assertEquals(3, storConfig.ready_copies());

        // Proton config sees the per-group numbers: 15/3 groups = redundancy 5,
        // searchable-copies 3/3 groups = 1.
        ProtonConfig.Builder protonBuilder = new ProtonConfig.Builder();
        cc.getSearch().getConfig(protonBuilder);
        ProtonConfig protonConfig = new ProtonConfig(protonBuilder);
        assertEquals(1, protonConfig.distribution().searchablecopies());
        assertEquals(5, protonConfig.distribution().redundancy());
    }

    // Flat (single-group) cluster: redundancy 5 with reply-after 4 maps to
    // initial_redundancy 4 / redundancy 5, and searchable-copies passes
    // through unchanged to both ready_copies and proton.
    @Test
    public void testRedundancy() {
        ContentCluster cc = parse("" +
            "<content version=\"1.0\" id=\"storage\">\n" +
            " <documents/>" +
            " <engine>" +
            " <proton>" +
            " <searchable-copies>3</searchable-copies>" +
            " </proton>" +
            " </engine>" +
            " <redundancy reply-after='4'>5</redundancy>\n" +
            " <group>" +
            " <node hostalias='mockhost' distribution-key='0'/>" +
            " <node hostalias='mockhost' distribution-key='1'/>" +
            " <node hostalias='mockhost' distribution-key='2'/>" +
            " <node hostalias='mockhost' distribution-key='3'/>" +
            " <node hostalias='mockhost' distribution-key='4'/>" +
            " </group>" +
            "</content>"
        );

        DistributionConfig.Builder distributionBuilder = new DistributionConfig.Builder();
        cc.getConfig(distributionBuilder);
        DistributionConfig distributionConfig = distributionBuilder.build();
        assertEquals(3, distributionConfig.cluster("storage").ready_copies());
        assertEquals(4, distributionConfig.cluster("storage").initial_redundancy());
        assertEquals(5, distributionConfig.cluster("storage").redundancy());

        StorDistributionConfig.Builder storBuilder = new StorDistributionConfig.Builder();
        cc.getConfig(storBuilder);
        StorDistributionConfig storConfig = new StorDistributionConfig(storBuilder);
        assertEquals(4, storConfig.initial_redundancy());
        assertEquals(5, storConfig.redundancy());
        assertEquals(3, storConfig.ready_copies());

        ProtonConfig.Builder protonBuilder = new ProtonConfig.Builder();
        cc.getSearch().getConfig(protonBuilder);
        ProtonConfig protonConfig = new ProtonConfig(protonBuilder);
        assertEquals(3, protonConfig.distribution().searchablecopies());
        assertEquals(5, protonConfig.distribution().redundancy());
    }

    // A content element without an id attribute gets the default name "content".
    @Test
    public void testNoId() {
        ContentCluster c = parse(
            "<content version=\"1.0\">\n" +
            " <redundancy>1</redundancy>\n" +
            " <documents/>" +
            " <redundancy reply-after=\"4\">5</redundancy>\n" +
            " <group>" +
            " <node hostalias=\"mockhost\" distribution-key=\"0\"/>\"" +
            " </group>" +
            "</content>"
        );

        assertEquals("content", c.getName());
    }

    // With no explicit redundancy on a 3-node cluster, defaults are:
    // redundancy 3, initial_redundancy 2, ready_copies 2.
    @Test
    public void testRedundancyDefaults() {
        ContentCluster cc = parse(
            "<content version=\"1.0\" id=\"storage\">\n" +
            " <documents/>" +
            " <group>" +
            " <node hostalias=\"mockhost\" distribution-key=\"0\"/>\"" +
            " <node hostalias=\"mockhost\" distribution-key=\"1\"/>\"" +
            " <node hostalias=\"mockhost\" distribution-key=\"2\"/>\"" +
            " </group>" +
            "</content>"
        );

        DistributionConfig.Builder distributionBuilder = new DistributionConfig.Builder();
        cc.getConfig(distributionBuilder);
        DistributionConfig distributionConfig = distributionBuilder.build();
        assertEquals(3, distributionConfig.cluster("storage").redundancy());

        StorDistributionConfig.Builder builder = new StorDistributionConfig.Builder();
        cc.getConfig(builder);
        StorDistributionConfig config = new StorDistributionConfig(builder);
        assertEquals(2, config.initial_redundancy());
        assertEquals(3, config.redundancy());
        assertEquals(2, config.ready_copies());
    }

    // Full services.xml with an explicit 3-node cluster-controllers section:
    // both document types are registered and 3 controller containers are built.
    @Test
    public void testEndToEnd() {
        String xml =
            "<?xml version=\"1.0\" encoding=\"utf-8\" ?>\n" +
            "<services>\n" +
            "\n" +
            " <admin version=\"2.0\">\n" +
            " <adminserver hostalias=\"configserver\" />\n" +
            " <logserver hostalias=\"logserver\" />\n" +
            " <slobroks>\n" +
            " <slobrok hostalias=\"configserver\" />\n" +
            " <slobrok hostalias=\"logserver\" />\n" +
            " </slobroks>\n" +
            " <cluster-controllers>\n" +
            " <cluster-controller hostalias=\"configserver\"/>" +
            " <cluster-controller hostalias=\"configserver2\"/>" +
            " <cluster-controller hostalias=\"configserver3\"/>" +
            " </cluster-controllers>\n" +
            " </admin>\n" +
            " <content version='1.0' id='bar'>" +
            " <redundancy>1</redundancy>\n" +
            " <documents>" +
            " <document type=\"type1\" mode=\"index\"/>\n" +
            " <document type=\"type2\" mode=\"index\"/>\n" +
            " </documents>\n" +
            " <group>" +
            " <node hostalias='node0' distribution-key='0' />" +
            " </group>" +
            " <tuning>" +
            " <cluster-controller>\n" +
            " <init-progress-time>34567</init-progress-time>" +
            " </cluster-controller>" +
            " </tuning>" +
            " </content>" +
            "\n" +
            "</services>";

        List<String> sds = ApplicationPackageUtils.generateSchemas("type1", "type2");
        VespaModel model = new VespaModelCreatorWithMockPkg(null, xml, sds).create();
        assertEquals(2, model.getContentClusters().get("bar").getDocumentDefinitions().size());
        ContainerCluster<?> cluster = model.getAdmin().getClusterControllers();
        assertEquals(3, cluster.getContainers().size());
    }

    // Builds a single-node end-to-end model (container + content cluster
    // 'storage' with one document type) using a canned services.xml.
    VespaModel createEnd2EndOneNode(ModelContext.Properties properties) {
        String services =
            "<?xml version='1.0' encoding='UTF-8' ?>" +
            "<services version='1.0'>" +
            " <admin version='2.0'>" +
            " <adminserver hostalias='node1'/>" +
            " </admin>" +
            " <container id='default' version='1.0'>" +
            " <search/>" +
            " <nodes>" +
            " <node hostalias='node1'/>" +
            " </nodes>" +
            " </container>" +
            " <content id='storage' version='1.0'>" +
            " <redundancy>2</redundancy>" +
            " <group>" +
            " <node distribution-key='0' hostalias='node1'/>" +
            " <node distribution-key='1' hostalias='node1'/>" +
            " </group>" +
            " <tuning>" +
            " <cluster-controller>" +
            " <transition-time>0</transition-time>" +
            " </cluster-controller>" +
            " </tuning>" +
            " <documents>" +
            " <document mode='index' type='type1'/>" +
            " </documents>" +
            " <engine>" +
            " <proton/>" +
            " </engine>" +
            " </content>" +
            " </services>";
        return createEnd2EndOneNode(properties, services);
    }

    // Builds an end-to-end model from caller-supplied services.xml with a
    // generated 'type1' schema and the given deploy properties.
    VespaModel createEnd2EndOneNode(ModelContext.Properties properties, String services) {
        DeployState.Builder deployStateBuilder = new DeployState.Builder().properties(properties);
        List<String> sds = ApplicationPackageUtils.generateSchemas("type1");
        return (new VespaModelCreatorWithMockPkg(null, services, sds)).create(deployStateBuilder);
    }

    // Single-node end-to-end model: one document type and one cluster
    // controller container.
    @Test
    public void testEndToEndOneNode() {
        VespaModel model = createEnd2EndOneNode(new TestProperties());
        assertEquals(1, model.getContentClusters().get("storage").getDocumentDefinitions().size());
        ContainerCluster<?> cluster = model.getAdmin().getClusterControllers();
        assertEquals(1, cluster.getContainers().size());
    }

    // With the proton engine, inline bucket splitting (distributor) and
    // multibit split optimization (storage) are both disabled.
    @Test
    public void testSearchTuning() {
        String xml =
            "<?xml version=\"1.0\" encoding=\"utf-8\" ?>\n" +
            "<services>\n" +
            "\n" +
            " <admin version=\"2.0\">\n" +
            " <adminserver hostalias=\"node0\" />\n" +
            " <cluster-controllers>\n" +
            " <cluster-controller hostalias=\"node0\"/>" +
            " </cluster-controllers>\n" +
            " </admin>\n" +
            " <content version='1.0' id='bar'>" +
            " <redundancy>1</redundancy>\n" +
            " <documents>" +
            " <document type=\"type1\" mode='index'/>\n" +
            " <document type=\"type2\" mode='index'/>\n" +
            " </documents>\n" +
            " <group>" +
            " <node hostalias='node0' distribution-key='0'/>" +
            " </group>" +
            " <tuning>\n" +
            " <cluster-controller>" +
            " <init-progress-time>34567</init-progress-time>" +
            " </cluster-controller>" +
            " </tuning>" +
            " </content>" +
            "\n" +
            "</services>";

        List<String> sds = ApplicationPackageUtils.generateSchemas("type1", "type2");
        VespaModel model = new VespaModelCreatorWithMockPkg(getHosts(), xml, sds).create();

        assertTrue(model.getContentClusters().get("bar").getPersistence() instanceof ProtonEngine.Factory);

        {
            StorDistributormanagerConfig.Builder builder = new StorDistributormanagerConfig.Builder();
            model.getConfig(builder, "bar/distributor/0");
            StorDistributormanagerConfig config = new StorDistributormanagerConfig(builder);
            assertFalse(config.inlinebucketsplitting());
        }

        {
            StorFilestorConfig.Builder builder = new StorFilestorConfig.Builder();
            model.getConfig(builder, "bar/storage/0");
            StorFilestorConfig config = new StorFilestorConfig(builder);
            assertFalse(config.enable_multibit_split_optimalization());
        }
    }

    // Omitting the <redundancy> element must fail deployment with an
    // IllegalArgumentException naming the missing element.
    @Test
    public void testRedundancyRequired() {
        String xml =
            "<?xml version=\"1.0\" encoding=\"utf-8\" ?>\n" +
            "<services>\n" +
            "\n" +
            " <admin version=\"2.0\">\n" +
            " <adminserver hostalias=\"node0\" />\n" +
            " </admin>\n" +
            " <content version='1.0' id='bar'>" +
            " <documents>" +
            " <document type=\"type1\" mode='index'/>\n" +
            " </documents>\n" +
            " <group>\n" +
            " <node hostalias='node0' distribution-key='0'/>\n" +
            " </group>\n" +
            " </content>\n" +
            "</services>\n";

        List<String> sds = ApplicationPackageUtils.generateSchemas("type1", "type2");
        try {
            new VespaModelCreatorWithMockPkg(getHosts(), xml, sds).create();
            fail("Deploying without redundancy should fail");
        } catch (IllegalArgumentException e) {
            assertTrue(e.getMessage(), e.getMessage().contains("missing required element \"redundancy\""));
        }
    }

    // A final redundancy (2) lower than reply-after (4) must be rejected.
    @Test
    public void testRedundancyFinalLessThanInitial() {
        try {
            parse(
                "<content version=\"1.0\" id=\"storage\">\n" +
                " <redundancy reply-after=\"4\">2</redundancy>\n" +
                " <group>" +
                " <node hostalias='node0' distribution-key='0' />" +
                " </group>" +
                "</content>"
            );
            fail("no exception thrown");
        } catch (Exception e) { /* ignore */ }
    }

    // searchable-copies (3) greater than redundancy (2) must be rejected.
    @Test
    public void testReadyTooHigh() {
        try {
            parse(
                "<content version=\"1.0\" id=\"storage\">\n" +
                " <engine>" +
                " <proton>" +
                " <searchable-copies>3</searchable-copies>" +
                " </proton>" +
                " </engine>" +
                " <redundancy>2</redundancy>\n" +
                " <group>" +
                " <node hostalias='node0' distribution-key='0' />" +
                " </group>" +
                "</content>"
            );
            fail("no exception thrown");
        } catch (Exception e) { /* ignore */ }
    }
FleetcontrollerConfig getFleetControllerConfig(String xml) { ContentCluster cluster = parse(xml); FleetcontrollerConfig.Builder builder = new FleetcontrollerConfig.Builder(); cluster.getConfig(builder); cluster.getClusterControllerConfig().getConfig(builder); return new FleetcontrollerConfig(builder); } @Test public void testFleetControllerOverride() { { FleetcontrollerConfig config = getFleetControllerConfig( "<content version=\"1.0\" id=\"storage\">\n" + " <documents/>" + " <group>\n" + " <node distribution-key=\"0\" hostalias=\"mockhost\"/>\n" + " </group>\n" + "</content>" ); assertEquals(0, config.min_storage_up_ratio(), 0.01); assertEquals(0, config.min_distributor_up_ratio(), 0.01); assertEquals(1, config.min_storage_up_count()); assertEquals(1, config.min_distributors_up_count()); } { FleetcontrollerConfig config = getFleetControllerConfig( "<content version=\"1.0\" id=\"storage\">\n" + " <documents/>" + " <group>\n" + " <node distribution-key=\"0\" hostalias=\"mockhost\"/>\n" + " <node distribution-key=\"1\" hostalias=\"mockhost\"/>\n" + " <node distribution-key=\"2\" hostalias=\"mockhost\"/>\n" + " <node distribution-key=\"3\" hostalias=\"mockhost\"/>\n" + " <node distribution-key=\"4\" hostalias=\"mockhost\"/>\n" + " <node distribution-key=\"5\" hostalias=\"mockhost\"/>\n" + " </group>\n" + "</content>" ); assertNotSame(0, config.min_storage_up_ratio()); } } @Test public void testImplicitDistributionBits() { ContentCluster cluster = parse( "<content version=\"1.0\" id=\"storage\">\n" + " <documents/>" + " <group>\n" + " <node distribution-key=\"0\" hostalias=\"mockhost\"/>\n" + " </group>\n" + "</content>" ); assertDistributionBitsInConfig(cluster, 8); cluster = parse( "<content version=\"1.0\" id=\"storage\">\n" + " <documents/>" + " <group>\n" + " <node distribution-key=\"0\" hostalias=\"mockhost\"/>\n" + " </group>\n" + "</content>" ); assertDistributionBitsInConfig(cluster, 8); } @Test public void testExplicitDistributionBits() { ContentCluster 
cluster = parse( "<content version=\"1.0\" id=\"storage\">\n" + " <documents/>" + " <group>\n" + " <node distribution-key=\"0\" hostalias=\"mockhost\"/>\n" + " </group>\n" + " <tuning>\n" + " <distribution type=\"strict\"/>\n" + " </tuning>\n" + "</content>" ); assertDistributionBitsInConfig(cluster, 8); cluster = parse( "<content version=\"1.0\" id=\"storage\">\n" + " <documents/>" + " <group>\n" + " <node distribution-key=\"0\" hostalias=\"mockhost\"/>\n" + " </group>\n" + " <tuning>\n" + " <distribution type=\"loose\"/>\n" + " </tuning>\n" + "</content>" ); assertDistributionBitsInConfig(cluster, 8); } @Test public void testZoneDependentDistributionBits() throws Exception { String xml = new ContentClusterBuilder().docTypes("test").getXml(); ContentCluster prodWith16Bits = createWithZone(xml, new Zone(Environment.prod, RegionName.from("us-east-3"))); assertDistributionBitsInConfig(prodWith16Bits, 16); ContentCluster stagingNot16Bits = createWithZone(xml, new Zone(Environment.staging, RegionName.from("us-east-3"))); assertDistributionBitsInConfig(stagingNot16Bits, 8); } @Test public void testGenerateSearchNodes() { ContentCluster cluster = parse( "<content version=\"1.0\" id=\"storage\">\n" + " <documents/>" + " <engine>" + " <proton/>" + " </engine>" + " <group>\n" + " <node distribution-key=\"0\" hostalias=\"mockhost\"/>\n" + " <node distribution-key=\"1\" hostalias=\"mockhost\"/>\n" + " </group>\n" + "</content>" ); { StorServerConfig.Builder builder = new StorServerConfig.Builder(); cluster.getStorageCluster().getConfig(builder); cluster.getStorageCluster().getChildren().get("0").getConfig(builder); StorServerConfig config = new StorServerConfig(builder); } { StorServerConfig.Builder builder = new StorServerConfig.Builder(); cluster.getStorageCluster().getConfig(builder); cluster.getStorageCluster().getChildren().get("1").getConfig(builder); StorServerConfig config = new StorServerConfig(builder); } } @Test public void testAlternativeNodeSyntax() { 
ContentCluster cluster = parse( "<content version=\"1.0\" id=\"test\">\n" + " <documents/>" + " <engine>" + " <proton/>" + " </engine>" + " <nodes>\n" + " <node distribution-key=\"0\" hostalias=\"mockhost\"/>\n" + " <node distribution-key=\"1\" hostalias=\"mockhost\"/>\n" + " </nodes>\n" + "</content>" ); DistributionConfig.Builder bob = new DistributionConfig.Builder(); cluster.getConfig(bob); DistributionConfig.Cluster.Group group = bob.build().cluster("test").group(0); assertEquals("invalid", group.name()); assertEquals("invalid", group.index()); assertEquals(2, group.nodes().size()); StorDistributionConfig.Builder builder = new StorDistributionConfig.Builder(); cluster.getConfig(builder); StorDistributionConfig config = new StorDistributionConfig(builder); assertEquals("invalid", config.group(0).name()); assertEquals("invalid", config.group(0).index()); assertEquals(2, config.group(0).nodes().size()); } @Test public void testReadyWhenInitialOne() { StorDistributionConfig.Builder builder = new StorDistributionConfig.Builder(); parse( "<content version=\"1.0\" id=\"storage\">\n" + " <documents/>" + " <redundancy>1</redundancy>\n" + " <group>\n" + " <node distribution-key=\"0\" hostalias=\"mockhost\"/>" + " </group>" + "</content>" ).getConfig(builder); StorDistributionConfig config = new StorDistributionConfig(builder); assertEquals(1, config.initial_redundancy()); assertEquals(1, config.redundancy()); assertEquals(1, config.ready_copies()); } public void testProvider(String tagName, StorServerConfig.Persistence_provider.Type.Enum type) { ContentCluster cluster = parse( "<content version=\"1.0\" id=\"storage\">\n" + " <documents/>" + " <redundancy>3</redundancy>" + " <engine>\n" + " <" + tagName + "/>\n" + " </engine>\n" + " <group>\n" + " <node distribution-key=\"0\" hostalias=\"mockhost\"/>" + " </group>" + "</content>" ); { StorServerConfig.Builder builder = new StorServerConfig.Builder(); cluster.getStorageCluster().getConfig(builder); 
cluster.getStorageCluster().getChildren().get("0").getConfig(builder); StorServerConfig config = new StorServerConfig(builder); assertEquals(type, config.persistence_provider().type()); } { StorServerConfig.Builder builder = new StorServerConfig.Builder(); cluster.getDistributorNodes().getConfig(builder); cluster.getDistributorNodes().getChildren().get("0").getConfig(builder); StorServerConfig config = new StorServerConfig(builder); assertEquals(type, config.persistence_provider().type()); } } @Test public void testProviders() { testProvider("proton", StorServerConfig.Persistence_provider.Type.RPC); testProvider("dummy", StorServerConfig.Persistence_provider.Type.DUMMY); } @Test public void testMetrics() { MetricsmanagerConfig.Builder builder = new MetricsmanagerConfig.Builder(); ContentCluster cluster = parse("<content version=\"1.0\" id=\"storage\">\n" + " <documents/>" + " <group>\n" + " <node distribution-key=\"0\" hostalias=\"mockhost\"/>\n" + " </group>\n" + "</content>" ); cluster.getConfig(builder); MetricsmanagerConfig config = new MetricsmanagerConfig(builder); assertEquals(6, config.consumer().size()); assertEquals("status", config.consumer(0).name()); assertEquals("*", config.consumer(0).addedmetrics(0)); assertEquals("partofsum", config.consumer(0).removedtags(0)); assertEquals("log", config.consumer(1).name()); assertEquals("logdefault", config.consumer(1).tags().get(0)); assertEquals("loadtype", config.consumer(1).removedtags(0)); assertEquals("yamas", config.consumer(2).name()); assertEquals("yamasdefault", config.consumer(2).tags().get(0)); assertEquals("loadtype", config.consumer(2).removedtags(0)); assertEquals("health", config.consumer(3).name()); assertEquals("statereporter", config.consumer(5).name()); assertEquals("*", config.consumer(5).addedmetrics(0)); assertEquals("thread", config.consumer(5).removedtags(0)); assertEquals("partofsum", config.consumer(5).removedtags(1)); assertEquals(0, config.consumer(5).tags().size()); 
cluster.getStorageCluster().getConfig(builder); config = new MetricsmanagerConfig(builder); assertEquals(6, config.consumer().size()); assertEquals("fleetcontroller", config.consumer(4).name()); assertEquals(4, config.consumer(4).addedmetrics().size()); assertEquals("vds.datastored.alldisks.docs", config.consumer(4).addedmetrics(0)); assertEquals("vds.datastored.alldisks.bytes", config.consumer(4).addedmetrics(1)); assertEquals("vds.datastored.alldisks.buckets", config.consumer(4).addedmetrics(2)); assertEquals("vds.datastored.bucket_space.buckets_total", config.consumer(4).addedmetrics(3)); } public MetricsmanagerConfig.Consumer getConsumer(String consumer, MetricsmanagerConfig config) { for (MetricsmanagerConfig.Consumer c : config.consumer()) { if (c.name().equals(consumer)) { return c; } } return null; } @Test public void testConfiguredMetrics() { String xml = "" + "<services>" + "<content version=\"1.0\" id=\"storage\">\n" + " <redundancy>1</redundancy>\n" + " <documents>" + " <document type=\"type1\" mode='index'/>\n" + " <document type=\"type2\" mode='index'/>\n" + " </documents>" + " <group>\n" + " <node distribution-key=\"0\" hostalias=\"node0\"/>\n" + " </group>\n" + "</content>" + "<admin version=\"2.0\">" + " <logserver hostalias=\"node0\"/>" + " <adminserver hostalias=\"node0\"/>" + "</admin>" + "</services>"; List<String> sds = ApplicationPackageUtils.generateSchemas("type1", "type2"); VespaModel model = new VespaModelCreatorWithMockPkg(getHosts(), xml, sds).create(); { MetricsmanagerConfig.Builder builder = new MetricsmanagerConfig.Builder(); model.getConfig(builder, "storage/storage/0"); MetricsmanagerConfig config = new MetricsmanagerConfig(builder); String expected = "[vds.filestor.alldisks.allthreads.put.sum\n" + "vds.filestor.alldisks.allthreads.get.sum\n" + "vds.filestor.alldisks.allthreads.remove.sum\n" + "vds.filestor.alldisks.allthreads.update.sum\n" + "vds.datastored.alldisks.docs\n" + "vds.datastored.alldisks.bytes\n" + 
"vds.filestor.alldisks.queuesize\n" + "vds.filestor.alldisks.averagequeuewait.sum\n" + "vds.visitor.cv_queuewaittime\n" + "vds.visitor.allthreads.averagequeuewait\n" + "vds.visitor.allthreads.averagevisitorlifetime\n" + "vds.visitor.allthreads.created.sum]"; String actual = getConsumer("log", config).addedmetrics().toString().replaceAll(", ", "\n"); assertEquals(expected, actual); assertEquals("[logdefault]", getConsumer("log", config).tags().toString()); expected = "[vds.datastored.alldisks.docs\n" + "vds.datastored.alldisks.bytes\n" + "vds.datastored.alldisks.buckets\n" + "vds.datastored.bucket_space.buckets_total]"; actual = getConsumer("fleetcontroller", config).addedmetrics().toString().replaceAll(", ", "\n"); assertEquals(expected, actual); } { MetricsmanagerConfig.Builder builder = new MetricsmanagerConfig.Builder(); model.getConfig(builder, "storage/distributor/0"); MetricsmanagerConfig config = new MetricsmanagerConfig(builder); assertEquals("[logdefault]", getConsumer("log", config).tags().toString()); } } @Test public void flush_on_shutdown_is_default_on_for_non_hosted() throws Exception { assertPrepareRestartCommand(createOneNodeCluster(false)); } @Test public void flush_on_shutdown_can_be_turned_off_for_non_hosted() throws Exception { assertNoPreShutdownCommand(createClusterWithFlushOnShutdownOverride(false, false)); } @Test public void flush_on_shutdown_is_default_on_for_hosted() throws Exception { assertPrepareRestartCommand(createOneNodeCluster(true)); } @Test public void flush_on_shutdown_can_be_turned_on_for_hosted() throws Exception { assertPrepareRestartCommand(createClusterWithFlushOnShutdownOverride(true, true)); } private static String oneNodeClusterXml() { return "<content version=\"1.0\" id=\"mockcluster\">" + " <documents/>" + " <group>" + " <node distribution-key=\"0\" hostalias=\"mockhost\"/>" + " </group>" + "</content>"; } private static ContentCluster createOneNodeCluster(boolean isHostedVespa) throws Exception { return 
createOneNodeCluster(oneNodeClusterXml(), new TestProperties().setHostedVespa(isHostedVespa)); } private static ContentCluster createOneNodeCluster(TestProperties props) throws Exception { return createOneNodeCluster(oneNodeClusterXml(), props); } private static ContentCluster createOneNodeCluster(TestProperties props, Optional<Flavor> flavor) throws Exception { return createOneNodeCluster(oneNodeClusterXml(), props, flavor); } private static ContentCluster createClusterWithFlushOnShutdownOverride(boolean flushOnShutdown, boolean isHostedVespa) throws Exception { return createOneNodeCluster("<content version=\"1.0\" id=\"mockcluster\">" + " <documents/>" + " <engine>" + " <proton>" + " <flush-on-shutdown>" + flushOnShutdown + "</flush-on-shutdown>" + " </proton>" + " </engine>" + " <group>" + " <node distribution-key=\"0\" hostalias=\"mockhost\"/>" + " </group>" + "</content>", new TestProperties().setHostedVespa(isHostedVespa)); } private static ContentCluster createOneNodeCluster(String clusterXml, TestProperties props) throws Exception { return createOneNodeCluster(clusterXml, props, Optional.empty()); } private static ContentCluster createOneNodeCluster(String clusterXml, TestProperties props, Optional<Flavor> flavor) throws Exception { DeployState.Builder deployStateBuilder = new DeployState.Builder() .properties(props); MockRoot root = flavor.isPresent() ? 
ContentClusterUtils.createMockRoot(new SingleNodeProvisioner(flavor.get()), Collections.emptyList(), deployStateBuilder) : ContentClusterUtils.createMockRoot(Collections.emptyList(), deployStateBuilder); ContentCluster cluster = ContentClusterUtils.createCluster(clusterXml, root); root.freezeModelTopology(); cluster.validate(); return cluster; } private static void assertPrepareRestartCommand(ContentCluster cluster) { Optional<String> command = cluster.getSearch().getSearchNodes().get(0).getPreShutdownCommand(); assertTrue(command.isPresent()); assertTrue(command.get().matches(".*vespa-proton-cmd [0-9]+ prepareRestart")); } private static void assertNoPreShutdownCommand(ContentCluster cluster) { Optional<String> command = cluster.getSearch().getSearchNodes().get(0).getPreShutdownCommand(); assertFalse(command.isPresent()); } @Test public void reserved_document_name_throws_exception() { expectedException.expect(IllegalArgumentException.class); expectedException.expectMessage("The following document types conflict with reserved keyword names: 'true'."); String xml = "<content version=\"1.0\" id=\"storage\">" + " <redundancy>1</redundancy>" + " <documents>" + " <document type=\"true\" mode=\"index\"/>" + " </documents>" + " <group>" + " <node distribution-key=\"0\" hostalias=\"mockhost\"/>" + " </group>" + "</content>"; List<String> sds = ApplicationPackageUtils.generateSchemas("true"); new VespaModelCreatorWithMockPkg(null, xml, sds).create(); } private void assertClusterHasBucketSpaceMappings(AllClustersBucketSpacesConfig config, String clusterId, List<String> defaultSpaceTypes, List<String> globalSpaceTypes) { AllClustersBucketSpacesConfig.Cluster cluster = config.cluster(clusterId); assertNotNull(cluster); assertEquals(defaultSpaceTypes.size() + globalSpaceTypes.size(), cluster.documentType().size()); assertClusterHasTypesInBucketSpace(cluster, "default", defaultSpaceTypes); assertClusterHasTypesInBucketSpace(cluster, "global", globalSpaceTypes); } private void 
assertClusterHasTypesInBucketSpace(AllClustersBucketSpacesConfig.Cluster cluster, String bucketSpace, List<String> expectedTypes) { for (String type : expectedTypes) { assertNotNull(cluster.documentType(type)); assertEquals(bucketSpace, cluster.documentType(type).bucketSpace()); } } private VespaModel createDualContentCluster() { String xml = "<services>" + "<admin version=\"2.0\">" + " <adminserver hostalias=\"node0\"/>" + "</admin>" + "<content version=\"1.0\" id=\"foo_c\">" + " <redundancy>1</redundancy>" + " <documents>" + " <document type=\"bunnies\" mode=\"index\"/>" + " <document type=\"hares\" mode=\"index\"/>" + " </documents>" + " <group>" + " <node distribution-key=\"0\" hostalias=\"node0\"/>" + " </group>" + "</content>" + "<content version=\"1.0\" id=\"bar_c\">" + " <redundancy>1</redundancy>" + " <documents>" + " <document type=\"rabbits\" mode=\"index\" global=\"true\"/>" + " </documents>" + " <group>" + " <node distribution-key=\"0\" hostalias=\"node0\"/>" + " </group>" + "</content>" + "</services>"; List<String> sds = ApplicationPackageUtils.generateSchemas("bunnies", "hares", "rabbits"); return new VespaModelCreatorWithMockPkg(getHosts(), xml, sds).create(); } @Test public void all_clusters_bucket_spaces_config_contains_mappings_across_all_clusters() { VespaModel model = createDualContentCluster(); AllClustersBucketSpacesConfig.Builder builder = new AllClustersBucketSpacesConfig.Builder(); model.getConfig(builder, "client"); AllClustersBucketSpacesConfig config = builder.build(); assertEquals(2, config.cluster().size()); assertClusterHasBucketSpaceMappings(config, "foo_c", Arrays.asList("bunnies", "hares"), Collections.emptyList()); assertClusterHasBucketSpaceMappings(config, "bar_c", Collections.emptyList(), Collections.singletonList("rabbits")); } @Test public void test_routing_with_multiple_clusters() { VespaModel model = createDualContentCluster(); Routing routing = model.getRouting(); assertNotNull(routing); assertEquals("[]", 
routing.getErrors().toString()); assertEquals(1, routing.getProtocols().size()); DocumentProtocol protocol = (DocumentProtocol) routing.getProtocols().get(0); RoutingTableSpec spec = protocol.getRoutingTableSpec(); assertEquals(3, spec.getNumHops()); assertEquals("docproc/cluster.bar_c.indexing/chain.indexing", spec.getHop(0).getName()); assertEquals("docproc/cluster.foo_c.indexing/chain.indexing", spec.getHop(1).getName()); assertEquals("indexing", spec.getHop(2).getName()); assertEquals(10, spec.getNumRoutes()); assertRoute(spec.getRoute(0), "bar_c", "[MessageType:bar_c]"); assertRoute(spec.getRoute(1), "bar_c-direct", "[Content:cluster=bar_c]"); assertRoute(spec.getRoute(2), "bar_c-index", "docproc/cluster.bar_c.indexing/chain.indexing", "[Content:cluster=bar_c]"); assertRoute(spec.getRoute(3), "default", "indexing"); assertRoute(spec.getRoute(4), "default-get", "indexing"); assertRoute(spec.getRoute(5), "foo_c", "[MessageType:foo_c]"); assertRoute(spec.getRoute(6), "foo_c-direct", "[Content:cluster=foo_c]"); assertRoute(spec.getRoute(7), "foo_c-index", "docproc/cluster.foo_c.indexing/chain.indexing", "[Content:cluster=foo_c]"); assertRoute(spec.getRoute(8), "storage/cluster.bar_c", "route:bar_c"); assertRoute(spec.getRoute(9), "storage/cluster.foo_c", "route:foo_c"); } private ContentCluster createWithZone(String clusterXml, Zone zone) throws Exception { DeployState.Builder deployStateBuilder = new DeployState.Builder() .zone(zone) .properties(new TestProperties().setHostedVespa(true)); List<String> schemas = SchemaBuilder.createSchemas("test"); MockRoot root = ContentClusterUtils.createMockRoot(schemas, deployStateBuilder); ContentCluster cluster = ContentClusterUtils.createCluster(clusterXml, root); root.freezeModelTopology(); cluster.validate(); return cluster; } private void assertDistributionBitsInConfig(ContentCluster cluster, int distributionBits) { FleetcontrollerConfig.Builder builder = new FleetcontrollerConfig.Builder(); cluster.getConfig(builder); 
cluster.getClusterControllerConfig().getConfig(builder); FleetcontrollerConfig config = new FleetcontrollerConfig(builder); assertEquals(distributionBits, config.ideal_distribution_bits()); StorDistributormanagerConfig.Builder sdBuilder = new StorDistributormanagerConfig.Builder(); cluster.getConfig(sdBuilder); StorDistributormanagerConfig storDistributormanagerConfig = new StorDistributormanagerConfig(sdBuilder); assertEquals(distributionBits, storDistributormanagerConfig.minsplitcount()); } private void verifyTopKProbabilityPropertiesControl() { VespaModel model = createEnd2EndOneNode(new TestProperties()); ContentCluster cc = model.getContentClusters().get("storage"); DispatchConfig.Builder builder = new DispatchConfig.Builder(); cc.getSearch().getConfig(builder); DispatchConfig cfg = new DispatchConfig(builder); assertEquals(0.9999, cfg.topKProbability(), 0.0); } @Test public void default_topKprobability_controlled_by_properties() { verifyTopKProbabilityPropertiesControl(); } private boolean resolveThreePhaseUpdateConfigWithFeatureFlag(boolean flagEnableThreePhase) { VespaModel model = createEnd2EndOneNode(new TestProperties().setUseThreePhaseUpdates(flagEnableThreePhase)); ContentCluster cc = model.getContentClusters().get("storage"); var builder = new StorDistributormanagerConfig.Builder(); cc.getDistributorNodes().getConfig(builder); return (new StorDistributormanagerConfig(builder)).enable_metadata_only_fetch_phase_for_inconsistent_updates(); } @Test public void default_distributor_three_phase_update_config_controlled_by_properties() { assertFalse(resolveThreePhaseUpdateConfigWithFeatureFlag(false)); assertTrue(resolveThreePhaseUpdateConfigWithFeatureFlag(true)); } private int resolveMaxCompactBuffers(OptionalInt maxCompactBuffers) { TestProperties testProperties = new TestProperties(); if (maxCompactBuffers.isPresent()) { testProperties.maxCompactBuffers(maxCompactBuffers.getAsInt()); } VespaModel model = createEnd2EndOneNode(testProperties); 
ContentCluster cc = model.getContentClusters().get("storage"); ProtonConfig.Builder protonBuilder = new ProtonConfig.Builder(); cc.getSearch().getConfig(protonBuilder); ProtonConfig protonConfig = new ProtonConfig(protonBuilder); assertEquals(1, protonConfig.documentdb().size()); return protonConfig.documentdb(0).allocation().max_compact_buffers(); } @Test void assertZookeeperServerImplementation(String expectedClassName, ClusterControllerContainerCluster clusterControllerCluster) { for (ClusterControllerContainer c : clusterControllerCluster.getContainers()) { var builder = new ComponentsConfig.Builder(); c.getConfig(builder); assertEquals(1, new ComponentsConfig(builder).components().stream() .filter(component -> component.classId().equals(expectedClassName)) .count()); } } private StorDistributormanagerConfig resolveStorDistributormanagerConfig(TestProperties props) throws Exception { var cc = createOneNodeCluster(props); var builder = new StorDistributormanagerConfig.Builder(); cc.getDistributorNodes().getConfig(builder); return (new StorDistributormanagerConfig(builder)); } private int resolveMaxInhibitedGroupsConfigWithFeatureFlag(int maxGroups) throws Exception { var cfg = resolveStorDistributormanagerConfig(new TestProperties().maxActivationInhibitedOutOfSyncGroups(maxGroups)); return cfg.max_activation_inhibited_out_of_sync_groups(); } @Test public void default_distributor_max_inhibited_group_activation_config_controlled_by_properties() throws Exception { assertEquals(0, resolveMaxInhibitedGroupsConfigWithFeatureFlag(0)); assertEquals(2, resolveMaxInhibitedGroupsConfigWithFeatureFlag(2)); } private int resolveNumDistributorStripesConfig(Optional<Flavor> flavor) throws Exception { var cc = createOneNodeCluster(new TestProperties(), flavor); var builder = new StorDistributormanagerConfig.Builder(); cc.getDistributorNodes().getChildren().get("0").getConfig(builder); return (new StorDistributormanagerConfig(builder)).num_distributor_stripes(); } private int 
resolveTunedNumDistributorStripesConfig(int numCpuCores) throws Exception { var flavor = new Flavor(new FlavorsConfig.Flavor(new FlavorsConfig.Flavor.Builder().name("test").minCpuCores(numCpuCores))); return resolveNumDistributorStripesConfig(Optional.of(flavor)); } @Test public void num_distributor_stripes_config_defaults_to_zero() throws Exception { assertEquals(0, resolveNumDistributorStripesConfig(Optional.empty())); } @Test public void num_distributor_stripes_config_tuned_by_flavor() throws Exception { assertEquals(1, resolveTunedNumDistributorStripesConfig(1)); assertEquals(1, resolveTunedNumDistributorStripesConfig(16)); assertEquals(2, resolveTunedNumDistributorStripesConfig(17)); assertEquals(2, resolveTunedNumDistributorStripesConfig(64)); assertEquals(4, resolveTunedNumDistributorStripesConfig(65)); } @Test public void distributor_merge_busy_wait_controlled_by_properties() throws Exception { assertEquals(10, resolveDistributorMergeBusyWaitConfig(Optional.empty())); assertEquals(1, resolveDistributorMergeBusyWaitConfig(Optional.of(1))); } private int resolveDistributorMergeBusyWaitConfig(Optional<Integer> mergeBusyWait) throws Exception { var props = new TestProperties(); if (mergeBusyWait.isPresent()) { props.setDistributorMergeBusyWait(mergeBusyWait.get()); } var cluster = createOneNodeCluster(props); var builder = new StorDistributormanagerConfig.Builder(); cluster.getDistributorNodes().getConfig(builder); return (new StorDistributormanagerConfig(builder)).inhibit_merge_sending_on_busy_node_duration_sec(); } @Test public void distributor_enhanced_maintenance_scheduling_controlled_by_properties() throws Exception { assertFalse(resolveDistributorEnhancedSchedulingConfig(false)); assertTrue(resolveDistributorEnhancedSchedulingConfig(true)); } private boolean resolveDistributorEnhancedSchedulingConfig(boolean enhancedScheduling) throws Exception { var props = new TestProperties(); if (enhancedScheduling) { 
props.distributorEnhancedMaintenanceScheduling(enhancedScheduling); } var cluster = createOneNodeCluster(props); var builder = new StorDistributormanagerConfig.Builder(); cluster.getDistributorNodes().getConfig(builder); return (new StorDistributormanagerConfig(builder)).implicitly_clear_bucket_priority_on_schedule(); } @Test public void unordered_merge_chaining_config_controlled_by_properties() throws Exception { assertFalse(resolveUnorderedMergeChainingConfig(false)); assertTrue(resolveUnorderedMergeChainingConfig(true)); } private boolean resolveUnorderedMergeChainingConfig(boolean unorderedMergeChaining) throws Exception { var props = new TestProperties(); if (unorderedMergeChaining) { props.setUnorderedMergeChaining(true); } var cluster = createOneNodeCluster(props); var builder = new StorDistributormanagerConfig.Builder(); cluster.getDistributorNodes().getConfig(builder); return (new StorDistributormanagerConfig(builder)).use_unordered_merge_chaining(); } @Test public void testDedicatedClusterControllers() { VespaModel noContentModel = createEnd2EndOneNode(new TestProperties().setHostedVespa(true) .setMultitenant(true), "<?xml version='1.0' encoding='UTF-8' ?>" + "<services version='1.0'>" + " <container id='default' version='1.0' />" + " </services>"); assertEquals(Map.of(), noContentModel.getContentClusters()); assertNull("No cluster controller without content", noContentModel.getAdmin().getClusterControllers()); VespaModel oneContentModel = createEnd2EndOneNode(new TestProperties().setHostedVespa(true) .setMultitenant(true), "<?xml version='1.0' encoding='UTF-8' ?>" + "<services version='1.0'>" + " <container id='default' version='1.0' />" + " <content id='storage' version='1.0'>" + " <redundancy>1</redundancy>" + " <documents>" + " <document mode='index' type='type1' />" + " </documents>" + " </content>" + " </services>"); assertNotNull("Shared cluster controller with content", oneContentModel.getAdmin().getClusterControllers()); String twoContentServices 
= "<?xml version='1.0' encoding='UTF-8' ?>" + "<services version='1.0'>" + " <container id='default' version='1.0' />" + " <content id='storage' version='1.0'>" + " <redundancy>1</redundancy>" + " <documents>" + " <document mode='index' type='type1' />" + " </documents>" + " <tuning>" + " <cluster-controller>" + " <min-distributor-up-ratio>0.618</min-distributor-up-ratio>" + " </cluster-controller>" + " </tuning>" + " </content>" + " <content id='dev-null' version='1.0'>" + " <redundancy>1</redundancy>" + " <documents>" + " <document mode='index' type='type1' />" + " </documents>" + " <tuning>" + " <cluster-controller>" + " <min-distributor-up-ratio>0.418</min-distributor-up-ratio>" + " </cluster-controller>" + " </tuning>" + " </content>" + " </services>"; VespaModel twoContentModel = createEnd2EndOneNode(new TestProperties().setHostedVespa(true) .setMultitenant(true), twoContentServices); assertNotNull("Shared cluster controller with content", twoContentModel.getAdmin().getClusterControllers()); ClusterControllerContainerCluster clusterControllers = twoContentModel.getAdmin().getClusterControllers(); assertEquals(1, clusterControllers.reindexingContext().documentTypesForCluster("storage").size()); assertEquals(1, clusterControllers.reindexingContext().documentTypesForCluster("dev-null").size()); var storageBuilder = new FleetcontrollerConfig.Builder(); var devNullBuilder = new FleetcontrollerConfig.Builder(); twoContentModel.getConfig(storageBuilder, "admin/standalone/cluster-controllers/0/components/clustercontroller-storage-configurer"); twoContentModel.getConfig(devNullBuilder, "admin/standalone/cluster-controllers/0/components/clustercontroller-dev-null-configurer"); assertEquals(0.618, storageBuilder.build().min_distributor_up_ratio(), 1e-9); assertEquals(0.418, devNullBuilder.build().min_distributor_up_ratio(), 1e-9); assertZookeeperServerImplementation("com.yahoo.vespa.zookeeper.ReconfigurableVespaZooKeeperServer", clusterControllers); 
assertZookeeperServerImplementation("com.yahoo.vespa.zookeeper.Reconfigurer", clusterControllers); assertZookeeperServerImplementation("com.yahoo.vespa.zookeeper.VespaZooKeeperAdminImpl", clusterControllers); } }
😆
public SchemaValidators(Version vespaVersion) { File schemaDir = null; try { schemaDir = saveSchemasFromJar(new File(SchemaValidators.schemaDirBase), vespaVersion); servicesXmlValidator = createValidator(schemaDir, servicesXmlSchemaName); hostsXmlValidator = createValidator(schemaDir, hostsXmlSchemaName); deploymentXmlValidator = createValidator(schemaDir, deploymentXmlSchemaName); validationOverridesXmlValidator = createValidator(schemaDir, validationOverridesXmlSchemaName); containerIncludeXmlValidator = createValidator(schemaDir, containerIncludeXmlSchemaName); routingStandaloneXmlValidator = createValidator(schemaDir, routingStandaloneXmlSchemaName); } catch (IOException ioe) { throw new RuntimeException(ioe); } finally { if (schemaDir != null) IOUtils.recursiveDeleteDir(schemaDir); } }
if (schemaDir != null)
public SchemaValidators(Version vespaVersion) { File schemaDir = null; try { schemaDir = saveSchemasFromJar(new File(SchemaValidators.schemaDirBase), vespaVersion); servicesXmlValidator = createValidator(schemaDir, servicesXmlSchemaName); hostsXmlValidator = createValidator(schemaDir, hostsXmlSchemaName); deploymentXmlValidator = createValidator(schemaDir, deploymentXmlSchemaName); validationOverridesXmlValidator = createValidator(schemaDir, validationOverridesXmlSchemaName); containerIncludeXmlValidator = createValidator(schemaDir, containerIncludeXmlSchemaName); routingStandaloneXmlValidator = createValidator(schemaDir, routingStandaloneXmlSchemaName); } catch (IOException ioe) { throw new RuntimeException(ioe); } finally { if (schemaDir != null) IOUtils.recursiveDeleteDir(schemaDir); } }
class SchemaValidators { private static final String schemaDirBase = System.getProperty("java.io.tmpdir", File.separator + "tmp" + File.separator + "vespa"); private static final Logger log = Logger.getLogger(SchemaValidators.class.getName()); private static final String servicesXmlSchemaName = "services.rnc"; private static final String hostsXmlSchemaName = "hosts.rnc"; private static final String deploymentXmlSchemaName = "deployment.rnc"; private static final String validationOverridesXmlSchemaName = "validation-overrides.rnc"; private static final String containerIncludeXmlSchemaName = "container-include.rnc"; private static final String routingStandaloneXmlSchemaName = "routing-standalone.rnc"; private final SchemaValidator servicesXmlValidator; private final SchemaValidator hostsXmlValidator; private final SchemaValidator deploymentXmlValidator; private final SchemaValidator validationOverridesXmlValidator; private final SchemaValidator containerIncludeXmlValidator; private final SchemaValidator routingStandaloneXmlValidator; /** * Initializes the validator by using the given file as schema file * * @param vespaVersion the version of Vespa we should validate against */ public SchemaValidator servicesXmlValidator() { return servicesXmlValidator; } public SchemaValidator hostsXmlValidator() { return hostsXmlValidator; } public SchemaValidator deploymentXmlValidator() { return deploymentXmlValidator; } SchemaValidator validationOverridesXmlValidator() { return validationOverridesXmlValidator; } SchemaValidator containerIncludeXmlValidator() { return containerIncludeXmlValidator; } SchemaValidator routingStandaloneXmlValidator() { return routingStandaloneXmlValidator; } /** * Looks for schema files in config-model.jar and saves them in a temp dir. 
Uses schema files * in $VESPA_HOME/share/vespa/schema/[major-version].x/ otherwise * * @return the directory the schema files are stored in * @throws IOException if it is not possible to read schema files */ private File saveSchemasFromJar(File tmpBase, Version vespaVersion) throws IOException { Class<? extends SchemaValidators> schemaValidatorClass = this.getClass(); Enumeration<URL> uris = schemaValidatorClass.getClassLoader().getResources("schema"); if (uris == null) throw new IllegalArgumentException("Could not find XML schemas "); File tmpDir = createTempDirectory(tmpBase.toPath(), "vespa").toFile(); log.log(Level.FINE, () -> "Will save all XML schemas for " + vespaVersion + " to " + tmpDir); boolean schemasFound = false; while (uris.hasMoreElements()) { URL u = uris.nextElement(); if ("jar".equals(u.getProtocol())) { JarURLConnection jarConnection = (JarURLConnection) u.openConnection(); JarFile jarFile = jarConnection.getJarFile(); for (Enumeration<JarEntry> entries = jarFile.entries(); entries.hasMoreElements(); ) { JarEntry je = entries.nextElement(); if (je.getName().startsWith("schema/") && je.getName().endsWith(".rnc")) { schemasFound = true; writeContentsToFile(tmpDir, je.getName(), jarFile.getInputStream(je)); } } jarFile.close(); } else if ("bundle".equals(u.getProtocol())) { Bundle bundle = getBundle(schemaValidatorClass); if (bundle == null) { String pathPrefix = getDefaults().underVespaHome("share/vespa/schema/"); File schemaPath = new File(pathPrefix + "version/" + vespaVersion.getMajor() + ".x/schema/"); if (! 
schemaPath.exists()) { log.log(Level.INFO, "Found no schemas in " + schemaPath + ", fallback to schemas in " + pathPrefix); schemaPath = new File(pathPrefix); } log.log(Level.FINE, "Using schemas found in " + schemaPath); schemasFound = true; copySchemas(schemaPath, tmpDir); } else { log.log(Level.FINE, () -> String.format("Saving schemas for model bundle %s:%s", bundle.getSymbolicName(), bundle.getVersion())); for (Enumeration<URL> entries = bundle.findEntries("schema", "*.rnc", true); entries.hasMoreElements(); ) { URL url = entries.nextElement(); writeContentsToFile(tmpDir, url.getFile(), url.openStream()); schemasFound = true; } } } else if ("file".equals(u.getProtocol())) { File schemaPath = new File(u.getPath()); copySchemas(schemaPath, tmpDir); schemasFound = true; } } if ( ! schemasFound) { IOUtils.recursiveDeleteDir(tmpDir); throw new IllegalArgumentException("Could not find schemas for version " + vespaVersion); } return tmpDir; } private static void copySchemas(File from, File to) throws IOException { if (! from.exists()) throw new IOException("Could not find schema source directory '" + from + "'"); if (! from.isDirectory()) throw new IOException("Schema source '" + from + "' is not a directory"); File sourceFile = new File(from, servicesXmlSchemaName); if (! 
sourceFile.exists()) throw new IOException("Schema source file '" + sourceFile + "' not found"); IOUtils.copyDirectoryInto(from, to); } private static void writeContentsToFile(File outDir, String outFile, InputStream inputStream) throws IOException { String contents = IOUtils.readAll(new InputStreamReader(inputStream)); File out = new File(outDir, outFile); IOUtils.writeFile(out, contents, false); } private SchemaValidator createValidator(File schemaDir, String schemaFile) { try { File file = new File(schemaDir + File.separator + "schema" + File.separator + schemaFile); return new SchemaValidator(file, new BaseDeployLogger()); } catch (SAXException e) { throw new RuntimeException("Invalid schema '" + schemaFile + "'", e); } catch (IOException e) { throw new RuntimeException("IO error reading schema '" + schemaFile + "'", e); } } }
class SchemaValidators { private static final String schemaDirBase = System.getProperty("java.io.tmpdir", File.separator + "tmp" + File.separator + "vespa"); private static final Logger log = Logger.getLogger(SchemaValidators.class.getName()); private static final String servicesXmlSchemaName = "services.rnc"; private static final String hostsXmlSchemaName = "hosts.rnc"; private static final String deploymentXmlSchemaName = "deployment.rnc"; private static final String validationOverridesXmlSchemaName = "validation-overrides.rnc"; private static final String containerIncludeXmlSchemaName = "container-include.rnc"; private static final String routingStandaloneXmlSchemaName = "routing-standalone.rnc"; private final SchemaValidator servicesXmlValidator; private final SchemaValidator hostsXmlValidator; private final SchemaValidator deploymentXmlValidator; private final SchemaValidator validationOverridesXmlValidator; private final SchemaValidator containerIncludeXmlValidator; private final SchemaValidator routingStandaloneXmlValidator; /** * Initializes the validator by using the given file as schema file * * @param vespaVersion the version of Vespa we should validate against */ public SchemaValidator servicesXmlValidator() { return servicesXmlValidator; } public SchemaValidator hostsXmlValidator() { return hostsXmlValidator; } public SchemaValidator deploymentXmlValidator() { return deploymentXmlValidator; } SchemaValidator validationOverridesXmlValidator() { return validationOverridesXmlValidator; } SchemaValidator containerIncludeXmlValidator() { return containerIncludeXmlValidator; } SchemaValidator routingStandaloneXmlValidator() { return routingStandaloneXmlValidator; } /** * Looks for schema files in config-model.jar and saves them in a temp dir. 
Uses schema files * in $VESPA_HOME/share/vespa/schema/[major-version].x/ otherwise * * @return the directory the schema files are stored in * @throws IOException if it is not possible to read schema files */ private File saveSchemasFromJar(File tmpBase, Version vespaVersion) throws IOException { Class<? extends SchemaValidators> schemaValidatorClass = this.getClass(); Enumeration<URL> uris = schemaValidatorClass.getClassLoader().getResources("schema"); if (uris == null) throw new IllegalArgumentException("Could not find XML schemas "); File tmpDir = createTempDirectory(tmpBase.toPath(), "vespa").toFile(); log.log(Level.FINE, () -> "Will save all XML schemas for " + vespaVersion + " to " + tmpDir); boolean schemasFound = false; while (uris.hasMoreElements()) { URL u = uris.nextElement(); if ("jar".equals(u.getProtocol())) { JarURLConnection jarConnection = (JarURLConnection) u.openConnection(); JarFile jarFile = jarConnection.getJarFile(); for (Enumeration<JarEntry> entries = jarFile.entries(); entries.hasMoreElements(); ) { JarEntry je = entries.nextElement(); if (je.getName().startsWith("schema/") && je.getName().endsWith(".rnc")) { schemasFound = true; writeContentsToFile(tmpDir, je.getName(), jarFile.getInputStream(je)); } } jarFile.close(); } else if ("bundle".equals(u.getProtocol())) { Bundle bundle = getBundle(schemaValidatorClass); if (bundle == null) { String pathPrefix = getDefaults().underVespaHome("share/vespa/schema/"); File schemaPath = new File(pathPrefix + "version/" + vespaVersion.getMajor() + ".x/schema/"); if (! 
schemaPath.exists()) { log.log(Level.INFO, "Found no schemas in " + schemaPath + ", fallback to schemas in " + pathPrefix); schemaPath = new File(pathPrefix); } log.log(Level.FINE, "Using schemas found in " + schemaPath); schemasFound = true; copySchemas(schemaPath, tmpDir); } else { log.log(Level.FINE, () -> String.format("Saving schemas for model bundle %s:%s", bundle.getSymbolicName(), bundle.getVersion())); for (Enumeration<URL> entries = bundle.findEntries("schema", "*.rnc", true); entries.hasMoreElements(); ) { URL url = entries.nextElement(); writeContentsToFile(tmpDir, url.getFile(), url.openStream()); schemasFound = true; } } } else if ("file".equals(u.getProtocol())) { File schemaPath = new File(u.getPath()); copySchemas(schemaPath, tmpDir); schemasFound = true; } } if ( ! schemasFound) { IOUtils.recursiveDeleteDir(tmpDir); throw new IllegalArgumentException("Could not find schemas for version " + vespaVersion); } return tmpDir; } private static void copySchemas(File from, File to) throws IOException { if (! from.exists()) throw new IOException("Could not find schema source directory '" + from + "'"); if (! from.isDirectory()) throw new IOException("Schema source '" + from + "' is not a directory"); File sourceFile = new File(from, servicesXmlSchemaName); if (! 
sourceFile.exists()) throw new IOException("Schema source file '" + sourceFile + "' not found"); IOUtils.copyDirectoryInto(from, to); } private static void writeContentsToFile(File outDir, String outFile, InputStream inputStream) throws IOException { String contents = IOUtils.readAll(new InputStreamReader(inputStream)); File out = new File(outDir, outFile); IOUtils.writeFile(out, contents, false); } private SchemaValidator createValidator(File schemaDir, String schemaFile) { try { File file = new File(schemaDir + File.separator + "schema" + File.separator + schemaFile); return new SchemaValidator(file, new BaseDeployLogger()); } catch (SAXException e) { throw new RuntimeException("Invalid schema '" + schemaFile + "'", e); } catch (IOException e) { throw new RuntimeException("IO error reading schema '" + schemaFile + "'", e); } } }
Should this be 0.07 for now?
private static ProtonConfig configFromDiskSetting(int diskGb) { return configFromDiskSetting(diskGb, 0.05); }
return configFromDiskSetting(diskGb, 0.05);
private static ProtonConfig configFromDiskSetting(int diskGb) { return configFromDiskSetting(diskGb, 0.07); }
class NodeResourcesTuningTest { private static final double delta = 0.00001; private static final double combinedFactor = 1 - 18.0/100; @Test public void require_that_hwinfo_disk_size_is_set() { ProtonConfig cfg = configFromDiskSetting(100); assertEquals(100 * GB, cfg.hwinfo().disk().size()); } @Test public void require_that_hwinfo_memory_size_is_set() { assertEquals(24 * GB, configFromMemorySetting(24 + reservedMemoryGb, 0).hwinfo().memory().size()); assertEquals(combinedFactor * 24 * GB, configFromMemorySetting(24 + reservedMemoryGb, ApplicationContainerCluster.heapSizePercentageOfTotalNodeMemoryWhenCombinedCluster*0.01).hwinfo().memory().size(), 1000); } @Test public void reserved_memory_on_content_node_is_0_5_gb() { assertEquals(0.5, reservedMemoryGb, delta); } private ProtonConfig getProtonMemoryConfig(List<Pair<String, String>> sdAndMode, double gb, int redundancy, int searchableCopies) { ProtonConfig.Builder builder = new ProtonConfig.Builder(); for (Pair<String, String> sdMode : sdAndMode) { builder.documentdb.add(new ProtonConfig.Documentdb.Builder() .inputdoctypename(sdMode.getFirst()) .configid("some/config/id/" + sdMode.getFirst()) .mode(ProtonConfig.Documentdb.Mode.Enum.valueOf(sdMode.getSecond()))); } return configFromMemorySetting(gb, builder); } private void verify_that_initial_numdocs_is_dependent_of_mode(int redundancy, int searchablecopies) { ProtonConfig cfg = getProtonMemoryConfig(Arrays.asList(new Pair<>("a", "INDEX"), new Pair<>("b", "STREAMING"), new Pair<>("c", "STORE_ONLY")), 24 + reservedMemoryGb, redundancy, searchablecopies); assertEquals(3, cfg.documentdb().size()); assertEquals(1024, cfg.documentdb(0).allocation().initialnumdocs()); assertEquals("a", cfg.documentdb(0).inputdoctypename()); assertEquals(24 * GB / 46, cfg.documentdb(1).allocation().initialnumdocs()); assertEquals("b", cfg.documentdb(1).inputdoctypename()); assertEquals(24 * GB / 46, cfg.documentdb(2).allocation().initialnumdocs()); assertEquals("c", 
cfg.documentdb(2).inputdoctypename()); } @Test public void require_that_initial_numdocs_is_dependent_of_mode_and_searchablecopies() { verify_that_initial_numdocs_is_dependent_of_mode(2,0); verify_that_initial_numdocs_is_dependent_of_mode(1,1); verify_that_initial_numdocs_is_dependent_of_mode(3, 2); verify_that_initial_numdocs_is_dependent_of_mode(3, 3); } @Test public void require_that_hwinfo_cpu_cores_is_set() { ProtonConfig cfg = configFromNumCoresSetting(24); assertEquals(24, cfg.hwinfo().cpu().cores()); } @Test public void require_that_num_search_threads_and_summary_threads_follow_cores() { ProtonConfig cfg = configFromNumCoresSetting(4.5); assertEquals(5, cfg.numsearcherthreads()); assertEquals(5, cfg.numsummarythreads()); assertEquals(1, cfg.numthreadspersearch()); } @Test public void require_that_num_search_threads_and_considers_explict_num_threads_per_search() { ProtonConfig cfg = configFromNumCoresSetting(4.5, 3); assertEquals(15, cfg.numsearcherthreads()); assertEquals(5, cfg.numsummarythreads()); assertEquals(3, cfg.numthreadspersearch()); } @Test public void require_that_fast_disk_is_reflected_in_proton_config() { ProtonConfig cfg = configFromDiskSetting(true); assertEquals(200, cfg.hwinfo().disk().writespeed(), delta); assertEquals(100, cfg.hwinfo().disk().slowwritespeedlimit(), delta); } @Test public void require_that_slow_disk_is_reflected_in_proton_config() { ProtonConfig cfg = configFromDiskSetting(false); assertEquals(40, cfg.hwinfo().disk().writespeed(), delta); assertEquals(100, cfg.hwinfo().disk().slowwritespeedlimit(), delta); } @Test public void require_that_document_store_maxfilesize_is_set_based_on_available_memory() { assertDocumentStoreMaxFileSize(256 * MB, 4); assertDocumentStoreMaxFileSize(256 * MB, 6); assertDocumentStoreMaxFileSize(256 * MB, 8); assertDocumentStoreMaxFileSize(256 * MB, 12); assertDocumentStoreMaxFileSize(512 * MB, 16); assertDocumentStoreMaxFileSize(1 * GB, 24); assertDocumentStoreMaxFileSize(1 * GB, 32); 
assertDocumentStoreMaxFileSize(1 * GB, 48); assertDocumentStoreMaxFileSize(1 * GB, 64); assertDocumentStoreMaxFileSize(4 * GB, 128); assertDocumentStoreMaxFileSize(4 * GB, 256); assertDocumentStoreMaxFileSize(4 * GB, 512); } @Test public void require_that_flush_strategy_memory_limits_are_set_based_on_available_memory() { assertFlushStrategyMemory(512 * MB, 4); assertFlushStrategyMemory(1 * GB, 8); assertFlushStrategyMemory(3 * GB, 24); assertFlushStrategyMemory(8 * GB, 64); } @Test public void require_that_flush_strategy_tls_size_is_set_based_on_available_disk() { assertFlushStrategyTlsSize(2 * GB, 10, 0.05); assertFlushStrategyTlsSize(7 * GB, 100, 0.07); assertFlushStrategyTlsSize(5 * GB, 100, 0.05); assertFlushStrategyTlsSize(35 * GB, 500, 0.07); assertFlushStrategyTlsSize(84 * GB, 1200, 0.07); assertFlushStrategyTlsSize(100 * GB, 1720, 0.07); assertFlushStrategyTlsSize(100 * GB, 24000, 0.07); } @Test public void require_that_summary_read_io_is_set_based_on_disk() { assertSummaryReadIo(ProtonConfig.Summary.Read.Io.DIRECTIO, true); assertSummaryReadIo(ProtonConfig.Summary.Read.Io.MMAP, false); } @Test public void require_that_search_read_mmap_advise_is_set_based_on_disk() { assertSearchReadAdvise(ProtonConfig.Search.Mmap.Advise.RANDOM, true); assertSearchReadAdvise(ProtonConfig.Search.Mmap.Advise.NORMAL, false); } @Test public void require_that_summary_cache_max_bytes_is_set_based_on_memory() { assertEquals(1*GB / 20, configFromMemorySetting(1 + reservedMemoryGb, 0).summary().cache().maxbytes()); assertEquals(256*GB / 20, configFromMemorySetting(256 + reservedMemoryGb, 0).summary().cache().maxbytes()); } @Test public void require_that_summary_cache_memory_is_reduced_with_combined_cluster() { assertEquals(combinedFactor * 1*GB / 20, configFromMemorySetting(1 + reservedMemoryGb, ApplicationContainerCluster.heapSizePercentageOfTotalNodeMemoryWhenCombinedCluster*0.01).summary().cache().maxbytes(), 1000); assertEquals(combinedFactor * 256*GB / 20, 
configFromMemorySetting(256 + reservedMemoryGb, ApplicationContainerCluster.heapSizePercentageOfTotalNodeMemoryWhenCombinedCluster*0.01).summary().cache().maxbytes(), 1000); } @Test public void require_that_docker_node_is_tagged_with_shared_disk() { assertSharedDisk(true, true); } private static void assertDocumentStoreMaxFileSize(long expFileSizeBytes, int wantedMemoryGb) { assertEquals(expFileSizeBytes, configFromMemorySetting(wantedMemoryGb + reservedMemoryGb, 0).summary().log().maxfilesize()); } private static void assertFlushStrategyMemory(long expMemoryBytes, int wantedMemoryGb) { assertEquals(expMemoryBytes, configFromMemorySetting(wantedMemoryGb + reservedMemoryGb, 0).flush().memory().maxmemory()); assertEquals(expMemoryBytes, configFromMemorySetting(wantedMemoryGb + reservedMemoryGb, 0).flush().memory().each().maxmemory()); } private static void assertFlushStrategyTlsSize(long expTlsSizeBytes, int diskGb, double tlsSizeFraction) { assertEquals(expTlsSizeBytes, configFromDiskSetting(diskGb, tlsSizeFraction).flush().memory().maxtlssize()); } private static void assertSummaryReadIo(ProtonConfig.Summary.Read.Io.Enum expValue, boolean fastDisk) { assertEquals(expValue, configFromDiskSetting(fastDisk).summary().read().io()); } private static void assertSearchReadAdvise(ProtonConfig.Search.Mmap.Advise.Enum expValue, boolean fastDisk) { assertEquals(expValue, configFromDiskSetting(fastDisk).search().mmap().advise()); } private static void assertSharedDisk(boolean sharedDisk, boolean docker) { assertEquals(sharedDisk, configFromEnvironmentType(docker).hwinfo().disk().shared()); } private static void assertWriteFilter(double expMemoryLimit, int memoryGb) { assertEquals(expMemoryLimit, configFromMemorySetting(memoryGb, 0).writefilter().memorylimit(), delta); } private static ProtonConfig configFromDiskSetting(boolean fastDisk) { return getConfig(new FlavorsConfig.Flavor.Builder().fastDisk(fastDisk)); } private static ProtonConfig configFromDiskSetting(int diskGb, 
double tlsSizeFraction) { return getConfig(new FlavorsConfig.Flavor.Builder().minDiskAvailableGb(diskGb), 0, tlsSizeFraction); } private static ProtonConfig configFromMemorySetting(double memoryGb, double fractionOfMemoryReserved) { return getConfig(new FlavorsConfig.Flavor.Builder().minMainMemoryAvailableGb(memoryGb), fractionOfMemoryReserved, 0.07); } private static ProtonConfig configFromMemorySetting(double memoryGb, ProtonConfig.Builder builder) { return getConfig(new FlavorsConfig.Flavor.Builder() .minMainMemoryAvailableGb(memoryGb), builder); } private static ProtonConfig configFromNumCoresSetting(double numCores) { return getConfig(new FlavorsConfig.Flavor.Builder().minCpuCores(numCores)); } private static ProtonConfig configFromNumCoresSetting(double numCores, int numThreadsPerSearch) { return getConfig(new FlavorsConfig.Flavor.Builder().minCpuCores(numCores), new ProtonConfig.Builder(), numThreadsPerSearch); } private static ProtonConfig configFromEnvironmentType(boolean docker) { String environment = (docker ? 
"DOCKER_CONTAINER" : "undefined"); return getConfig(new FlavorsConfig.Flavor.Builder().environment(environment)); } private static ProtonConfig getConfig(FlavorsConfig.Flavor.Builder flavorBuilder) { return getConfig(flavorBuilder, new ProtonConfig.Builder()); } private static ProtonConfig getConfig(FlavorsConfig.Flavor.Builder flavorBuilder, double fractionOfMemoryReserved, double tlsSizeFraction) { return getConfig(flavorBuilder, new ProtonConfig.Builder(), fractionOfMemoryReserved, tlsSizeFraction); } private static ProtonConfig getConfig(FlavorsConfig.Flavor.Builder flavorBuilder, ProtonConfig.Builder protonBuilder) { return getConfig(flavorBuilder, protonBuilder,1); } private static ProtonConfig getConfig(FlavorsConfig.Flavor.Builder flavorBuilder, ProtonConfig.Builder protonBuilder, double fractionOfMemoryReserved, double tlsSizeFraction) { return getConfig(flavorBuilder, protonBuilder, 1, fractionOfMemoryReserved, tlsSizeFraction); } private static ProtonConfig getConfig(FlavorsConfig.Flavor.Builder flavorBuilder, ProtonConfig.Builder protonBuilder, int numThreadsPerSearch) { return getConfig(flavorBuilder, protonBuilder, numThreadsPerSearch, 0, 0.07); } private static ProtonConfig getConfig(FlavorsConfig.Flavor.Builder flavorBuilder, ProtonConfig.Builder protonBuilder, int numThreadsPerSearch, double fractionOfMemoryReserved, double tlsSizeFraction) { flavorBuilder.name("my_flavor"); NodeResourcesTuning tuning = new NodeResourcesTuning(new Flavor(new FlavorsConfig.Flavor(flavorBuilder)).resources(), numThreadsPerSearch, fractionOfMemoryReserved, tlsSizeFraction); tuning.getConfig(protonBuilder); return new ProtonConfig(protonBuilder); } }
class NodeResourcesTuningTest { private static final double delta = 0.00001; private static final double combinedFactor = 1 - 18.0/100; @Test public void require_that_hwinfo_disk_size_is_set() { ProtonConfig cfg = configFromDiskSetting(100); assertEquals(100 * GB, cfg.hwinfo().disk().size()); } @Test public void require_that_hwinfo_memory_size_is_set() { assertEquals(24 * GB, configFromMemorySetting(24 + reservedMemoryGb, 0).hwinfo().memory().size()); assertEquals(combinedFactor * 24 * GB, configFromMemorySetting(24 + reservedMemoryGb, ApplicationContainerCluster.heapSizePercentageOfTotalNodeMemoryWhenCombinedCluster*0.01).hwinfo().memory().size(), 1000); } @Test public void reserved_memory_on_content_node_is_0_5_gb() { assertEquals(0.5, reservedMemoryGb, delta); } private ProtonConfig getProtonMemoryConfig(List<Pair<String, String>> sdAndMode, double gb, int redundancy, int searchableCopies) { ProtonConfig.Builder builder = new ProtonConfig.Builder(); for (Pair<String, String> sdMode : sdAndMode) { builder.documentdb.add(new ProtonConfig.Documentdb.Builder() .inputdoctypename(sdMode.getFirst()) .configid("some/config/id/" + sdMode.getFirst()) .mode(ProtonConfig.Documentdb.Mode.Enum.valueOf(sdMode.getSecond()))); } return configFromMemorySetting(gb, builder); } private void verify_that_initial_numdocs_is_dependent_of_mode(int redundancy, int searchablecopies) { ProtonConfig cfg = getProtonMemoryConfig(Arrays.asList(new Pair<>("a", "INDEX"), new Pair<>("b", "STREAMING"), new Pair<>("c", "STORE_ONLY")), 24 + reservedMemoryGb, redundancy, searchablecopies); assertEquals(3, cfg.documentdb().size()); assertEquals(1024, cfg.documentdb(0).allocation().initialnumdocs()); assertEquals("a", cfg.documentdb(0).inputdoctypename()); assertEquals(24 * GB / 46, cfg.documentdb(1).allocation().initialnumdocs()); assertEquals("b", cfg.documentdb(1).inputdoctypename()); assertEquals(24 * GB / 46, cfg.documentdb(2).allocation().initialnumdocs()); assertEquals("c", 
cfg.documentdb(2).inputdoctypename()); } @Test public void require_that_initial_numdocs_is_dependent_of_mode_and_searchablecopies() { verify_that_initial_numdocs_is_dependent_of_mode(2,0); verify_that_initial_numdocs_is_dependent_of_mode(1,1); verify_that_initial_numdocs_is_dependent_of_mode(3, 2); verify_that_initial_numdocs_is_dependent_of_mode(3, 3); } @Test public void require_that_hwinfo_cpu_cores_is_set() { ProtonConfig cfg = configFromNumCoresSetting(24); assertEquals(24, cfg.hwinfo().cpu().cores()); } @Test public void require_that_num_search_threads_and_summary_threads_follow_cores() { ProtonConfig cfg = configFromNumCoresSetting(4.5); assertEquals(5, cfg.numsearcherthreads()); assertEquals(5, cfg.numsummarythreads()); assertEquals(1, cfg.numthreadspersearch()); } @Test public void require_that_num_search_threads_and_considers_explict_num_threads_per_search() { ProtonConfig cfg = configFromNumCoresSetting(4.5, 3); assertEquals(15, cfg.numsearcherthreads()); assertEquals(5, cfg.numsummarythreads()); assertEquals(3, cfg.numthreadspersearch()); } @Test public void require_that_fast_disk_is_reflected_in_proton_config() { ProtonConfig cfg = configFromDiskSetting(true); assertEquals(200, cfg.hwinfo().disk().writespeed(), delta); assertEquals(100, cfg.hwinfo().disk().slowwritespeedlimit(), delta); } @Test public void require_that_slow_disk_is_reflected_in_proton_config() { ProtonConfig cfg = configFromDiskSetting(false); assertEquals(40, cfg.hwinfo().disk().writespeed(), delta); assertEquals(100, cfg.hwinfo().disk().slowwritespeedlimit(), delta); } @Test public void require_that_document_store_maxfilesize_is_set_based_on_available_memory() { assertDocumentStoreMaxFileSize(256 * MB, 4); assertDocumentStoreMaxFileSize(256 * MB, 6); assertDocumentStoreMaxFileSize(256 * MB, 8); assertDocumentStoreMaxFileSize(256 * MB, 12); assertDocumentStoreMaxFileSize(512 * MB, 16); assertDocumentStoreMaxFileSize(1 * GB, 24); assertDocumentStoreMaxFileSize(1 * GB, 32); 
assertDocumentStoreMaxFileSize(1 * GB, 48); assertDocumentStoreMaxFileSize(1 * GB, 64); assertDocumentStoreMaxFileSize(4 * GB, 128); assertDocumentStoreMaxFileSize(4 * GB, 256); assertDocumentStoreMaxFileSize(4 * GB, 512); } @Test public void require_that_flush_strategy_memory_limits_are_set_based_on_available_memory() { assertFlushStrategyMemory(512 * MB, 4); assertFlushStrategyMemory(1 * GB, 8); assertFlushStrategyMemory(3 * GB, 24); assertFlushStrategyMemory(8 * GB, 64); } @Test public void require_that_flush_strategy_tls_size_is_set_based_on_available_disk() { assertFlushStrategyTlsSize(2 * GB, 10, 0.05); assertFlushStrategyTlsSize(7 * GB, 100, 0.07); assertFlushStrategyTlsSize(5 * GB, 100, 0.05); assertFlushStrategyTlsSize(35 * GB, 500, 0.07); assertFlushStrategyTlsSize(84 * GB, 1200, 0.07); assertFlushStrategyTlsSize(100 * GB, 1720, 0.07); assertFlushStrategyTlsSize(100 * GB, 24000, 0.07); } @Test public void require_that_summary_read_io_is_set_based_on_disk() { assertSummaryReadIo(ProtonConfig.Summary.Read.Io.DIRECTIO, true); assertSummaryReadIo(ProtonConfig.Summary.Read.Io.MMAP, false); } @Test public void require_that_search_read_mmap_advise_is_set_based_on_disk() { assertSearchReadAdvise(ProtonConfig.Search.Mmap.Advise.RANDOM, true); assertSearchReadAdvise(ProtonConfig.Search.Mmap.Advise.NORMAL, false); } @Test public void require_that_summary_cache_max_bytes_is_set_based_on_memory() { assertEquals(1*GB / 20, configFromMemorySetting(1 + reservedMemoryGb, 0).summary().cache().maxbytes()); assertEquals(256*GB / 20, configFromMemorySetting(256 + reservedMemoryGb, 0).summary().cache().maxbytes()); } @Test public void require_that_summary_cache_memory_is_reduced_with_combined_cluster() { assertEquals(combinedFactor * 1*GB / 20, configFromMemorySetting(1 + reservedMemoryGb, ApplicationContainerCluster.heapSizePercentageOfTotalNodeMemoryWhenCombinedCluster*0.01).summary().cache().maxbytes(), 1000); assertEquals(combinedFactor * 256*GB / 20, 
configFromMemorySetting(256 + reservedMemoryGb, ApplicationContainerCluster.heapSizePercentageOfTotalNodeMemoryWhenCombinedCluster*0.01).summary().cache().maxbytes(), 1000); } @Test public void require_that_docker_node_is_tagged_with_shared_disk() { assertSharedDisk(true, true); } private static void assertDocumentStoreMaxFileSize(long expFileSizeBytes, int wantedMemoryGb) { assertEquals(expFileSizeBytes, configFromMemorySetting(wantedMemoryGb + reservedMemoryGb, 0).summary().log().maxfilesize()); } private static void assertFlushStrategyMemory(long expMemoryBytes, int wantedMemoryGb) { assertEquals(expMemoryBytes, configFromMemorySetting(wantedMemoryGb + reservedMemoryGb, 0).flush().memory().maxmemory()); assertEquals(expMemoryBytes, configFromMemorySetting(wantedMemoryGb + reservedMemoryGb, 0).flush().memory().each().maxmemory()); } private static void assertFlushStrategyTlsSize(long expTlsSizeBytes, int diskGb, double tlsSizeFraction) { assertEquals(expTlsSizeBytes, configFromDiskSetting(diskGb, tlsSizeFraction).flush().memory().maxtlssize()); } private static void assertSummaryReadIo(ProtonConfig.Summary.Read.Io.Enum expValue, boolean fastDisk) { assertEquals(expValue, configFromDiskSetting(fastDisk).summary().read().io()); } private static void assertSearchReadAdvise(ProtonConfig.Search.Mmap.Advise.Enum expValue, boolean fastDisk) { assertEquals(expValue, configFromDiskSetting(fastDisk).search().mmap().advise()); } private static void assertSharedDisk(boolean sharedDisk, boolean docker) { assertEquals(sharedDisk, configFromEnvironmentType(docker).hwinfo().disk().shared()); } private static void assertWriteFilter(double expMemoryLimit, int memoryGb) { assertEquals(expMemoryLimit, configFromMemorySetting(memoryGb, 0).writefilter().memorylimit(), delta); } private static ProtonConfig configFromDiskSetting(boolean fastDisk) { return getConfig(new FlavorsConfig.Flavor.Builder().fastDisk(fastDisk)); } private static ProtonConfig configFromDiskSetting(int diskGb, 
double tlsSizeFraction) { return getConfig(new FlavorsConfig.Flavor.Builder().minDiskAvailableGb(diskGb), 0, tlsSizeFraction); } private static ProtonConfig configFromMemorySetting(double memoryGb, double fractionOfMemoryReserved) { return getConfig(new FlavorsConfig.Flavor.Builder().minMainMemoryAvailableGb(memoryGb), fractionOfMemoryReserved, 0.07); } private static ProtonConfig configFromMemorySetting(double memoryGb, ProtonConfig.Builder builder) { return getConfig(new FlavorsConfig.Flavor.Builder() .minMainMemoryAvailableGb(memoryGb), builder); } private static ProtonConfig configFromNumCoresSetting(double numCores) { return getConfig(new FlavorsConfig.Flavor.Builder().minCpuCores(numCores)); } private static ProtonConfig configFromNumCoresSetting(double numCores, int numThreadsPerSearch) { return getConfig(new FlavorsConfig.Flavor.Builder().minCpuCores(numCores), new ProtonConfig.Builder(), numThreadsPerSearch); } private static ProtonConfig configFromEnvironmentType(boolean docker) { String environment = (docker ? 
"DOCKER_CONTAINER" : "undefined"); return getConfig(new FlavorsConfig.Flavor.Builder().environment(environment)); } private static ProtonConfig getConfig(FlavorsConfig.Flavor.Builder flavorBuilder) { return getConfig(flavorBuilder, new ProtonConfig.Builder()); } private static ProtonConfig getConfig(FlavorsConfig.Flavor.Builder flavorBuilder, double fractionOfMemoryReserved, double tlsSizeFraction) { return getConfig(flavorBuilder, new ProtonConfig.Builder(), fractionOfMemoryReserved, tlsSizeFraction); } private static ProtonConfig getConfig(FlavorsConfig.Flavor.Builder flavorBuilder, ProtonConfig.Builder protonBuilder) { return getConfig(flavorBuilder, protonBuilder,1); } private static ProtonConfig getConfig(FlavorsConfig.Flavor.Builder flavorBuilder, ProtonConfig.Builder protonBuilder, double fractionOfMemoryReserved, double tlsSizeFraction) { return getConfig(flavorBuilder, protonBuilder, 1, fractionOfMemoryReserved, tlsSizeFraction); } private static ProtonConfig getConfig(FlavorsConfig.Flavor.Builder flavorBuilder, ProtonConfig.Builder protonBuilder, int numThreadsPerSearch) { return getConfig(flavorBuilder, protonBuilder, numThreadsPerSearch, 0, 0.07); } private static ProtonConfig getConfig(FlavorsConfig.Flavor.Builder flavorBuilder, ProtonConfig.Builder protonBuilder, int numThreadsPerSearch, double fractionOfMemoryReserved, double tlsSizeFraction) { flavorBuilder.name("my_flavor"); NodeResourcesTuning tuning = new NodeResourcesTuning(new Flavor(new FlavorsConfig.Flavor(flavorBuilder)).resources(), numThreadsPerSearch, fractionOfMemoryReserved, tlsSizeFraction); tuning.getConfig(protonBuilder); return new ProtonConfig(protonBuilder); } }
Fixed
private static ProtonConfig configFromDiskSetting(int diskGb) { return configFromDiskSetting(diskGb, 0.05); }
return configFromDiskSetting(diskGb, 0.05);
private static ProtonConfig configFromDiskSetting(int diskGb) { return configFromDiskSetting(diskGb, 0.07); }
class NodeResourcesTuningTest { private static final double delta = 0.00001; private static final double combinedFactor = 1 - 18.0/100; @Test public void require_that_hwinfo_disk_size_is_set() { ProtonConfig cfg = configFromDiskSetting(100); assertEquals(100 * GB, cfg.hwinfo().disk().size()); } @Test public void require_that_hwinfo_memory_size_is_set() { assertEquals(24 * GB, configFromMemorySetting(24 + reservedMemoryGb, 0).hwinfo().memory().size()); assertEquals(combinedFactor * 24 * GB, configFromMemorySetting(24 + reservedMemoryGb, ApplicationContainerCluster.heapSizePercentageOfTotalNodeMemoryWhenCombinedCluster*0.01).hwinfo().memory().size(), 1000); } @Test public void reserved_memory_on_content_node_is_0_5_gb() { assertEquals(0.5, reservedMemoryGb, delta); } private ProtonConfig getProtonMemoryConfig(List<Pair<String, String>> sdAndMode, double gb, int redundancy, int searchableCopies) { ProtonConfig.Builder builder = new ProtonConfig.Builder(); for (Pair<String, String> sdMode : sdAndMode) { builder.documentdb.add(new ProtonConfig.Documentdb.Builder() .inputdoctypename(sdMode.getFirst()) .configid("some/config/id/" + sdMode.getFirst()) .mode(ProtonConfig.Documentdb.Mode.Enum.valueOf(sdMode.getSecond()))); } return configFromMemorySetting(gb, builder); } private void verify_that_initial_numdocs_is_dependent_of_mode(int redundancy, int searchablecopies) { ProtonConfig cfg = getProtonMemoryConfig(Arrays.asList(new Pair<>("a", "INDEX"), new Pair<>("b", "STREAMING"), new Pair<>("c", "STORE_ONLY")), 24 + reservedMemoryGb, redundancy, searchablecopies); assertEquals(3, cfg.documentdb().size()); assertEquals(1024, cfg.documentdb(0).allocation().initialnumdocs()); assertEquals("a", cfg.documentdb(0).inputdoctypename()); assertEquals(24 * GB / 46, cfg.documentdb(1).allocation().initialnumdocs()); assertEquals("b", cfg.documentdb(1).inputdoctypename()); assertEquals(24 * GB / 46, cfg.documentdb(2).allocation().initialnumdocs()); assertEquals("c", 
cfg.documentdb(2).inputdoctypename()); } @Test public void require_that_initial_numdocs_is_dependent_of_mode_and_searchablecopies() { verify_that_initial_numdocs_is_dependent_of_mode(2,0); verify_that_initial_numdocs_is_dependent_of_mode(1,1); verify_that_initial_numdocs_is_dependent_of_mode(3, 2); verify_that_initial_numdocs_is_dependent_of_mode(3, 3); } @Test public void require_that_hwinfo_cpu_cores_is_set() { ProtonConfig cfg = configFromNumCoresSetting(24); assertEquals(24, cfg.hwinfo().cpu().cores()); } @Test public void require_that_num_search_threads_and_summary_threads_follow_cores() { ProtonConfig cfg = configFromNumCoresSetting(4.5); assertEquals(5, cfg.numsearcherthreads()); assertEquals(5, cfg.numsummarythreads()); assertEquals(1, cfg.numthreadspersearch()); } @Test public void require_that_num_search_threads_and_considers_explict_num_threads_per_search() { ProtonConfig cfg = configFromNumCoresSetting(4.5, 3); assertEquals(15, cfg.numsearcherthreads()); assertEquals(5, cfg.numsummarythreads()); assertEquals(3, cfg.numthreadspersearch()); } @Test public void require_that_fast_disk_is_reflected_in_proton_config() { ProtonConfig cfg = configFromDiskSetting(true); assertEquals(200, cfg.hwinfo().disk().writespeed(), delta); assertEquals(100, cfg.hwinfo().disk().slowwritespeedlimit(), delta); } @Test public void require_that_slow_disk_is_reflected_in_proton_config() { ProtonConfig cfg = configFromDiskSetting(false); assertEquals(40, cfg.hwinfo().disk().writespeed(), delta); assertEquals(100, cfg.hwinfo().disk().slowwritespeedlimit(), delta); } @Test public void require_that_document_store_maxfilesize_is_set_based_on_available_memory() { assertDocumentStoreMaxFileSize(256 * MB, 4); assertDocumentStoreMaxFileSize(256 * MB, 6); assertDocumentStoreMaxFileSize(256 * MB, 8); assertDocumentStoreMaxFileSize(256 * MB, 12); assertDocumentStoreMaxFileSize(512 * MB, 16); assertDocumentStoreMaxFileSize(1 * GB, 24); assertDocumentStoreMaxFileSize(1 * GB, 32); 
assertDocumentStoreMaxFileSize(1 * GB, 48); assertDocumentStoreMaxFileSize(1 * GB, 64); assertDocumentStoreMaxFileSize(4 * GB, 128); assertDocumentStoreMaxFileSize(4 * GB, 256); assertDocumentStoreMaxFileSize(4 * GB, 512); } @Test public void require_that_flush_strategy_memory_limits_are_set_based_on_available_memory() { assertFlushStrategyMemory(512 * MB, 4); assertFlushStrategyMemory(1 * GB, 8); assertFlushStrategyMemory(3 * GB, 24); assertFlushStrategyMemory(8 * GB, 64); } @Test public void require_that_flush_strategy_tls_size_is_set_based_on_available_disk() { assertFlushStrategyTlsSize(2 * GB, 10, 0.05); assertFlushStrategyTlsSize(7 * GB, 100, 0.07); assertFlushStrategyTlsSize(5 * GB, 100, 0.05); assertFlushStrategyTlsSize(35 * GB, 500, 0.07); assertFlushStrategyTlsSize(84 * GB, 1200, 0.07); assertFlushStrategyTlsSize(100 * GB, 1720, 0.07); assertFlushStrategyTlsSize(100 * GB, 24000, 0.07); } @Test public void require_that_summary_read_io_is_set_based_on_disk() { assertSummaryReadIo(ProtonConfig.Summary.Read.Io.DIRECTIO, true); assertSummaryReadIo(ProtonConfig.Summary.Read.Io.MMAP, false); } @Test public void require_that_search_read_mmap_advise_is_set_based_on_disk() { assertSearchReadAdvise(ProtonConfig.Search.Mmap.Advise.RANDOM, true); assertSearchReadAdvise(ProtonConfig.Search.Mmap.Advise.NORMAL, false); } @Test public void require_that_summary_cache_max_bytes_is_set_based_on_memory() { assertEquals(1*GB / 20, configFromMemorySetting(1 + reservedMemoryGb, 0).summary().cache().maxbytes()); assertEquals(256*GB / 20, configFromMemorySetting(256 + reservedMemoryGb, 0).summary().cache().maxbytes()); } @Test public void require_that_summary_cache_memory_is_reduced_with_combined_cluster() { assertEquals(combinedFactor * 1*GB / 20, configFromMemorySetting(1 + reservedMemoryGb, ApplicationContainerCluster.heapSizePercentageOfTotalNodeMemoryWhenCombinedCluster*0.01).summary().cache().maxbytes(), 1000); assertEquals(combinedFactor * 256*GB / 20, 
configFromMemorySetting(256 + reservedMemoryGb, ApplicationContainerCluster.heapSizePercentageOfTotalNodeMemoryWhenCombinedCluster*0.01).summary().cache().maxbytes(), 1000); } @Test public void require_that_docker_node_is_tagged_with_shared_disk() { assertSharedDisk(true, true); } private static void assertDocumentStoreMaxFileSize(long expFileSizeBytes, int wantedMemoryGb) { assertEquals(expFileSizeBytes, configFromMemorySetting(wantedMemoryGb + reservedMemoryGb, 0).summary().log().maxfilesize()); } private static void assertFlushStrategyMemory(long expMemoryBytes, int wantedMemoryGb) { assertEquals(expMemoryBytes, configFromMemorySetting(wantedMemoryGb + reservedMemoryGb, 0).flush().memory().maxmemory()); assertEquals(expMemoryBytes, configFromMemorySetting(wantedMemoryGb + reservedMemoryGb, 0).flush().memory().each().maxmemory()); } private static void assertFlushStrategyTlsSize(long expTlsSizeBytes, int diskGb, double tlsSizeFraction) { assertEquals(expTlsSizeBytes, configFromDiskSetting(diskGb, tlsSizeFraction).flush().memory().maxtlssize()); } private static void assertSummaryReadIo(ProtonConfig.Summary.Read.Io.Enum expValue, boolean fastDisk) { assertEquals(expValue, configFromDiskSetting(fastDisk).summary().read().io()); } private static void assertSearchReadAdvise(ProtonConfig.Search.Mmap.Advise.Enum expValue, boolean fastDisk) { assertEquals(expValue, configFromDiskSetting(fastDisk).search().mmap().advise()); } private static void assertSharedDisk(boolean sharedDisk, boolean docker) { assertEquals(sharedDisk, configFromEnvironmentType(docker).hwinfo().disk().shared()); } private static void assertWriteFilter(double expMemoryLimit, int memoryGb) { assertEquals(expMemoryLimit, configFromMemorySetting(memoryGb, 0).writefilter().memorylimit(), delta); } private static ProtonConfig configFromDiskSetting(boolean fastDisk) { return getConfig(new FlavorsConfig.Flavor.Builder().fastDisk(fastDisk)); } private static ProtonConfig configFromDiskSetting(int diskGb, 
double tlsSizeFraction) { return getConfig(new FlavorsConfig.Flavor.Builder().minDiskAvailableGb(diskGb), 0, tlsSizeFraction); } private static ProtonConfig configFromMemorySetting(double memoryGb, double fractionOfMemoryReserved) { return getConfig(new FlavorsConfig.Flavor.Builder().minMainMemoryAvailableGb(memoryGb), fractionOfMemoryReserved, 0.07); } private static ProtonConfig configFromMemorySetting(double memoryGb, ProtonConfig.Builder builder) { return getConfig(new FlavorsConfig.Flavor.Builder() .minMainMemoryAvailableGb(memoryGb), builder); } private static ProtonConfig configFromNumCoresSetting(double numCores) { return getConfig(new FlavorsConfig.Flavor.Builder().minCpuCores(numCores)); } private static ProtonConfig configFromNumCoresSetting(double numCores, int numThreadsPerSearch) { return getConfig(new FlavorsConfig.Flavor.Builder().minCpuCores(numCores), new ProtonConfig.Builder(), numThreadsPerSearch); } private static ProtonConfig configFromEnvironmentType(boolean docker) { String environment = (docker ? 
"DOCKER_CONTAINER" : "undefined"); return getConfig(new FlavorsConfig.Flavor.Builder().environment(environment)); } private static ProtonConfig getConfig(FlavorsConfig.Flavor.Builder flavorBuilder) { return getConfig(flavorBuilder, new ProtonConfig.Builder()); } private static ProtonConfig getConfig(FlavorsConfig.Flavor.Builder flavorBuilder, double fractionOfMemoryReserved, double tlsSizeFraction) { return getConfig(flavorBuilder, new ProtonConfig.Builder(), fractionOfMemoryReserved, tlsSizeFraction); } private static ProtonConfig getConfig(FlavorsConfig.Flavor.Builder flavorBuilder, ProtonConfig.Builder protonBuilder) { return getConfig(flavorBuilder, protonBuilder,1); } private static ProtonConfig getConfig(FlavorsConfig.Flavor.Builder flavorBuilder, ProtonConfig.Builder protonBuilder, double fractionOfMemoryReserved, double tlsSizeFraction) { return getConfig(flavorBuilder, protonBuilder, 1, fractionOfMemoryReserved, tlsSizeFraction); } private static ProtonConfig getConfig(FlavorsConfig.Flavor.Builder flavorBuilder, ProtonConfig.Builder protonBuilder, int numThreadsPerSearch) { return getConfig(flavorBuilder, protonBuilder, numThreadsPerSearch, 0, 0.07); } private static ProtonConfig getConfig(FlavorsConfig.Flavor.Builder flavorBuilder, ProtonConfig.Builder protonBuilder, int numThreadsPerSearch, double fractionOfMemoryReserved, double tlsSizeFraction) { flavorBuilder.name("my_flavor"); NodeResourcesTuning tuning = new NodeResourcesTuning(new Flavor(new FlavorsConfig.Flavor(flavorBuilder)).resources(), numThreadsPerSearch, fractionOfMemoryReserved, tlsSizeFraction); tuning.getConfig(protonBuilder); return new ProtonConfig(protonBuilder); } }
class NodeResourcesTuningTest { private static final double delta = 0.00001; private static final double combinedFactor = 1 - 18.0/100; @Test public void require_that_hwinfo_disk_size_is_set() { ProtonConfig cfg = configFromDiskSetting(100); assertEquals(100 * GB, cfg.hwinfo().disk().size()); } @Test public void require_that_hwinfo_memory_size_is_set() { assertEquals(24 * GB, configFromMemorySetting(24 + reservedMemoryGb, 0).hwinfo().memory().size()); assertEquals(combinedFactor * 24 * GB, configFromMemorySetting(24 + reservedMemoryGb, ApplicationContainerCluster.heapSizePercentageOfTotalNodeMemoryWhenCombinedCluster*0.01).hwinfo().memory().size(), 1000); } @Test public void reserved_memory_on_content_node_is_0_5_gb() { assertEquals(0.5, reservedMemoryGb, delta); } private ProtonConfig getProtonMemoryConfig(List<Pair<String, String>> sdAndMode, double gb, int redundancy, int searchableCopies) { ProtonConfig.Builder builder = new ProtonConfig.Builder(); for (Pair<String, String> sdMode : sdAndMode) { builder.documentdb.add(new ProtonConfig.Documentdb.Builder() .inputdoctypename(sdMode.getFirst()) .configid("some/config/id/" + sdMode.getFirst()) .mode(ProtonConfig.Documentdb.Mode.Enum.valueOf(sdMode.getSecond()))); } return configFromMemorySetting(gb, builder); } private void verify_that_initial_numdocs_is_dependent_of_mode(int redundancy, int searchablecopies) { ProtonConfig cfg = getProtonMemoryConfig(Arrays.asList(new Pair<>("a", "INDEX"), new Pair<>("b", "STREAMING"), new Pair<>("c", "STORE_ONLY")), 24 + reservedMemoryGb, redundancy, searchablecopies); assertEquals(3, cfg.documentdb().size()); assertEquals(1024, cfg.documentdb(0).allocation().initialnumdocs()); assertEquals("a", cfg.documentdb(0).inputdoctypename()); assertEquals(24 * GB / 46, cfg.documentdb(1).allocation().initialnumdocs()); assertEquals("b", cfg.documentdb(1).inputdoctypename()); assertEquals(24 * GB / 46, cfg.documentdb(2).allocation().initialnumdocs()); assertEquals("c", 
cfg.documentdb(2).inputdoctypename()); } @Test public void require_that_initial_numdocs_is_dependent_of_mode_and_searchablecopies() { verify_that_initial_numdocs_is_dependent_of_mode(2,0); verify_that_initial_numdocs_is_dependent_of_mode(1,1); verify_that_initial_numdocs_is_dependent_of_mode(3, 2); verify_that_initial_numdocs_is_dependent_of_mode(3, 3); } @Test public void require_that_hwinfo_cpu_cores_is_set() { ProtonConfig cfg = configFromNumCoresSetting(24); assertEquals(24, cfg.hwinfo().cpu().cores()); } @Test public void require_that_num_search_threads_and_summary_threads_follow_cores() { ProtonConfig cfg = configFromNumCoresSetting(4.5); assertEquals(5, cfg.numsearcherthreads()); assertEquals(5, cfg.numsummarythreads()); assertEquals(1, cfg.numthreadspersearch()); } @Test public void require_that_num_search_threads_and_considers_explict_num_threads_per_search() { ProtonConfig cfg = configFromNumCoresSetting(4.5, 3); assertEquals(15, cfg.numsearcherthreads()); assertEquals(5, cfg.numsummarythreads()); assertEquals(3, cfg.numthreadspersearch()); } @Test public void require_that_fast_disk_is_reflected_in_proton_config() { ProtonConfig cfg = configFromDiskSetting(true); assertEquals(200, cfg.hwinfo().disk().writespeed(), delta); assertEquals(100, cfg.hwinfo().disk().slowwritespeedlimit(), delta); } @Test public void require_that_slow_disk_is_reflected_in_proton_config() { ProtonConfig cfg = configFromDiskSetting(false); assertEquals(40, cfg.hwinfo().disk().writespeed(), delta); assertEquals(100, cfg.hwinfo().disk().slowwritespeedlimit(), delta); } @Test public void require_that_document_store_maxfilesize_is_set_based_on_available_memory() { assertDocumentStoreMaxFileSize(256 * MB, 4); assertDocumentStoreMaxFileSize(256 * MB, 6); assertDocumentStoreMaxFileSize(256 * MB, 8); assertDocumentStoreMaxFileSize(256 * MB, 12); assertDocumentStoreMaxFileSize(512 * MB, 16); assertDocumentStoreMaxFileSize(1 * GB, 24); assertDocumentStoreMaxFileSize(1 * GB, 32); 
assertDocumentStoreMaxFileSize(1 * GB, 48); assertDocumentStoreMaxFileSize(1 * GB, 64); assertDocumentStoreMaxFileSize(4 * GB, 128); assertDocumentStoreMaxFileSize(4 * GB, 256); assertDocumentStoreMaxFileSize(4 * GB, 512); } @Test public void require_that_flush_strategy_memory_limits_are_set_based_on_available_memory() { assertFlushStrategyMemory(512 * MB, 4); assertFlushStrategyMemory(1 * GB, 8); assertFlushStrategyMemory(3 * GB, 24); assertFlushStrategyMemory(8 * GB, 64); } @Test public void require_that_flush_strategy_tls_size_is_set_based_on_available_disk() { assertFlushStrategyTlsSize(2 * GB, 10, 0.05); assertFlushStrategyTlsSize(7 * GB, 100, 0.07); assertFlushStrategyTlsSize(5 * GB, 100, 0.05); assertFlushStrategyTlsSize(35 * GB, 500, 0.07); assertFlushStrategyTlsSize(84 * GB, 1200, 0.07); assertFlushStrategyTlsSize(100 * GB, 1720, 0.07); assertFlushStrategyTlsSize(100 * GB, 24000, 0.07); } @Test public void require_that_summary_read_io_is_set_based_on_disk() { assertSummaryReadIo(ProtonConfig.Summary.Read.Io.DIRECTIO, true); assertSummaryReadIo(ProtonConfig.Summary.Read.Io.MMAP, false); } @Test public void require_that_search_read_mmap_advise_is_set_based_on_disk() { assertSearchReadAdvise(ProtonConfig.Search.Mmap.Advise.RANDOM, true); assertSearchReadAdvise(ProtonConfig.Search.Mmap.Advise.NORMAL, false); } @Test public void require_that_summary_cache_max_bytes_is_set_based_on_memory() { assertEquals(1*GB / 20, configFromMemorySetting(1 + reservedMemoryGb, 0).summary().cache().maxbytes()); assertEquals(256*GB / 20, configFromMemorySetting(256 + reservedMemoryGb, 0).summary().cache().maxbytes()); } @Test public void require_that_summary_cache_memory_is_reduced_with_combined_cluster() { assertEquals(combinedFactor * 1*GB / 20, configFromMemorySetting(1 + reservedMemoryGb, ApplicationContainerCluster.heapSizePercentageOfTotalNodeMemoryWhenCombinedCluster*0.01).summary().cache().maxbytes(), 1000); assertEquals(combinedFactor * 256*GB / 20, 
configFromMemorySetting(256 + reservedMemoryGb, ApplicationContainerCluster.heapSizePercentageOfTotalNodeMemoryWhenCombinedCluster*0.01).summary().cache().maxbytes(), 1000); } @Test public void require_that_docker_node_is_tagged_with_shared_disk() { assertSharedDisk(true, true); } private static void assertDocumentStoreMaxFileSize(long expFileSizeBytes, int wantedMemoryGb) { assertEquals(expFileSizeBytes, configFromMemorySetting(wantedMemoryGb + reservedMemoryGb, 0).summary().log().maxfilesize()); } private static void assertFlushStrategyMemory(long expMemoryBytes, int wantedMemoryGb) { assertEquals(expMemoryBytes, configFromMemorySetting(wantedMemoryGb + reservedMemoryGb, 0).flush().memory().maxmemory()); assertEquals(expMemoryBytes, configFromMemorySetting(wantedMemoryGb + reservedMemoryGb, 0).flush().memory().each().maxmemory()); } private static void assertFlushStrategyTlsSize(long expTlsSizeBytes, int diskGb, double tlsSizeFraction) { assertEquals(expTlsSizeBytes, configFromDiskSetting(diskGb, tlsSizeFraction).flush().memory().maxtlssize()); } private static void assertSummaryReadIo(ProtonConfig.Summary.Read.Io.Enum expValue, boolean fastDisk) { assertEquals(expValue, configFromDiskSetting(fastDisk).summary().read().io()); } private static void assertSearchReadAdvise(ProtonConfig.Search.Mmap.Advise.Enum expValue, boolean fastDisk) { assertEquals(expValue, configFromDiskSetting(fastDisk).search().mmap().advise()); } private static void assertSharedDisk(boolean sharedDisk, boolean docker) { assertEquals(sharedDisk, configFromEnvironmentType(docker).hwinfo().disk().shared()); } private static void assertWriteFilter(double expMemoryLimit, int memoryGb) { assertEquals(expMemoryLimit, configFromMemorySetting(memoryGb, 0).writefilter().memorylimit(), delta); } private static ProtonConfig configFromDiskSetting(boolean fastDisk) { return getConfig(new FlavorsConfig.Flavor.Builder().fastDisk(fastDisk)); } private static ProtonConfig configFromDiskSetting(int diskGb, 
double tlsSizeFraction) { return getConfig(new FlavorsConfig.Flavor.Builder().minDiskAvailableGb(diskGb), 0, tlsSizeFraction); } private static ProtonConfig configFromMemorySetting(double memoryGb, double fractionOfMemoryReserved) { return getConfig(new FlavorsConfig.Flavor.Builder().minMainMemoryAvailableGb(memoryGb), fractionOfMemoryReserved, 0.07); } private static ProtonConfig configFromMemorySetting(double memoryGb, ProtonConfig.Builder builder) { return getConfig(new FlavorsConfig.Flavor.Builder() .minMainMemoryAvailableGb(memoryGb), builder); } private static ProtonConfig configFromNumCoresSetting(double numCores) { return getConfig(new FlavorsConfig.Flavor.Builder().minCpuCores(numCores)); } private static ProtonConfig configFromNumCoresSetting(double numCores, int numThreadsPerSearch) { return getConfig(new FlavorsConfig.Flavor.Builder().minCpuCores(numCores), new ProtonConfig.Builder(), numThreadsPerSearch); } private static ProtonConfig configFromEnvironmentType(boolean docker) { String environment = (docker ? 
"DOCKER_CONTAINER" : "undefined"); return getConfig(new FlavorsConfig.Flavor.Builder().environment(environment)); } private static ProtonConfig getConfig(FlavorsConfig.Flavor.Builder flavorBuilder) { return getConfig(flavorBuilder, new ProtonConfig.Builder()); } private static ProtonConfig getConfig(FlavorsConfig.Flavor.Builder flavorBuilder, double fractionOfMemoryReserved, double tlsSizeFraction) { return getConfig(flavorBuilder, new ProtonConfig.Builder(), fractionOfMemoryReserved, tlsSizeFraction); } private static ProtonConfig getConfig(FlavorsConfig.Flavor.Builder flavorBuilder, ProtonConfig.Builder protonBuilder) { return getConfig(flavorBuilder, protonBuilder,1); } private static ProtonConfig getConfig(FlavorsConfig.Flavor.Builder flavorBuilder, ProtonConfig.Builder protonBuilder, double fractionOfMemoryReserved, double tlsSizeFraction) { return getConfig(flavorBuilder, protonBuilder, 1, fractionOfMemoryReserved, tlsSizeFraction); } private static ProtonConfig getConfig(FlavorsConfig.Flavor.Builder flavorBuilder, ProtonConfig.Builder protonBuilder, int numThreadsPerSearch) { return getConfig(flavorBuilder, protonBuilder, numThreadsPerSearch, 0, 0.07); } private static ProtonConfig getConfig(FlavorsConfig.Flavor.Builder flavorBuilder, ProtonConfig.Builder protonBuilder, int numThreadsPerSearch, double fractionOfMemoryReserved, double tlsSizeFraction) { flavorBuilder.name("my_flavor"); NodeResourcesTuning tuning = new NodeResourcesTuning(new Flavor(new FlavorsConfig.Flavor(flavorBuilder)).resources(), numThreadsPerSearch, fractionOfMemoryReserved, tlsSizeFraction); tuning.getConfig(protonBuilder); return new ProtonConfig(protonBuilder); } }
```suggestion throw new UncheckedIOException(e); ```
public void storeVespaLogs(RunId id) { Run run = run(id); if ( ! id.type().isProduction()) { try (InputStream logs = getVespaLogsFromLogserver(run, 0, false)) { controller.serviceRegistry().runDataStore().putLogs(id, false, logs); } catch (IOException e) { throw new UncheckedTimeoutException(e); } } if (id.type().isTest()) { try (InputStream logs = getVespaLogsFromLogserver(run, 0, true)) { controller.serviceRegistry().runDataStore().putLogs(id, true, logs); } catch (IOException e) { throw new UncheckedIOException(e); } } }
throw new UncheckedTimeoutException(e);
public void storeVespaLogs(RunId id) { Run run = run(id); if ( ! id.type().isProduction()) { try (InputStream logs = getVespaLogsFromLogserver(run, 0, false)) { controller.serviceRegistry().runDataStore().putLogs(id, false, logs); } catch (IOException e) { throw new UncheckedIOException(e); } } if (id.type().isTest()) { try (InputStream logs = getVespaLogsFromLogserver(run, 0, true)) { controller.serviceRegistry().runDataStore().putLogs(id, true, logs); } catch (IOException e) { throw new UncheckedIOException(e); } } }
class JobController { public static final Duration maxHistoryAge = Duration.ofDays(60); private static final Logger log = Logger.getLogger(JobController.class.getName()); private final int historyLength; private final Controller controller; private final CuratorDb curator; private final BufferedLogStore logs; private final TesterCloud cloud; private final JobMetrics metric; private final AtomicReference<Consumer<Run>> runner = new AtomicReference<>(__ -> { }); public JobController(Controller controller) { this.historyLength = controller.system().isCd() ? 256 : 64; this.controller = controller; this.curator = controller.curator(); this.logs = new BufferedLogStore(curator, controller.serviceRegistry().runDataStore()); this.cloud = controller.serviceRegistry().testerCloud(); this.metric = new JobMetrics(controller.metric(), controller::system); } public TesterCloud cloud() { return cloud; } public int historyLength() { return historyLength; } public void setRunner(Consumer<Run> runner) { this.runner.set(runner); } /** Rewrite all job data with the newest format. */ public void updateStorage() { for (ApplicationId id : instances()) for (JobType type : jobs(id)) { locked(id, type, runs -> { curator.readLastRun(id, type).ifPresent(curator::writeLastRun); }); } } /** Returns all entries currently logged for the given run. */ public Optional<RunLog> details(RunId id) { return details(id, -1); } /** Returns the logged entries for the given run, which are after the given id threshold. */ public Optional<RunLog> details(RunId id, long after) { try (Mutex __ = curator.lock(id.application(), id.type())) { Run run = runs(id.application(), id.type()).get(id); if (run == null) return Optional.empty(); return active(id).isPresent() ? Optional.of(logs.readActive(id.application(), id.type(), after)) : logs.readFinished(id, after); } } /** Stores the given log entries for the given run and step. 
*/ public void log(RunId id, Step step, List<LogEntry> entries) { locked(id, __ -> { logs.append(id.application(), id.type(), step, entries, true); return __; }); } /** Stores the given log messages for the given run and step. */ public void log(RunId id, Step step, Level level, List<String> messages) { log(id, step, messages.stream() .map(message -> new LogEntry(0, controller.clock().instant(), LogEntry.typeOf(level), message)) .collect(toList())); } /** Stores the given log message for the given run and step. */ public void log(RunId id, Step step, Level level, String message) { log(id, step, level, Collections.singletonList(message)); } /** Fetches any new Vespa log entries, and records the timestamp of the last of these, for continuation. */ public void updateVespaLog(RunId id) { locked(id, run -> { if ( ! run.hasStep(copyVespaLogs)) return run; storeVespaLogs(id); ZoneId zone = id.type().zone(); Optional<Deployment> deployment = Optional.ofNullable(controller.applications().requireInstance(id.application()) .deployments().get(zone)); if (deployment.isEmpty() || deployment.get().at().isBefore(run.start())) return run; List<LogEntry> log; Instant deployedAt; Instant from; if ( ! run.id().type().isProduction()) { deployedAt = run.stepInfo(installInitialReal).or(() -> run.stepInfo(installReal)).flatMap(StepInfo::startTime).orElseThrow(); from = run.lastVespaLogTimestamp().isAfter(run.start()) ? run.lastVespaLogTimestamp() : deployedAt.minusSeconds(10); log = LogEntry.parseVespaLog(controller.serviceRegistry().configServer() .getLogs(new DeploymentId(id.application(), zone), Map.of("from", Long.toString(from.toEpochMilli()))), from); } else log = List.of(); if (id.type().isTest()) { deployedAt = run.stepInfo(installTester).flatMap(StepInfo::startTime).orElseThrow(); from = run.lastVespaLogTimestamp().isAfter(run.start()) ? 
run.lastVespaLogTimestamp() : deployedAt.minusSeconds(10); List<LogEntry> testerLog = LogEntry.parseVespaLog(controller.serviceRegistry().configServer() .getLogs(new DeploymentId(id.tester().id(), zone), Map.of("from", Long.toString(from.toEpochMilli()))), from); Instant justNow = controller.clock().instant().minusSeconds(2); log = Stream.concat(log.stream(), testerLog.stream()) .filter(entry -> entry.at().isBefore(justNow)) .sorted(comparing(LogEntry::at)) .collect(toUnmodifiableList()); } if (log.isEmpty()) return run; logs.append(id.application(), id.type(), Step.copyVespaLogs, log, false); return run.with(log.get(log.size() - 1).at()); }); } public InputStream getVespaLogs(RunId id, long fromMillis, boolean tester) { Run run = run(id); return run.stepStatus(copyVespaLogs).map(succeeded::equals).orElse(false) ? controller.serviceRegistry().runDataStore().getLogs(id, tester) : getVespaLogsFromLogserver(run, fromMillis, tester); } public static Optional<Instant> deploymentCompletedAt(Run run, boolean tester) { return (tester ? run.stepInfo(installTester) : run.stepInfo(installInitialReal).or(() -> run.stepInfo(installReal))) .flatMap(StepInfo::startTime); } private InputStream getVespaLogsFromLogserver(Run run, long fromMillis, boolean tester) { long deploymentCompletedAtMillis = deploymentCompletedAt(run, tester).orElse(Instant.EPOCH).toEpochMilli(); return controller.serviceRegistry().configServer().getLogs(new DeploymentId(tester ? run.id().tester().id() : run.id().application(), run.id().type().zone()), Map.of("from", Long.toString(Math.max(fromMillis, deploymentCompletedAtMillis)), "to", Long.toString(run.end().orElse(controller.clock().instant()).toEpochMilli()))); } /** Fetches any new test log entries, and records the id of the last of these, for continuation. 
*/ public void updateTestLog(RunId id) { locked(id, run -> { Optional<Step> step = Stream.of(endStagingSetup, endTests) .filter(run.readySteps()::contains) .findAny(); if (step.isEmpty()) return run; List<LogEntry> entries = cloud.getLog(new DeploymentId(id.tester().id(), id.type().zone()), run.lastTestLogEntry()); if (entries.isEmpty()) return run; logs.append(id.application(), id.type(), step.get(), entries, false); return run.with(entries.stream().mapToLong(LogEntry::id).max().getAsLong()); }); } public void updateTestReport(RunId id) { locked(id, run -> { Optional<TestReport> report = cloud.getTestReport(new DeploymentId(id.tester().id(), id.type().zone())); if (report.isEmpty()) { return run; } logs.writeTestReport(id, report.get()); return run; }); } public Optional<String> getTestReports(RunId id) { return logs.readTestReports(id); } /** Stores the given certificate as the tester certificate for this run, or throws if it's already set. */ public void storeTesterCertificate(RunId id, X509Certificate testerCertificate) { locked(id, run -> run.with(testerCertificate)); } /** Returns a list of all instances of applications which have registered. */ public List<ApplicationId> instances() { return controller.applications().readable().stream() .flatMap(application -> application.instances().values().stream()) .map(Instance::id) .collect(toUnmodifiableList()); } /** Returns all job types which have been run for the given application. */ private List<JobType> jobs(ApplicationId id) { return JobType.allIn(controller.zoneRegistry()).stream() .filter(type -> last(id, type).isPresent()) .collect(toUnmodifiableList()); } /** Returns an immutable map of all known runs for the given application and job type. */ public NavigableMap<RunId, Run> runs(JobId id) { return runs(id.application(), id.type()); } /** Lists the start time of non-redeployment runs of the given job, in order of increasing age. 
*/ public List<Instant> jobStarts(JobId id) { return runs(id).descendingMap().values().stream() .filter(run -> ! run.isRedeployment()) .map(Run::start) .collect(toUnmodifiableList()); } /** Returns when given deployment last started deploying, falling back to time of deployment if it cannot be determined from job runs */ public Instant lastDeploymentStart(ApplicationId instanceId, Deployment deployment) { return jobStarts(new JobId(instanceId, JobType.deploymentTo(deployment.zone()))).stream() .findFirst() .orElseGet(deployment::at); } /** Returns an immutable map of all known runs for the given application and job type. */ public NavigableMap<RunId, Run> runs(ApplicationId id, JobType type) { ImmutableSortedMap.Builder<RunId, Run> runs = ImmutableSortedMap.orderedBy(Comparator.comparing(RunId::number)); Optional<Run> last = last(id, type); curator.readHistoricRuns(id, type).forEach((runId, run) -> { if (last.isEmpty() || ! runId.equals(last.get().id())) runs.put(runId, run); }); last.ifPresent(run -> runs.put(run.id(), run)); return runs.build(); } /** Returns the run with the given id, or throws if no such run exists. */ public Run run(RunId id) { return runs(id.application(), id.type()).values().stream() .filter(run -> run.id().equals(id)) .findAny() .orElseThrow(() -> new NoSuchElementException("no run with id '" + id + "' exists")); } /** Returns the last run of the given type, for the given application, if one has been run. */ public Optional<Run> last(JobId job) { return curator.readLastRun(job.application(), job.type()); } /** Returns the last run of the given type, for the given application, if one has been run. */ public Optional<Run> last(ApplicationId id, JobType type) { return curator.readLastRun(id, type); } /** Returns the last completed of the given job. */ public Optional<Run> lastCompleted(JobId id) { return JobStatus.lastCompleted(runs(id)); } /** Returns the first failing of the given job. 
*/ public Optional<Run> firstFailing(JobId id) { return JobStatus.firstFailing(runs(id)); } /** Returns the last success of the given job. */ public Optional<Run> lastSuccess(JobId id) { return JobStatus.lastSuccess(runs(id)); } /** Returns the run with the given id, provided it is still active. */ public Optional<Run> active(RunId id) { return last(id.application(), id.type()) .filter(run -> ! run.hasEnded()) .filter(run -> run.id().equals(id)); } /** Returns a list of all active runs. */ public List<Run> active() { return controller.applications().idList().stream() .flatMap(id -> active(id).stream()) .toList(); } /** Returns a list of all active runs for the given application. */ public List<Run> active(TenantAndApplicationId id) { return controller.applications().requireApplication(id).instances().keySet().stream() .flatMap(name -> JobType.allIn(controller.zoneRegistry()).stream() .map(type -> last(id.instance(name), type)) .flatMap(Optional::stream) .filter(run -> ! run.hasEnded())) .toList(); } /** Returns a list of all active runs for the given instance. */ public List<Run> active(ApplicationId id) { return JobType.allIn(controller.zoneRegistry()).stream() .map(type -> last(id, type)) .flatMap(Optional::stream) .filter(run -> !run.hasEnded()) .toList(); } /** Returns the job status of the given job, possibly empty. */ public JobStatus jobStatus(JobId id) { return new JobStatus(id, runs(id)); } /** Returns the deployment status of the given application. 
*/ public DeploymentStatus deploymentStatus(Application application) { VersionStatus versionStatus = controller.readVersionStatus(); return deploymentStatus(application, versionStatus, controller.systemVersion(versionStatus)); } private DeploymentStatus deploymentStatus(Application application, VersionStatus versionStatus, Version systemVersion) { return new DeploymentStatus(application, this::jobStatus, controller.zoneRegistry(), versionStatus, systemVersion, instance -> controller.applications().versionCompatibility(application.id().instance(instance)), controller.clock().instant()); } /** Adds deployment status to each of the given applications. */ public DeploymentStatusList deploymentStatuses(ApplicationList applications, VersionStatus versionStatus) { Version systemVersion = controller.systemVersion(versionStatus); return DeploymentStatusList.from(applications.asList().stream() .map(application -> deploymentStatus(application, versionStatus, systemVersion)) .toList()); } /** Adds deployment status to each of the given applications. Calling this will do an implicit read of the controller's version status */ public DeploymentStatusList deploymentStatuses(ApplicationList applications) { VersionStatus versionStatus = controller.readVersionStatus(); return deploymentStatuses(applications, versionStatus); } /** Changes the status of the given step, for the given run, provided it is still active. */ public void update(RunId id, RunStatus status, LockedStep step) { locked(id, run -> run.with(status, step)); } /** * Changes the status of the given run to inactive, and stores it as a historic run. * Throws TimeoutException if some step in this job is still being run. 
*/ public void finish(RunId id) throws TimeoutException { Deque<Mutex> locks = new ArrayDeque<>(); try { Run unlockedRun = run(id); locks.push(curator.lock(id.application(), id.type(), report)); for (Step step : report.allPrerequisites(unlockedRun.steps().keySet())) locks.push(curator.lock(id.application(), id.type(), step)); locked(id, run -> { if (run.status() == reset) { for (Step step : run.steps().keySet()) log(id, step, INFO, List.of(" return run.reset(); } if (run.status() == running && run.stepStatuses().values().stream().anyMatch(not(succeeded::equals))) return run; Run finishedRun = run.finished(controller.clock().instant()); locked(id.application(), id.type(), runs -> { runs.put(run.id(), finishedRun); long last = id.number(); long successes = runs.values().stream().filter(Run::hasSucceeded).count(); var oldEntries = runs.entrySet().iterator(); for (var old = oldEntries.next(); old.getKey().number() <= last - historyLength || old.getValue().start().isBefore(controller.clock().instant().minus(maxHistoryAge)); old = oldEntries.next()) { if ( successes == 1 && old.getValue().hasSucceeded() && ! old.getValue().start().isBefore(controller.clock().instant().minus(maxHistoryAge))) { oldEntries.next(); continue; } logs.delete(old.getKey()); oldEntries.remove(); } }); logs.flush(id); metric.jobFinished(run.id().job(), finishedRun.status()); pruneRevisions(unlockedRun); return finishedRun; }); } finally { for (Mutex lock : locks) { try { lock.close(); } catch (Throwable t) { log.log(WARNING, "Failed to close the lock " + lock + ": the lock may or may not " + "have been released in ZooKeeper, and if not this controller " + "must be restarted to release the lock", t); } } } } /** Marks the given run as aborted; no further normal steps will run, but run-always steps will try to succeed. 
*/ public void abort(RunId id, String reason) { locked(id, run -> { run.stepStatuses().entrySet().stream() .filter(entry -> entry.getValue() == unfinished) .forEach(entry -> log(id, entry.getKey(), INFO, "Aborting run: " + reason)); return run.aborted(); }); } /** Accepts and stores a new application package and test jar pair under a generated application version key. */ public ApplicationVersion submit(TenantAndApplicationId id, Submission submission, long projectId) { ApplicationController applications = controller.applications(); AtomicReference<ApplicationVersion> version = new AtomicReference<>(); applications.lockApplicationOrThrow(id, application -> { Optional<ApplicationVersion> previousVersion = application.get().revisions().last(); Optional<ApplicationPackage> previousPackage = previousVersion.flatMap(previous -> applications.applicationStore().find(id.tenant(), id.application(), previous.buildNumber().getAsLong())) .map(ApplicationPackage::new); long previousBuild = previousVersion.map(latestVersion -> latestVersion.buildNumber().getAsLong()).orElse(0L); version.set(submission.toApplicationVersion(1 + previousBuild)); byte[] diff = previousPackage.map(previous -> ApplicationPackageDiff.diff(previous, submission.applicationPackage())) .orElseGet(() -> ApplicationPackageDiff.diffAgainstEmpty(submission.applicationPackage())); applications.applicationStore().put(id.tenant(), id.application(), version.get().id(), submission.applicationPackage().zippedContent(), submission.testPackage(), diff); applications.applicationStore().putMeta(id.tenant(), id.application(), controller.clock().instant(), submission.applicationPackage().metaDataZip()); application = application.withProjectId(projectId == -1 ? 
OptionalLong.empty() : OptionalLong.of(projectId)); application = application.withRevisions(revisions -> revisions.with(version.get())); application = withPrunedPackages(application, version.get().id()); TestSummary testSummary = TestPackage.validateTests(submission.applicationPackage().deploymentSpec(), submission.testPackage()); if (testSummary.problems().isEmpty()) controller.notificationsDb().removeNotification(NotificationSource.from(id), Type.testPackage); else controller.notificationsDb().setNotification(NotificationSource.from(id), Type.testPackage, Notification.Level.warning, testSummary.problems()); submission.applicationPackage().parentVersion().ifPresent(parent -> { if (parent.getMajor() < controller.readSystemVersion().getMajor()) controller.notificationsDb().setNotification(NotificationSource.from(id), Type.submission, Notification.Level.warning, "Parent version used to compile the application is on a " + "lower major version than the current Vespa Cloud version"); else controller.notificationsDb().removeNotification(NotificationSource.from(id), Type.submission); }); applications.storeWithUpdatedConfig(application, submission.applicationPackage()); if (application.get().projectId().isPresent()) applications.deploymentTrigger().triggerNewRevision(id); }); return version.get(); } private LockedApplication withPrunedPackages(LockedApplication application, RevisionId latest){ TenantAndApplicationId id = application.get().id(); Application wrapped = application.get(); RevisionId oldestDeployed = application.get().oldestDeployedRevision() .or(() -> wrapped.instances().values().stream() .flatMap(instance -> instance.change().revision().stream()) .min(naturalOrder())) .orElse(latest); controller.applications().applicationStore().prune(id.tenant(), id.application(), oldestDeployed); for (ApplicationVersion version : application.get().revisions().withPackage()) if (version.id().compareTo(oldestDeployed) < 0) application = application.withRevisions(revisions -> 
revisions.with(version.withoutPackage())); return application; } /** Forget revisions no longer present in any relevant job history. */ private void pruneRevisions(Run run) { TenantAndApplicationId applicationId = TenantAndApplicationId.from(run.id().application()); boolean isProduction = run.versions().targetRevision().isProduction(); (isProduction ? deploymentStatus(controller.applications().requireApplication(applicationId)).jobs().asList().stream() : Stream.of(jobStatus(run.id().job()))) .flatMap(jobs -> jobs.runs().values().stream()) .map(r -> r.versions().targetRevision()) .filter(id -> id.isProduction() == isProduction) .min(naturalOrder()) .ifPresent(oldestRevision -> { controller.applications().lockApplicationOrThrow(applicationId, application -> { if (isProduction) { controller.applications().applicationStore().pruneDiffs(run.id().application().tenant(), run.id().application().application(), oldestRevision.number()); controller.applications().store(application.withRevisions(revisions -> revisions.withoutOlderThan(oldestRevision))); } else { controller.applications().applicationStore().pruneDevDiffs(new DeploymentId(run.id().application(), run.id().job().type().zone()), oldestRevision.number()); controller.applications().store(application.withRevisions(revisions -> revisions.withoutOlderThan(oldestRevision, run.id().job()))); } }); }); } /** Orders a run of the given type, or throws an IllegalStateException if that job type is already running. */ public void start(ApplicationId id, JobType type, Versions versions, boolean isRedeployment, Optional<String> reason) { start(id, type, versions, isRedeployment, JobProfile.of(type), reason); } /** Orders a run of the given type, or throws an IllegalStateException if that job type is already running. 
*/ public void start(ApplicationId id, JobType type, Versions versions, boolean isRedeployment, JobProfile profile, Optional<String> reason) { ApplicationVersion revision = controller.applications().requireApplication(TenantAndApplicationId.from(id)).revisions().get(versions.targetRevision()); if (revision.compileVersion() .map(version -> controller.applications().versionCompatibility(id).refuse(versions.targetPlatform(), version)) .orElse(false)) throw new IllegalArgumentException("Will not start " + type + " for " + id + " with incompatible platform version (" + versions.targetPlatform() + ") " + "and compile versions (" + revision.compileVersion().get() + ")"); locked(id, type, __ -> { Optional<Run> last = last(id, type); if (last.flatMap(run -> active(run.id())).isPresent()) throw new IllegalArgumentException("Cannot start " + type + " for " + id + "; it is already running!"); RunId newId = new RunId(id, type, last.map(run -> run.id().number()).orElse(0L) + 1); curator.writeLastRun(Run.initial(newId, versions, isRedeployment, controller.clock().instant(), profile, reason)); metric.jobStarted(newId.job()); }); } /** Stores the given package and starts a deployment of it, after aborting any such ongoing deployment. */ public void deploy(ApplicationId id, JobType type, Optional<Version> platform, ApplicationPackage applicationPackage) { deploy(id, type, platform, applicationPackage, false); } /** Stores the given package and starts a deployment of it, after aborting any such ongoing deployment.*/ public void deploy(ApplicationId id, JobType type, Optional<Version> platform, ApplicationPackage applicationPackage, boolean dryRun) { if ( ! controller.zoneRegistry().hasZone(type.zone())) throw new IllegalArgumentException(type.zone() + " is not present in this system"); controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(id), application -> { if ( ! 
application.get().instances().containsKey(id.instance())) application = controller.applications().withNewInstance(application, id); controller.applications().store(application); }); DeploymentId deploymentId = new DeploymentId(id, type.zone()); Optional<Run> lastRun = last(id, type); lastRun.filter(run -> ! run.hasEnded()).ifPresent(run -> abortAndWait(run.id(), Duration.ofMinutes(2))); long build = 1 + lastRun.map(run -> run.versions().targetRevision().number()).orElse(0L); RevisionId revisionId = RevisionId.forDevelopment(build, new JobId(id, type)); ApplicationVersion version = ApplicationVersion.forDevelopment(revisionId, applicationPackage.compileVersion()); byte[] diff = getDiff(applicationPackage, deploymentId, lastRun); controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(id), application -> { controller.applications().applicationStore().putDev(deploymentId, version.id(), applicationPackage.zippedContent(), diff); Version targetPlatform = platform.orElseGet(() -> findTargetPlatform(applicationPackage, deploymentId, application.get().get(id.instance()))); controller.applications().store(application.withRevisions(revisions -> revisions.with(version))); start(id, type, new Versions(targetPlatform, version.id(), lastRun.map(run -> run.versions().targetPlatform()), lastRun.map(run -> run.versions().targetRevision())), false, dryRun ? 
JobProfile.developmentDryRun : JobProfile.development, Optional.empty()); }); locked(id, type, __ -> { runner.get().accept(last(id, type).get()); }); } /* Application package diff against previous version, or against empty version if previous does not exist or is invalid */ private byte[] getDiff(ApplicationPackage applicationPackage, DeploymentId deploymentId, Optional<Run> lastRun) { return lastRun.map(run -> run.versions().targetRevision()) .map(prevVersion -> { ApplicationPackage previous; try { previous = new ApplicationPackage(controller.applications().applicationStore().get(deploymentId, prevVersion)); } catch (IllegalArgumentException e) { return ApplicationPackageDiff.diffAgainstEmpty(applicationPackage); } return ApplicationPackageDiff.diff(previous, applicationPackage); }) .orElseGet(() -> ApplicationPackageDiff.diffAgainstEmpty(applicationPackage)); } private Version findTargetPlatform(ApplicationPackage applicationPackage, DeploymentId id, Optional<Instance> instance) { List<Version> versions = controller.readVersionStatus().deployableVersions().stream() .map(VespaVersion::versionNumber) .collect(toList()); instance.map(Instance::deployments) .map(deployments -> deployments.get(id.zoneId())) .map(Deployment::version) .ifPresent(versions::add); if (versions.isEmpty()) throw new IllegalStateException("no deployable platform version found in the system"); VersionCompatibility compatibility = controller.applications().versionCompatibility(id.applicationId()); List<Version> compatibleVersions = new ArrayList<>(); for (Version target : reversed(versions)) if (applicationPackage.compileVersion().isEmpty() || compatibility.accept(target, applicationPackage.compileVersion().get())) compatibleVersions.add(target); if (compatibleVersions.isEmpty()) throw new IllegalArgumentException("no platforms are compatible with compile version " + applicationPackage.compileVersion().get()); Optional<Integer> major = applicationPackage.deploymentSpec().majorVersion(); 
List<Version> versionOnRightMajor = new ArrayList<>(); for (Version target : reversed(versions)) if (major.isEmpty() || major.get() == target.getMajor()) versionOnRightMajor.add(target); if (versionOnRightMajor.isEmpty()) throw new IllegalArgumentException("no platforms were found for major version " + major.get() + " specified in deployment.xml"); for (Version target : compatibleVersions) if (versionOnRightMajor.contains(target)) return target; throw new IllegalArgumentException("no platforms on major version " + major.get() + " specified in deployment.xml " + "are compatible with compile version " + applicationPackage.compileVersion().get()); } /** Aborts a run and waits for it complete. */ private void abortAndWait(RunId id, Duration timeout) { abort(id, "replaced by new deployment"); runner.get().accept(last(id.application(), id.type()).get()); Instant doom = controller.clock().instant().plus(timeout); Duration sleep = Duration.ofMillis(100); while ( ! last(id.application(), id.type()).get().hasEnded()) { if (controller.clock().instant().plus(sleep).isAfter(doom)) throw new UncheckedTimeoutException("timeout waiting for " + id + " to abort and finish"); try { Thread.sleep(sleep.toMillis()); } catch (InterruptedException e) { Thread.currentThread().interrupt(); throw new RuntimeException(e); } } } /** Deletes run data and tester deployments for applications which are unknown, or no longer built internally. */ public void collectGarbage() { Set<ApplicationId> applicationsToBuild = new HashSet<>(instances()); curator.applicationsWithJobs().stream() .filter(id -> ! 
applicationsToBuild.contains(id)) .forEach(id -> { try { TesterId tester = TesterId.of(id); for (JobType type : jobs(id)) locked(id, type, deactivateTester, __ -> { try (Mutex ___ = curator.lock(id, type)) { try { deactivateTester(tester, type); } catch (Exception e) { } curator.deleteRunData(id, type); } }); logs.delete(id); curator.deleteRunData(id); } catch (Exception e) { log.log(WARNING, "failed cleaning up after deleted application", e); } }); } public void deactivateTester(TesterId id, JobType type) { controller.serviceRegistry().configServer().deactivate(new DeploymentId(id.id(), type.zone())); } /** Locks all runs and modifies the list of historic runs for the given application and job type. */ private void locked(ApplicationId id, JobType type, Consumer<SortedMap<RunId, Run>> modifications) { try (Mutex __ = curator.lock(id, type)) { SortedMap<RunId, Run> runs = new TreeMap<>(curator.readHistoricRuns(id, type)); modifications.accept(runs); curator.writeHistoricRuns(id, type, runs.values()); } } /** Locks and modifies the run with the given id, provided it is still active. */ public void locked(RunId id, UnaryOperator<Run> modifications) { try (Mutex __ = curator.lock(id.application(), id.type())) { active(id).ifPresent(run -> { Run modified = modifications.apply(run); if (modified != null) curator.writeLastRun(modified); }); } } /** Locks the given step and checks none of its prerequisites are running, then performs the given actions. */ public void locked(ApplicationId id, JobType type, Step step, Consumer<LockedStep> action) throws TimeoutException { try (Mutex lock = curator.lock(id, type, step)) { for (Step prerequisite : step.allPrerequisites(last(id, type).get().steps().keySet())) try (Mutex __ = curator.lock(id, type, prerequisite)) { ; } action.accept(new LockedStep(lock, step)); } } }
class JobController { public static final Duration maxHistoryAge = Duration.ofDays(60); private static final Logger log = Logger.getLogger(JobController.class.getName()); private final int historyLength; private final Controller controller; private final CuratorDb curator; private final BufferedLogStore logs; private final TesterCloud cloud; private final JobMetrics metric; private final AtomicReference<Consumer<Run>> runner = new AtomicReference<>(__ -> { }); public JobController(Controller controller) { this.historyLength = controller.system().isCd() ? 256 : 64; this.controller = controller; this.curator = controller.curator(); this.logs = new BufferedLogStore(curator, controller.serviceRegistry().runDataStore()); this.cloud = controller.serviceRegistry().testerCloud(); this.metric = new JobMetrics(controller.metric(), controller::system); } public TesterCloud cloud() { return cloud; } public int historyLength() { return historyLength; } public void setRunner(Consumer<Run> runner) { this.runner.set(runner); } /** Rewrite all job data with the newest format. */ public void updateStorage() { for (ApplicationId id : instances()) for (JobType type : jobs(id)) { locked(id, type, runs -> { curator.readLastRun(id, type).ifPresent(curator::writeLastRun); }); } } /** Returns all entries currently logged for the given run. */ public Optional<RunLog> details(RunId id) { return details(id, -1); } /** Returns the logged entries for the given run, which are after the given id threshold. */ public Optional<RunLog> details(RunId id, long after) { try (Mutex __ = curator.lock(id.application(), id.type())) { Run run = runs(id.application(), id.type()).get(id); if (run == null) return Optional.empty(); return active(id).isPresent() ? Optional.of(logs.readActive(id.application(), id.type(), after)) : logs.readFinished(id, after); } } /** Stores the given log entries for the given run and step. 
*/ public void log(RunId id, Step step, List<LogEntry> entries) { locked(id, __ -> { logs.append(id.application(), id.type(), step, entries, true); return __; }); } /** Stores the given log messages for the given run and step. */ public void log(RunId id, Step step, Level level, List<String> messages) { log(id, step, messages.stream() .map(message -> new LogEntry(0, controller.clock().instant(), LogEntry.typeOf(level), message)) .collect(toList())); } /** Stores the given log message for the given run and step. */ public void log(RunId id, Step step, Level level, String message) { log(id, step, level, Collections.singletonList(message)); } /** Fetches any new Vespa log entries, and records the timestamp of the last of these, for continuation. */ public void updateVespaLog(RunId id) { locked(id, run -> { if ( ! run.hasStep(copyVespaLogs)) return run; storeVespaLogs(id); ZoneId zone = id.type().zone(); Optional<Deployment> deployment = Optional.ofNullable(controller.applications().requireInstance(id.application()) .deployments().get(zone)); if (deployment.isEmpty() || deployment.get().at().isBefore(run.start())) return run; List<LogEntry> log; Instant deployedAt; Instant from; if ( ! run.id().type().isProduction()) { deployedAt = run.stepInfo(installInitialReal).or(() -> run.stepInfo(installReal)).flatMap(StepInfo::startTime).orElseThrow(); from = run.lastVespaLogTimestamp().isAfter(run.start()) ? run.lastVespaLogTimestamp() : deployedAt.minusSeconds(10); log = LogEntry.parseVespaLog(controller.serviceRegistry().configServer() .getLogs(new DeploymentId(id.application(), zone), Map.of("from", Long.toString(from.toEpochMilli()))), from); } else log = List.of(); if (id.type().isTest()) { deployedAt = run.stepInfo(installTester).flatMap(StepInfo::startTime).orElseThrow(); from = run.lastVespaLogTimestamp().isAfter(run.start()) ? 
run.lastVespaLogTimestamp() : deployedAt.minusSeconds(10); List<LogEntry> testerLog = LogEntry.parseVespaLog(controller.serviceRegistry().configServer() .getLogs(new DeploymentId(id.tester().id(), zone), Map.of("from", Long.toString(from.toEpochMilli()))), from); Instant justNow = controller.clock().instant().minusSeconds(2); log = Stream.concat(log.stream(), testerLog.stream()) .filter(entry -> entry.at().isBefore(justNow)) .sorted(comparing(LogEntry::at)) .collect(toUnmodifiableList()); } if (log.isEmpty()) return run; logs.append(id.application(), id.type(), Step.copyVespaLogs, log, false); return run.with(log.get(log.size() - 1).at()); }); } public InputStream getVespaLogs(RunId id, long fromMillis, boolean tester) { Run run = run(id); return run.stepStatus(copyVespaLogs).map(succeeded::equals).orElse(false) ? controller.serviceRegistry().runDataStore().getLogs(id, tester) : getVespaLogsFromLogserver(run, fromMillis, tester); } public static Optional<Instant> deploymentCompletedAt(Run run, boolean tester) { return (tester ? run.stepInfo(installTester) : run.stepInfo(installInitialReal).or(() -> run.stepInfo(installReal))) .flatMap(StepInfo::startTime); } private InputStream getVespaLogsFromLogserver(Run run, long fromMillis, boolean tester) { long deploymentCompletedAtMillis = deploymentCompletedAt(run, tester).orElse(Instant.EPOCH).toEpochMilli(); return controller.serviceRegistry().configServer().getLogs(new DeploymentId(tester ? run.id().tester().id() : run.id().application(), run.id().type().zone()), Map.of("from", Long.toString(Math.max(fromMillis, deploymentCompletedAtMillis)), "to", Long.toString(run.end().orElse(controller.clock().instant()).toEpochMilli()))); } /** Fetches any new test log entries, and records the id of the last of these, for continuation. 
*/ public void updateTestLog(RunId id) { locked(id, run -> { Optional<Step> step = Stream.of(endStagingSetup, endTests) .filter(run.readySteps()::contains) .findAny(); if (step.isEmpty()) return run; List<LogEntry> entries = cloud.getLog(new DeploymentId(id.tester().id(), id.type().zone()), run.lastTestLogEntry()); if (entries.isEmpty()) return run; logs.append(id.application(), id.type(), step.get(), entries, false); return run.with(entries.stream().mapToLong(LogEntry::id).max().getAsLong()); }); } public void updateTestReport(RunId id) { locked(id, run -> { Optional<TestReport> report = cloud.getTestReport(new DeploymentId(id.tester().id(), id.type().zone())); if (report.isEmpty()) { return run; } logs.writeTestReport(id, report.get()); return run; }); } public Optional<String> getTestReports(RunId id) { return logs.readTestReports(id); } /** Stores the given certificate as the tester certificate for this run, or throws if it's already set. */ public void storeTesterCertificate(RunId id, X509Certificate testerCertificate) { locked(id, run -> run.with(testerCertificate)); } /** Returns a list of all instances of applications which have registered. */ public List<ApplicationId> instances() { return controller.applications().readable().stream() .flatMap(application -> application.instances().values().stream()) .map(Instance::id) .collect(toUnmodifiableList()); } /** Returns all job types which have been run for the given application. */ private List<JobType> jobs(ApplicationId id) { return JobType.allIn(controller.zoneRegistry()).stream() .filter(type -> last(id, type).isPresent()) .collect(toUnmodifiableList()); } /** Returns an immutable map of all known runs for the given application and job type. */ public NavigableMap<RunId, Run> runs(JobId id) { return runs(id.application(), id.type()); } /** Lists the start time of non-redeployment runs of the given job, in order of increasing age. 
*/ public List<Instant> jobStarts(JobId id) { return runs(id).descendingMap().values().stream() .filter(run -> ! run.isRedeployment()) .map(Run::start) .collect(toUnmodifiableList()); } /** Returns when given deployment last started deploying, falling back to time of deployment if it cannot be determined from job runs */ public Instant lastDeploymentStart(ApplicationId instanceId, Deployment deployment) { return jobStarts(new JobId(instanceId, JobType.deploymentTo(deployment.zone()))).stream() .findFirst() .orElseGet(deployment::at); } /** Returns an immutable map of all known runs for the given application and job type. */ public NavigableMap<RunId, Run> runs(ApplicationId id, JobType type) { ImmutableSortedMap.Builder<RunId, Run> runs = ImmutableSortedMap.orderedBy(Comparator.comparing(RunId::number)); Optional<Run> last = last(id, type); curator.readHistoricRuns(id, type).forEach((runId, run) -> { if (last.isEmpty() || ! runId.equals(last.get().id())) runs.put(runId, run); }); last.ifPresent(run -> runs.put(run.id(), run)); return runs.build(); } /** Returns the run with the given id, or throws if no such run exists. */ public Run run(RunId id) { return runs(id.application(), id.type()).values().stream() .filter(run -> run.id().equals(id)) .findAny() .orElseThrow(() -> new NoSuchElementException("no run with id '" + id + "' exists")); } /** Returns the last run of the given type, for the given application, if one has been run. */ public Optional<Run> last(JobId job) { return curator.readLastRun(job.application(), job.type()); } /** Returns the last run of the given type, for the given application, if one has been run. */ public Optional<Run> last(ApplicationId id, JobType type) { return curator.readLastRun(id, type); } /** Returns the last completed of the given job. */ public Optional<Run> lastCompleted(JobId id) { return JobStatus.lastCompleted(runs(id)); } /** Returns the first failing of the given job. 
*/ public Optional<Run> firstFailing(JobId id) { return JobStatus.firstFailing(runs(id)); } /** Returns the last success of the given job. */ public Optional<Run> lastSuccess(JobId id) { return JobStatus.lastSuccess(runs(id)); } /** Returns the run with the given id, provided it is still active. */ public Optional<Run> active(RunId id) { return last(id.application(), id.type()) .filter(run -> ! run.hasEnded()) .filter(run -> run.id().equals(id)); } /** Returns a list of all active runs. */ public List<Run> active() { return controller.applications().idList().stream() .flatMap(id -> active(id).stream()) .toList(); } /** Returns a list of all active runs for the given application. */ public List<Run> active(TenantAndApplicationId id) { return controller.applications().requireApplication(id).instances().keySet().stream() .flatMap(name -> JobType.allIn(controller.zoneRegistry()).stream() .map(type -> last(id.instance(name), type)) .flatMap(Optional::stream) .filter(run -> ! run.hasEnded())) .toList(); } /** Returns a list of all active runs for the given instance. */ public List<Run> active(ApplicationId id) { return JobType.allIn(controller.zoneRegistry()).stream() .map(type -> last(id, type)) .flatMap(Optional::stream) .filter(run -> !run.hasEnded()) .toList(); } /** Returns the job status of the given job, possibly empty. */ public JobStatus jobStatus(JobId id) { return new JobStatus(id, runs(id)); } /** Returns the deployment status of the given application. 
*/ public DeploymentStatus deploymentStatus(Application application) { VersionStatus versionStatus = controller.readVersionStatus(); return deploymentStatus(application, versionStatus, controller.systemVersion(versionStatus)); } private DeploymentStatus deploymentStatus(Application application, VersionStatus versionStatus, Version systemVersion) { return new DeploymentStatus(application, this::jobStatus, controller.zoneRegistry(), versionStatus, systemVersion, instance -> controller.applications().versionCompatibility(application.id().instance(instance)), controller.clock().instant()); } /** Adds deployment status to each of the given applications. */ public DeploymentStatusList deploymentStatuses(ApplicationList applications, VersionStatus versionStatus) { Version systemVersion = controller.systemVersion(versionStatus); return DeploymentStatusList.from(applications.asList().stream() .map(application -> deploymentStatus(application, versionStatus, systemVersion)) .toList()); } /** Adds deployment status to each of the given applications. Calling this will do an implicit read of the controller's version status */ public DeploymentStatusList deploymentStatuses(ApplicationList applications) { VersionStatus versionStatus = controller.readVersionStatus(); return deploymentStatuses(applications, versionStatus); } /** Changes the status of the given step, for the given run, provided it is still active. */ public void update(RunId id, RunStatus status, LockedStep step) { locked(id, run -> run.with(status, step)); } /** * Changes the status of the given run to inactive, and stores it as a historic run. * Throws TimeoutException if some step in this job is still being run. 
*/ public void finish(RunId id) throws TimeoutException { Deque<Mutex> locks = new ArrayDeque<>(); try { Run unlockedRun = run(id); locks.push(curator.lock(id.application(), id.type(), report)); for (Step step : report.allPrerequisites(unlockedRun.steps().keySet())) locks.push(curator.lock(id.application(), id.type(), step)); locked(id, run -> { if (run.status() == reset) { for (Step step : run.steps().keySet()) log(id, step, INFO, List.of(" return run.reset(); } if (run.status() == running && run.stepStatuses().values().stream().anyMatch(not(succeeded::equals))) return run; Run finishedRun = run.finished(controller.clock().instant()); locked(id.application(), id.type(), runs -> { runs.put(run.id(), finishedRun); long last = id.number(); long successes = runs.values().stream().filter(Run::hasSucceeded).count(); var oldEntries = runs.entrySet().iterator(); for (var old = oldEntries.next(); old.getKey().number() <= last - historyLength || old.getValue().start().isBefore(controller.clock().instant().minus(maxHistoryAge)); old = oldEntries.next()) { if ( successes == 1 && old.getValue().hasSucceeded() && ! old.getValue().start().isBefore(controller.clock().instant().minus(maxHistoryAge))) { oldEntries.next(); continue; } logs.delete(old.getKey()); oldEntries.remove(); } }); logs.flush(id); metric.jobFinished(run.id().job(), finishedRun.status()); pruneRevisions(unlockedRun); return finishedRun; }); } finally { for (Mutex lock : locks) { try { lock.close(); } catch (Throwable t) { log.log(WARNING, "Failed to close the lock " + lock + ": the lock may or may not " + "have been released in ZooKeeper, and if not this controller " + "must be restarted to release the lock", t); } } } } /** Marks the given run as aborted; no further normal steps will run, but run-always steps will try to succeed. 
*/ public void abort(RunId id, String reason) { locked(id, run -> { run.stepStatuses().entrySet().stream() .filter(entry -> entry.getValue() == unfinished) .forEach(entry -> log(id, entry.getKey(), INFO, "Aborting run: " + reason)); return run.aborted(); }); } /** Accepts and stores a new application package and test jar pair under a generated application version key. */ public ApplicationVersion submit(TenantAndApplicationId id, Submission submission, long projectId) { ApplicationController applications = controller.applications(); AtomicReference<ApplicationVersion> version = new AtomicReference<>(); applications.lockApplicationOrThrow(id, application -> { Optional<ApplicationVersion> previousVersion = application.get().revisions().last(); Optional<ApplicationPackage> previousPackage = previousVersion.flatMap(previous -> applications.applicationStore().find(id.tenant(), id.application(), previous.buildNumber().getAsLong())) .map(ApplicationPackage::new); long previousBuild = previousVersion.map(latestVersion -> latestVersion.buildNumber().getAsLong()).orElse(0L); version.set(submission.toApplicationVersion(1 + previousBuild)); byte[] diff = previousPackage.map(previous -> ApplicationPackageDiff.diff(previous, submission.applicationPackage())) .orElseGet(() -> ApplicationPackageDiff.diffAgainstEmpty(submission.applicationPackage())); applications.applicationStore().put(id.tenant(), id.application(), version.get().id(), submission.applicationPackage().zippedContent(), submission.testPackage(), diff); applications.applicationStore().putMeta(id.tenant(), id.application(), controller.clock().instant(), submission.applicationPackage().metaDataZip()); application = application.withProjectId(projectId == -1 ? 
OptionalLong.empty() : OptionalLong.of(projectId)); application = application.withRevisions(revisions -> revisions.with(version.get())); application = withPrunedPackages(application, version.get().id()); TestSummary testSummary = TestPackage.validateTests(submission.applicationPackage().deploymentSpec(), submission.testPackage()); if (testSummary.problems().isEmpty()) controller.notificationsDb().removeNotification(NotificationSource.from(id), Type.testPackage); else controller.notificationsDb().setNotification(NotificationSource.from(id), Type.testPackage, Notification.Level.warning, testSummary.problems()); submission.applicationPackage().parentVersion().ifPresent(parent -> { if (parent.getMajor() < controller.readSystemVersion().getMajor()) controller.notificationsDb().setNotification(NotificationSource.from(id), Type.submission, Notification.Level.warning, "Parent version used to compile the application is on a " + "lower major version than the current Vespa Cloud version"); else controller.notificationsDb().removeNotification(NotificationSource.from(id), Type.submission); }); applications.storeWithUpdatedConfig(application, submission.applicationPackage()); if (application.get().projectId().isPresent()) applications.deploymentTrigger().triggerNewRevision(id); }); return version.get(); } private LockedApplication withPrunedPackages(LockedApplication application, RevisionId latest){ TenantAndApplicationId id = application.get().id(); Application wrapped = application.get(); RevisionId oldestDeployed = application.get().oldestDeployedRevision() .or(() -> wrapped.instances().values().stream() .flatMap(instance -> instance.change().revision().stream()) .min(naturalOrder())) .orElse(latest); controller.applications().applicationStore().prune(id.tenant(), id.application(), oldestDeployed); for (ApplicationVersion version : application.get().revisions().withPackage()) if (version.id().compareTo(oldestDeployed) < 0) application = application.withRevisions(revisions -> 
revisions.with(version.withoutPackage())); return application; } /** Forget revisions no longer present in any relevant job history. */ private void pruneRevisions(Run run) { TenantAndApplicationId applicationId = TenantAndApplicationId.from(run.id().application()); boolean isProduction = run.versions().targetRevision().isProduction(); (isProduction ? deploymentStatus(controller.applications().requireApplication(applicationId)).jobs().asList().stream() : Stream.of(jobStatus(run.id().job()))) .flatMap(jobs -> jobs.runs().values().stream()) .map(r -> r.versions().targetRevision()) .filter(id -> id.isProduction() == isProduction) .min(naturalOrder()) .ifPresent(oldestRevision -> { controller.applications().lockApplicationOrThrow(applicationId, application -> { if (isProduction) { controller.applications().applicationStore().pruneDiffs(run.id().application().tenant(), run.id().application().application(), oldestRevision.number()); controller.applications().store(application.withRevisions(revisions -> revisions.withoutOlderThan(oldestRevision))); } else { controller.applications().applicationStore().pruneDevDiffs(new DeploymentId(run.id().application(), run.id().job().type().zone()), oldestRevision.number()); controller.applications().store(application.withRevisions(revisions -> revisions.withoutOlderThan(oldestRevision, run.id().job()))); } }); }); } /** Orders a run of the given type, or throws an IllegalStateException if that job type is already running. */ public void start(ApplicationId id, JobType type, Versions versions, boolean isRedeployment, Optional<String> reason) { start(id, type, versions, isRedeployment, JobProfile.of(type), reason); } /** Orders a run of the given type, or throws an IllegalStateException if that job type is already running. 
*/ public void start(ApplicationId id, JobType type, Versions versions, boolean isRedeployment, JobProfile profile, Optional<String> reason) { ApplicationVersion revision = controller.applications().requireApplication(TenantAndApplicationId.from(id)).revisions().get(versions.targetRevision()); if (revision.compileVersion() .map(version -> controller.applications().versionCompatibility(id).refuse(versions.targetPlatform(), version)) .orElse(false)) throw new IllegalArgumentException("Will not start " + type + " for " + id + " with incompatible platform version (" + versions.targetPlatform() + ") " + "and compile versions (" + revision.compileVersion().get() + ")"); locked(id, type, __ -> { Optional<Run> last = last(id, type); if (last.flatMap(run -> active(run.id())).isPresent()) throw new IllegalArgumentException("Cannot start " + type + " for " + id + "; it is already running!"); RunId newId = new RunId(id, type, last.map(run -> run.id().number()).orElse(0L) + 1); curator.writeLastRun(Run.initial(newId, versions, isRedeployment, controller.clock().instant(), profile, reason)); metric.jobStarted(newId.job()); }); } /** Stores the given package and starts a deployment of it, after aborting any such ongoing deployment. */ public void deploy(ApplicationId id, JobType type, Optional<Version> platform, ApplicationPackage applicationPackage) { deploy(id, type, platform, applicationPackage, false); } /** Stores the given package and starts a deployment of it, after aborting any such ongoing deployment.*/ public void deploy(ApplicationId id, JobType type, Optional<Version> platform, ApplicationPackage applicationPackage, boolean dryRun) { if ( ! controller.zoneRegistry().hasZone(type.zone())) throw new IllegalArgumentException(type.zone() + " is not present in this system"); controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(id), application -> { if ( ! 
application.get().instances().containsKey(id.instance())) application = controller.applications().withNewInstance(application, id); controller.applications().store(application); }); DeploymentId deploymentId = new DeploymentId(id, type.zone()); Optional<Run> lastRun = last(id, type); lastRun.filter(run -> ! run.hasEnded()).ifPresent(run -> abortAndWait(run.id(), Duration.ofMinutes(2))); long build = 1 + lastRun.map(run -> run.versions().targetRevision().number()).orElse(0L); RevisionId revisionId = RevisionId.forDevelopment(build, new JobId(id, type)); ApplicationVersion version = ApplicationVersion.forDevelopment(revisionId, applicationPackage.compileVersion()); byte[] diff = getDiff(applicationPackage, deploymentId, lastRun); controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(id), application -> { controller.applications().applicationStore().putDev(deploymentId, version.id(), applicationPackage.zippedContent(), diff); Version targetPlatform = platform.orElseGet(() -> findTargetPlatform(applicationPackage, deploymentId, application.get().get(id.instance()))); controller.applications().store(application.withRevisions(revisions -> revisions.with(version))); start(id, type, new Versions(targetPlatform, version.id(), lastRun.map(run -> run.versions().targetPlatform()), lastRun.map(run -> run.versions().targetRevision())), false, dryRun ? 
JobProfile.developmentDryRun : JobProfile.development, Optional.empty()); }); locked(id, type, __ -> { runner.get().accept(last(id, type).get()); }); } /* Application package diff against previous version, or against empty version if previous does not exist or is invalid */ private byte[] getDiff(ApplicationPackage applicationPackage, DeploymentId deploymentId, Optional<Run> lastRun) { return lastRun.map(run -> run.versions().targetRevision()) .map(prevVersion -> { ApplicationPackage previous; try { previous = new ApplicationPackage(controller.applications().applicationStore().get(deploymentId, prevVersion)); } catch (IllegalArgumentException e) { return ApplicationPackageDiff.diffAgainstEmpty(applicationPackage); } return ApplicationPackageDiff.diff(previous, applicationPackage); }) .orElseGet(() -> ApplicationPackageDiff.diffAgainstEmpty(applicationPackage)); } private Version findTargetPlatform(ApplicationPackage applicationPackage, DeploymentId id, Optional<Instance> instance) { List<Version> versions = controller.readVersionStatus().deployableVersions().stream() .map(VespaVersion::versionNumber) .collect(toList()); instance.map(Instance::deployments) .map(deployments -> deployments.get(id.zoneId())) .map(Deployment::version) .ifPresent(versions::add); if (versions.isEmpty()) throw new IllegalStateException("no deployable platform version found in the system"); VersionCompatibility compatibility = controller.applications().versionCompatibility(id.applicationId()); List<Version> compatibleVersions = new ArrayList<>(); for (Version target : reversed(versions)) if (applicationPackage.compileVersion().isEmpty() || compatibility.accept(target, applicationPackage.compileVersion().get())) compatibleVersions.add(target); if (compatibleVersions.isEmpty()) throw new IllegalArgumentException("no platforms are compatible with compile version " + applicationPackage.compileVersion().get()); Optional<Integer> major = applicationPackage.deploymentSpec().majorVersion(); 
List<Version> versionOnRightMajor = new ArrayList<>(); for (Version target : reversed(versions)) if (major.isEmpty() || major.get() == target.getMajor()) versionOnRightMajor.add(target); if (versionOnRightMajor.isEmpty()) throw new IllegalArgumentException("no platforms were found for major version " + major.get() + " specified in deployment.xml"); for (Version target : compatibleVersions) if (versionOnRightMajor.contains(target)) return target; throw new IllegalArgumentException("no platforms on major version " + major.get() + " specified in deployment.xml " + "are compatible with compile version " + applicationPackage.compileVersion().get()); } /** Aborts a run and waits for it complete. */ private void abortAndWait(RunId id, Duration timeout) { abort(id, "replaced by new deployment"); runner.get().accept(last(id.application(), id.type()).get()); Instant doom = controller.clock().instant().plus(timeout); Duration sleep = Duration.ofMillis(100); while ( ! last(id.application(), id.type()).get().hasEnded()) { if (controller.clock().instant().plus(sleep).isAfter(doom)) throw new UncheckedTimeoutException("timeout waiting for " + id + " to abort and finish"); try { Thread.sleep(sleep.toMillis()); } catch (InterruptedException e) { Thread.currentThread().interrupt(); throw new RuntimeException(e); } } } /** Deletes run data and tester deployments for applications which are unknown, or no longer built internally. */ public void collectGarbage() { Set<ApplicationId> applicationsToBuild = new HashSet<>(instances()); curator.applicationsWithJobs().stream() .filter(id -> ! 
applicationsToBuild.contains(id)) .forEach(id -> { try { TesterId tester = TesterId.of(id); for (JobType type : jobs(id)) locked(id, type, deactivateTester, __ -> { try (Mutex ___ = curator.lock(id, type)) { try { deactivateTester(tester, type); } catch (Exception e) { } curator.deleteRunData(id, type); } }); logs.delete(id); curator.deleteRunData(id); } catch (Exception e) { log.log(WARNING, "failed cleaning up after deleted application", e); } }); } public void deactivateTester(TesterId id, JobType type) { controller.serviceRegistry().configServer().deactivate(new DeploymentId(id.id(), type.zone())); } /** Locks all runs and modifies the list of historic runs for the given application and job type. */ private void locked(ApplicationId id, JobType type, Consumer<SortedMap<RunId, Run>> modifications) { try (Mutex __ = curator.lock(id, type)) { SortedMap<RunId, Run> runs = new TreeMap<>(curator.readHistoricRuns(id, type)); modifications.accept(runs); curator.writeHistoricRuns(id, type, runs.values()); } } /** Locks and modifies the run with the given id, provided it is still active. */ public void locked(RunId id, UnaryOperator<Run> modifications) { try (Mutex __ = curator.lock(id.application(), id.type())) { active(id).ifPresent(run -> { Run modified = modifications.apply(run); if (modified != null) curator.writeLastRun(modified); }); } } /** Locks the given step and checks none of its prerequisites are running, then performs the given actions. */ public void locked(ApplicationId id, JobType type, Step step, Consumer<LockedStep> action) throws TimeoutException { try (Mutex lock = curator.lock(id, type, step)) { for (Step prerequisite : step.allPrerequisites(last(id, type).get().steps().keySet())) try (Mutex __ = curator.lock(id, type, prerequisite)) { ; } action.accept(new LockedStep(lock, step)); } } }
I think using explicit try/catch is a lot easier to read. try { some_code() fail(); } catch (IllegalArgumentException e) { assertXX }
public void requireThatInvalidJvmGcOptionsFailDeployment() throws IOException, SAXException { expectedException.expect(IllegalArgumentException.class); expectedException.expectMessage(containsString("Invalid JVM GC options from services.xml: bar,foo")); buildModelWithJvmOptions(new TestProperties().setHostedVespa(true).failDeploymentWithInvalidJvmOptions(true), new TestLogger(), "gc-options", "-XX:+ParallelGCThreads=8 foo bar"); }
expectedException.expect(IllegalArgumentException.class);
public void requireThatInvalidJvmGcOptionsFailDeployment() throws IOException, SAXException { expectedException.expect(IllegalArgumentException.class); expectedException.expectMessage(containsString("Invalid JVM GC options from services.xml: bar,foo")); buildModelWithJvmOptions(new TestProperties().setHostedVespa(true).failDeploymentWithInvalidJvmOptions(true), new TestLogger(), "gc-options", "-XX:+ParallelGCThreads=8 foo bar"); }
class JvmOptionsTest extends ContainerModelBuilderTestBase { @Rule public ExpectedException expectedException = ExpectedException.none(); @Test public void verify_jvm_tag_with_attributes() throws IOException, SAXException { String servicesXml = "<container version='1.0'>" + " <search/>" + " <nodes>" + " <jvm options='-XX:SoftRefLRUPolicyMSPerMB=2500' gc-options='-XX:+UseParNewGC' allocated-memory='45%'/>" + " <node hostalias='mockhost'/>" + " </nodes>" + "</container>"; ApplicationPackage applicationPackage = new MockApplicationPackage.Builder().withServices(servicesXml).build(); final TestLogger logger = new TestLogger(); VespaModel model = new VespaModel(new NullConfigModelRegistry(), new DeployState.Builder() .applicationPackage(applicationPackage) .deployLogger(logger) .build()); QrStartConfig.Builder qrStartBuilder = new QrStartConfig.Builder(); model.getConfig(qrStartBuilder, "container/container.0"); QrStartConfig qrStartConfig = new QrStartConfig(qrStartBuilder); assertEquals("-XX:+UseParNewGC", qrStartConfig.jvm().gcopts()); assertEquals(45, qrStartConfig.jvm().heapSizeAsPercentageOfPhysicalMemory()); assertEquals("-XX:SoftRefLRUPolicyMSPerMB=2500", model.getContainerClusters().values().iterator().next().getContainers().get(0).getJvmOptions()); } @Test public void detect_conflicting_jvmgcoptions_in_jvmargs() { assertFalse(ContainerModelBuilder.incompatibleGCOptions("")); assertFalse(ContainerModelBuilder.incompatibleGCOptions("UseG1GC")); assertTrue(ContainerModelBuilder.incompatibleGCOptions("-XX:+UseG1GC")); assertTrue(ContainerModelBuilder.incompatibleGCOptions("abc -XX:+UseParNewGC xyz")); assertTrue(ContainerModelBuilder.incompatibleGCOptions("-XX:CMSInitiatingOccupancyFraction=19")); } @Test public void honours_jvm_gc_options() { Element clusterElem = DomBuilderTest.parse( "<container version='1.0'>", " <search/>", " <nodes jvm-gc-options='-XX:+UseG1GC'>", " <node hostalias='mockhost'/>", " </nodes>", "</container>" ); createModel(root, clusterElem); 
QrStartConfig.Builder qrStartBuilder = new QrStartConfig.Builder(); root.getConfig(qrStartBuilder, "container/container.0"); QrStartConfig qrStartConfig = new QrStartConfig(qrStartBuilder); assertEquals("-XX:+UseG1GC", qrStartConfig.jvm().gcopts()); } private static void verifyIgnoreJvmGCOptions(boolean isHosted) throws IOException, SAXException { verifyIgnoreJvmGCOptionsIfJvmArgs("jvmargs", ContainerCluster.G1GC, isHosted); verifyIgnoreJvmGCOptionsIfJvmArgs( "jvm-options", "-XX:+UseG1GC", isHosted); } private static void verifyIgnoreJvmGCOptionsIfJvmArgs(String jvmOptionsName, String expectedGC, boolean isHosted) throws IOException, SAXException { String servicesXml = "<container version='1.0'>" + " <nodes jvm-gc-options='-XX:+UseG1GC' " + jvmOptionsName + "='-XX:+UseParNewGC'>" + " <node hostalias='mockhost'/>" + " </nodes>" + "</container>"; ApplicationPackage applicationPackage = new MockApplicationPackage.Builder().withServices(servicesXml).build(); final TestLogger logger = new TestLogger(); VespaModel model = new VespaModel(new NullConfigModelRegistry(), new DeployState.Builder() .applicationPackage(applicationPackage) .deployLogger(logger) .properties(new TestProperties().setHostedVespa(isHosted)) .build()); QrStartConfig.Builder qrStartBuilder = new QrStartConfig.Builder(); model.getConfig(qrStartBuilder, "container/container.0"); QrStartConfig qrStartConfig = new QrStartConfig(qrStartBuilder); assertEquals(expectedGC, qrStartConfig.jvm().gcopts()); } @Test public void ignores_jvmgcoptions_on_conflicting_jvmargs() throws IOException, SAXException { verifyIgnoreJvmGCOptions(false); verifyIgnoreJvmGCOptions(true); } private void verifyJvmGCOptions(boolean isHosted, String featureFlagDefault, String override, String expected) throws IOException, SAXException { String servicesXml = "<container version='1.0'>" + " <nodes " + ((override == null) ? 
">" : ("jvm-gc-options='" + override + "'>")) + " <node hostalias='mockhost'/>" + " </nodes>" + "</container>"; ApplicationPackage applicationPackage = new MockApplicationPackage.Builder().withServices(servicesXml).build(); final TestLogger logger = new TestLogger(); VespaModel model = new VespaModel(new NullConfigModelRegistry(), new DeployState.Builder() .applicationPackage(applicationPackage) .deployLogger(logger) .properties(new TestProperties().setJvmGCOptions(featureFlagDefault).setHostedVespa(isHosted)) .build()); QrStartConfig.Builder qrStartBuilder = new QrStartConfig.Builder(); model.getConfig(qrStartBuilder, "container/container.0"); QrStartConfig qrStartConfig = new QrStartConfig(qrStartBuilder); assertEquals(expected, qrStartConfig.jvm().gcopts()); } @Test public void requireThatJvmGCOptionsIsHonoured() throws IOException, SAXException { verifyJvmGCOptions(false, null, null, ContainerCluster.G1GC); verifyJvmGCOptions(true, null, null, ContainerCluster.PARALLEL_GC); verifyJvmGCOptions(true, "", null, ContainerCluster.PARALLEL_GC); verifyJvmGCOptions(false, "-XX:+UseG1GC", null, "-XX:+UseG1GC"); verifyJvmGCOptions(true, "-XX:+UseG1GC", null, "-XX:+UseG1GC"); verifyJvmGCOptions(false, null, "-XX:+UseG1GC", "-XX:+UseG1GC"); verifyJvmGCOptions(false, "-XX:+UseParallelGC", "-XX:+UseG1GC", "-XX:+UseG1GC"); verifyJvmGCOptions(false, null, "-XX:+UseParallelGC", "-XX:+UseParallelGC"); } @Test public void requireThatInvalidJvmGcOptionsAreLogged() throws IOException, SAXException { verifyLoggingOfJvmGcOptions(true, "-XX:+ParallelGCThreads=8 foo bar", "foo", "bar"); verifyLoggingOfJvmGcOptions(true, "-XX:+UseCMSInitiatingOccupancyOnly foo bar", "-XX:+UseCMSInitiatingOccupancyOnly", "foo", "bar"); verifyLoggingOfJvmGcOptions(true, "-XX:+UseConcMarkSweepGC", "-XX:+UseConcMarkSweepGC"); verifyLoggingOfJvmGcOptions(true, "$(touch /tmp/hello-from-gc-options)", "$(touch", "/tmp/hello-from-gc-options)"); verifyLoggingOfJvmGcOptions(false, "$(touch 
/tmp/hello-from-gc-options)", "$(touch", "/tmp/hello-from-gc-options)"); verifyLoggingOfJvmGcOptions(true, "-XX:+ParallelGCThreads=8"); verifyLoggingOfJvmGcOptions(true, "-XX:MaxTenuringThreshold"); verifyLoggingOfJvmGcOptions(false, "-XX:+UseConcMarkSweepGC"); } @Test private void verifyLoggingOfJvmGcOptions(boolean isHosted, String override, String... invalidOptions) throws IOException, SAXException { verifyLoggingOfJvmOptions(isHosted, "gc-options", override, invalidOptions); } private void verifyLoggingOfJvmOptions(boolean isHosted, String optionName, String override, String... invalidOptions) throws IOException, SAXException { TestLogger logger = new TestLogger(); buildModelWithJvmOptions(isHosted, logger, optionName, override); List<String> strings = Arrays.asList(invalidOptions.clone()); if (strings.isEmpty()) return; Collections.sort(strings); Pair<Level, String> firstOption = logger.msgs.get(0); assertEquals(Level.WARNING, firstOption.getFirst()); assertEquals("Invalid JVM " + (optionName.equals("gc-options") ? 
"GC " : "") + "options from services.xml: " + String.join(",", strings), firstOption.getSecond()); } private void buildModelWithJvmOptions(boolean isHosted, TestLogger logger, String optionName, String override) throws IOException, SAXException { buildModelWithJvmOptions(new TestProperties().setHostedVespa(isHosted), logger, optionName, override); } private void buildModelWithJvmOptions(TestProperties properties, TestLogger logger, String optionName, String override) throws IOException, SAXException { String servicesXml = "<container version='1.0'>" + " <nodes>" + " <jvm " + optionName + "='" + override + "'/>" + " <node hostalias='mockhost'/>" + " </nodes>" + "</container>"; ApplicationPackage app = new MockApplicationPackage.Builder().withServices(servicesXml).build(); new VespaModel(new NullConfigModelRegistry(), new DeployState.Builder() .applicationPackage(app) .deployLogger(logger) .properties(properties) .build()); } @Test public void requireThatJvmOptionsAreLogged() throws IOException, SAXException { verifyLoggingOfJvmOptions(true, "options", "-Xms2G"); verifyLoggingOfJvmOptions(false, "options", "-Xms2G"); } }
class JvmOptionsTest extends ContainerModelBuilderTestBase { @Rule public ExpectedException expectedException = ExpectedException.none(); @Test public void verify_jvm_tag_with_attributes() throws IOException, SAXException { String servicesXml = "<container version='1.0'>" + " <search/>" + " <nodes>" + " <jvm options='-XX:SoftRefLRUPolicyMSPerMB=2500' gc-options='-XX:+UseParNewGC' allocated-memory='45%'/>" + " <node hostalias='mockhost'/>" + " </nodes>" + "</container>"; ApplicationPackage applicationPackage = new MockApplicationPackage.Builder().withServices(servicesXml).build(); final TestLogger logger = new TestLogger(); VespaModel model = new VespaModel(new NullConfigModelRegistry(), new DeployState.Builder() .applicationPackage(applicationPackage) .deployLogger(logger) .build()); QrStartConfig.Builder qrStartBuilder = new QrStartConfig.Builder(); model.getConfig(qrStartBuilder, "container/container.0"); QrStartConfig qrStartConfig = new QrStartConfig(qrStartBuilder); assertEquals("-XX:+UseParNewGC", qrStartConfig.jvm().gcopts()); assertEquals(45, qrStartConfig.jvm().heapSizeAsPercentageOfPhysicalMemory()); assertEquals("-XX:SoftRefLRUPolicyMSPerMB=2500", model.getContainerClusters().values().iterator().next().getContainers().get(0).getJvmOptions()); } @Test public void detect_conflicting_jvmgcoptions_in_jvmargs() { assertFalse(ContainerModelBuilder.incompatibleGCOptions("")); assertFalse(ContainerModelBuilder.incompatibleGCOptions("UseG1GC")); assertTrue(ContainerModelBuilder.incompatibleGCOptions("-XX:+UseG1GC")); assertTrue(ContainerModelBuilder.incompatibleGCOptions("abc -XX:+UseParNewGC xyz")); assertTrue(ContainerModelBuilder.incompatibleGCOptions("-XX:CMSInitiatingOccupancyFraction=19")); } @Test public void honours_jvm_gc_options() { Element clusterElem = DomBuilderTest.parse( "<container version='1.0'>", " <search/>", " <nodes jvm-gc-options='-XX:+UseG1GC'>", " <node hostalias='mockhost'/>", " </nodes>", "</container>" ); createModel(root, clusterElem); 
QrStartConfig.Builder qrStartBuilder = new QrStartConfig.Builder(); root.getConfig(qrStartBuilder, "container/container.0"); QrStartConfig qrStartConfig = new QrStartConfig(qrStartBuilder); assertEquals("-XX:+UseG1GC", qrStartConfig.jvm().gcopts()); } private static void verifyIgnoreJvmGCOptions(boolean isHosted) throws IOException, SAXException { verifyIgnoreJvmGCOptionsIfJvmArgs("jvmargs", ContainerCluster.G1GC, isHosted); verifyIgnoreJvmGCOptionsIfJvmArgs( "jvm-options", "-XX:+UseG1GC", isHosted); } private static void verifyIgnoreJvmGCOptionsIfJvmArgs(String jvmOptionsName, String expectedGC, boolean isHosted) throws IOException, SAXException { String servicesXml = "<container version='1.0'>" + " <nodes jvm-gc-options='-XX:+UseG1GC' " + jvmOptionsName + "='-XX:+UseParNewGC'>" + " <node hostalias='mockhost'/>" + " </nodes>" + "</container>"; ApplicationPackage applicationPackage = new MockApplicationPackage.Builder().withServices(servicesXml).build(); final TestLogger logger = new TestLogger(); VespaModel model = new VespaModel(new NullConfigModelRegistry(), new DeployState.Builder() .applicationPackage(applicationPackage) .deployLogger(logger) .properties(new TestProperties().setHostedVespa(isHosted)) .build()); QrStartConfig.Builder qrStartBuilder = new QrStartConfig.Builder(); model.getConfig(qrStartBuilder, "container/container.0"); QrStartConfig qrStartConfig = new QrStartConfig(qrStartBuilder); assertEquals(expectedGC, qrStartConfig.jvm().gcopts()); } @Test public void ignores_jvmgcoptions_on_conflicting_jvmargs() throws IOException, SAXException { verifyIgnoreJvmGCOptions(false); verifyIgnoreJvmGCOptions(true); } private void verifyJvmGCOptions(boolean isHosted, String featureFlagDefault, String override, String expected) throws IOException, SAXException { String servicesXml = "<container version='1.0'>" + " <nodes " + ((override == null) ? 
">" : ("jvm-gc-options='" + override + "'>")) + " <node hostalias='mockhost'/>" + " </nodes>" + "</container>"; ApplicationPackage applicationPackage = new MockApplicationPackage.Builder().withServices(servicesXml).build(); final TestLogger logger = new TestLogger(); VespaModel model = new VespaModel(new NullConfigModelRegistry(), new DeployState.Builder() .applicationPackage(applicationPackage) .deployLogger(logger) .properties(new TestProperties().setJvmGCOptions(featureFlagDefault).setHostedVespa(isHosted)) .build()); QrStartConfig.Builder qrStartBuilder = new QrStartConfig.Builder(); model.getConfig(qrStartBuilder, "container/container.0"); QrStartConfig qrStartConfig = new QrStartConfig(qrStartBuilder); assertEquals(expected, qrStartConfig.jvm().gcopts()); } @Test public void requireThatJvmGCOptionsIsHonoured() throws IOException, SAXException { verifyJvmGCOptions(false, null, null, ContainerCluster.G1GC); verifyJvmGCOptions(true, null, null, ContainerCluster.PARALLEL_GC); verifyJvmGCOptions(true, "", null, ContainerCluster.PARALLEL_GC); verifyJvmGCOptions(false, "-XX:+UseG1GC", null, "-XX:+UseG1GC"); verifyJvmGCOptions(true, "-XX:+UseG1GC", null, "-XX:+UseG1GC"); verifyJvmGCOptions(false, null, "-XX:+UseG1GC", "-XX:+UseG1GC"); verifyJvmGCOptions(false, "-XX:+UseParallelGC", "-XX:+UseG1GC", "-XX:+UseG1GC"); verifyJvmGCOptions(false, null, "-XX:+UseParallelGC", "-XX:+UseParallelGC"); } @Test public void requireThatInvalidJvmGcOptionsAreLogged() throws IOException, SAXException { verifyLoggingOfJvmGcOptions(true, "-XX:+ParallelGCThreads=8 foo bar", "foo", "bar"); verifyLoggingOfJvmGcOptions(true, "-XX:+UseCMSInitiatingOccupancyOnly foo bar", "-XX:+UseCMSInitiatingOccupancyOnly", "foo", "bar"); verifyLoggingOfJvmGcOptions(true, "-XX:+UseConcMarkSweepGC", "-XX:+UseConcMarkSweepGC"); verifyLoggingOfJvmGcOptions(true, "$(touch /tmp/hello-from-gc-options)", "$(touch", "/tmp/hello-from-gc-options)"); verifyLoggingOfJvmGcOptions(false, "$(touch 
/tmp/hello-from-gc-options)", "$(touch", "/tmp/hello-from-gc-options)"); verifyLoggingOfJvmGcOptions(true, "-XX:+ParallelGCThreads=8"); verifyLoggingOfJvmGcOptions(true, "-XX:MaxTenuringThreshold"); verifyLoggingOfJvmGcOptions(false, "-XX:+UseConcMarkSweepGC"); } @Test private void verifyLoggingOfJvmGcOptions(boolean isHosted, String override, String... invalidOptions) throws IOException, SAXException { verifyLoggingOfJvmOptions(isHosted, "gc-options", override, invalidOptions); } private void verifyLoggingOfJvmOptions(boolean isHosted, String optionName, String override, String... invalidOptions) throws IOException, SAXException { TestLogger logger = new TestLogger(); buildModelWithJvmOptions(isHosted, logger, optionName, override); List<String> strings = Arrays.asList(invalidOptions.clone()); if (strings.isEmpty()) return; Collections.sort(strings); Pair<Level, String> firstOption = logger.msgs.get(0); assertEquals(Level.WARNING, firstOption.getFirst()); assertEquals("Invalid JVM " + (optionName.equals("gc-options") ? 
"GC " : "") + "options from services.xml: " + String.join(",", strings), firstOption.getSecond()); } private void buildModelWithJvmOptions(boolean isHosted, TestLogger logger, String optionName, String override) throws IOException, SAXException { buildModelWithJvmOptions(new TestProperties().setHostedVespa(isHosted), logger, optionName, override); } private void buildModelWithJvmOptions(TestProperties properties, TestLogger logger, String optionName, String override) throws IOException, SAXException { String servicesXml = "<container version='1.0'>" + " <nodes>" + " <jvm " + optionName + "='" + override + "'/>" + " <node hostalias='mockhost'/>" + " </nodes>" + "</container>"; ApplicationPackage app = new MockApplicationPackage.Builder().withServices(servicesXml).build(); new VespaModel(new NullConfigModelRegistry(), new DeployState.Builder() .applicationPackage(app) .deployLogger(logger) .properties(properties) .build()); } @Test public void requireThatJvmOptionsAreLogged() throws IOException, SAXException { verifyLoggingOfJvmOptions(true, "options", "-Xms2G"); verifyLoggingOfJvmOptions(false, "options", "-Xms2G"); } }
Hm, yeah, thanks for the input. I'll think about it the next time I need to test that an exception is thrown
public void requireThatInvalidJvmGcOptionsFailDeployment() throws IOException, SAXException { expectedException.expect(IllegalArgumentException.class); expectedException.expectMessage(containsString("Invalid JVM GC options from services.xml: bar,foo")); buildModelWithJvmOptions(new TestProperties().setHostedVespa(true).failDeploymentWithInvalidJvmOptions(true), new TestLogger(), "gc-options", "-XX:+ParallelGCThreads=8 foo bar"); }
expectedException.expect(IllegalArgumentException.class);
public void requireThatInvalidJvmGcOptionsFailDeployment() throws IOException, SAXException { expectedException.expect(IllegalArgumentException.class); expectedException.expectMessage(containsString("Invalid JVM GC options from services.xml: bar,foo")); buildModelWithJvmOptions(new TestProperties().setHostedVespa(true).failDeploymentWithInvalidJvmOptions(true), new TestLogger(), "gc-options", "-XX:+ParallelGCThreads=8 foo bar"); }
class JvmOptionsTest extends ContainerModelBuilderTestBase { @Rule public ExpectedException expectedException = ExpectedException.none(); @Test public void verify_jvm_tag_with_attributes() throws IOException, SAXException { String servicesXml = "<container version='1.0'>" + " <search/>" + " <nodes>" + " <jvm options='-XX:SoftRefLRUPolicyMSPerMB=2500' gc-options='-XX:+UseParNewGC' allocated-memory='45%'/>" + " <node hostalias='mockhost'/>" + " </nodes>" + "</container>"; ApplicationPackage applicationPackage = new MockApplicationPackage.Builder().withServices(servicesXml).build(); final TestLogger logger = new TestLogger(); VespaModel model = new VespaModel(new NullConfigModelRegistry(), new DeployState.Builder() .applicationPackage(applicationPackage) .deployLogger(logger) .build()); QrStartConfig.Builder qrStartBuilder = new QrStartConfig.Builder(); model.getConfig(qrStartBuilder, "container/container.0"); QrStartConfig qrStartConfig = new QrStartConfig(qrStartBuilder); assertEquals("-XX:+UseParNewGC", qrStartConfig.jvm().gcopts()); assertEquals(45, qrStartConfig.jvm().heapSizeAsPercentageOfPhysicalMemory()); assertEquals("-XX:SoftRefLRUPolicyMSPerMB=2500", model.getContainerClusters().values().iterator().next().getContainers().get(0).getJvmOptions()); } @Test public void detect_conflicting_jvmgcoptions_in_jvmargs() { assertFalse(ContainerModelBuilder.incompatibleGCOptions("")); assertFalse(ContainerModelBuilder.incompatibleGCOptions("UseG1GC")); assertTrue(ContainerModelBuilder.incompatibleGCOptions("-XX:+UseG1GC")); assertTrue(ContainerModelBuilder.incompatibleGCOptions("abc -XX:+UseParNewGC xyz")); assertTrue(ContainerModelBuilder.incompatibleGCOptions("-XX:CMSInitiatingOccupancyFraction=19")); } @Test public void honours_jvm_gc_options() { Element clusterElem = DomBuilderTest.parse( "<container version='1.0'>", " <search/>", " <nodes jvm-gc-options='-XX:+UseG1GC'>", " <node hostalias='mockhost'/>", " </nodes>", "</container>" ); createModel(root, clusterElem); 
QrStartConfig.Builder qrStartBuilder = new QrStartConfig.Builder(); root.getConfig(qrStartBuilder, "container/container.0"); QrStartConfig qrStartConfig = new QrStartConfig(qrStartBuilder); assertEquals("-XX:+UseG1GC", qrStartConfig.jvm().gcopts()); } private static void verifyIgnoreJvmGCOptions(boolean isHosted) throws IOException, SAXException { verifyIgnoreJvmGCOptionsIfJvmArgs("jvmargs", ContainerCluster.G1GC, isHosted); verifyIgnoreJvmGCOptionsIfJvmArgs( "jvm-options", "-XX:+UseG1GC", isHosted); } private static void verifyIgnoreJvmGCOptionsIfJvmArgs(String jvmOptionsName, String expectedGC, boolean isHosted) throws IOException, SAXException { String servicesXml = "<container version='1.0'>" + " <nodes jvm-gc-options='-XX:+UseG1GC' " + jvmOptionsName + "='-XX:+UseParNewGC'>" + " <node hostalias='mockhost'/>" + " </nodes>" + "</container>"; ApplicationPackage applicationPackage = new MockApplicationPackage.Builder().withServices(servicesXml).build(); final TestLogger logger = new TestLogger(); VespaModel model = new VespaModel(new NullConfigModelRegistry(), new DeployState.Builder() .applicationPackage(applicationPackage) .deployLogger(logger) .properties(new TestProperties().setHostedVespa(isHosted)) .build()); QrStartConfig.Builder qrStartBuilder = new QrStartConfig.Builder(); model.getConfig(qrStartBuilder, "container/container.0"); QrStartConfig qrStartConfig = new QrStartConfig(qrStartBuilder); assertEquals(expectedGC, qrStartConfig.jvm().gcopts()); } @Test public void ignores_jvmgcoptions_on_conflicting_jvmargs() throws IOException, SAXException { verifyIgnoreJvmGCOptions(false); verifyIgnoreJvmGCOptions(true); } private void verifyJvmGCOptions(boolean isHosted, String featureFlagDefault, String override, String expected) throws IOException, SAXException { String servicesXml = "<container version='1.0'>" + " <nodes " + ((override == null) ? 
">" : ("jvm-gc-options='" + override + "'>")) + " <node hostalias='mockhost'/>" + " </nodes>" + "</container>"; ApplicationPackage applicationPackage = new MockApplicationPackage.Builder().withServices(servicesXml).build(); final TestLogger logger = new TestLogger(); VespaModel model = new VespaModel(new NullConfigModelRegistry(), new DeployState.Builder() .applicationPackage(applicationPackage) .deployLogger(logger) .properties(new TestProperties().setJvmGCOptions(featureFlagDefault).setHostedVespa(isHosted)) .build()); QrStartConfig.Builder qrStartBuilder = new QrStartConfig.Builder(); model.getConfig(qrStartBuilder, "container/container.0"); QrStartConfig qrStartConfig = new QrStartConfig(qrStartBuilder); assertEquals(expected, qrStartConfig.jvm().gcopts()); } @Test public void requireThatJvmGCOptionsIsHonoured() throws IOException, SAXException { verifyJvmGCOptions(false, null, null, ContainerCluster.G1GC); verifyJvmGCOptions(true, null, null, ContainerCluster.PARALLEL_GC); verifyJvmGCOptions(true, "", null, ContainerCluster.PARALLEL_GC); verifyJvmGCOptions(false, "-XX:+UseG1GC", null, "-XX:+UseG1GC"); verifyJvmGCOptions(true, "-XX:+UseG1GC", null, "-XX:+UseG1GC"); verifyJvmGCOptions(false, null, "-XX:+UseG1GC", "-XX:+UseG1GC"); verifyJvmGCOptions(false, "-XX:+UseParallelGC", "-XX:+UseG1GC", "-XX:+UseG1GC"); verifyJvmGCOptions(false, null, "-XX:+UseParallelGC", "-XX:+UseParallelGC"); } @Test public void requireThatInvalidJvmGcOptionsAreLogged() throws IOException, SAXException { verifyLoggingOfJvmGcOptions(true, "-XX:+ParallelGCThreads=8 foo bar", "foo", "bar"); verifyLoggingOfJvmGcOptions(true, "-XX:+UseCMSInitiatingOccupancyOnly foo bar", "-XX:+UseCMSInitiatingOccupancyOnly", "foo", "bar"); verifyLoggingOfJvmGcOptions(true, "-XX:+UseConcMarkSweepGC", "-XX:+UseConcMarkSweepGC"); verifyLoggingOfJvmGcOptions(true, "$(touch /tmp/hello-from-gc-options)", "$(touch", "/tmp/hello-from-gc-options)"); verifyLoggingOfJvmGcOptions(false, "$(touch 
/tmp/hello-from-gc-options)", "$(touch", "/tmp/hello-from-gc-options)"); verifyLoggingOfJvmGcOptions(true, "-XX:+ParallelGCThreads=8"); verifyLoggingOfJvmGcOptions(true, "-XX:MaxTenuringThreshold"); verifyLoggingOfJvmGcOptions(false, "-XX:+UseConcMarkSweepGC"); } @Test private void verifyLoggingOfJvmGcOptions(boolean isHosted, String override, String... invalidOptions) throws IOException, SAXException { verifyLoggingOfJvmOptions(isHosted, "gc-options", override, invalidOptions); } private void verifyLoggingOfJvmOptions(boolean isHosted, String optionName, String override, String... invalidOptions) throws IOException, SAXException { TestLogger logger = new TestLogger(); buildModelWithJvmOptions(isHosted, logger, optionName, override); List<String> strings = Arrays.asList(invalidOptions.clone()); if (strings.isEmpty()) return; Collections.sort(strings); Pair<Level, String> firstOption = logger.msgs.get(0); assertEquals(Level.WARNING, firstOption.getFirst()); assertEquals("Invalid JVM " + (optionName.equals("gc-options") ? 
"GC " : "") + "options from services.xml: " + String.join(",", strings), firstOption.getSecond()); } private void buildModelWithJvmOptions(boolean isHosted, TestLogger logger, String optionName, String override) throws IOException, SAXException { buildModelWithJvmOptions(new TestProperties().setHostedVespa(isHosted), logger, optionName, override); } private void buildModelWithJvmOptions(TestProperties properties, TestLogger logger, String optionName, String override) throws IOException, SAXException { String servicesXml = "<container version='1.0'>" + " <nodes>" + " <jvm " + optionName + "='" + override + "'/>" + " <node hostalias='mockhost'/>" + " </nodes>" + "</container>"; ApplicationPackage app = new MockApplicationPackage.Builder().withServices(servicesXml).build(); new VespaModel(new NullConfigModelRegistry(), new DeployState.Builder() .applicationPackage(app) .deployLogger(logger) .properties(properties) .build()); } @Test public void requireThatJvmOptionsAreLogged() throws IOException, SAXException { verifyLoggingOfJvmOptions(true, "options", "-Xms2G"); verifyLoggingOfJvmOptions(false, "options", "-Xms2G"); } }
class JvmOptionsTest extends ContainerModelBuilderTestBase { @Rule public ExpectedException expectedException = ExpectedException.none(); @Test public void verify_jvm_tag_with_attributes() throws IOException, SAXException { String servicesXml = "<container version='1.0'>" + " <search/>" + " <nodes>" + " <jvm options='-XX:SoftRefLRUPolicyMSPerMB=2500' gc-options='-XX:+UseParNewGC' allocated-memory='45%'/>" + " <node hostalias='mockhost'/>" + " </nodes>" + "</container>"; ApplicationPackage applicationPackage = new MockApplicationPackage.Builder().withServices(servicesXml).build(); final TestLogger logger = new TestLogger(); VespaModel model = new VespaModel(new NullConfigModelRegistry(), new DeployState.Builder() .applicationPackage(applicationPackage) .deployLogger(logger) .build()); QrStartConfig.Builder qrStartBuilder = new QrStartConfig.Builder(); model.getConfig(qrStartBuilder, "container/container.0"); QrStartConfig qrStartConfig = new QrStartConfig(qrStartBuilder); assertEquals("-XX:+UseParNewGC", qrStartConfig.jvm().gcopts()); assertEquals(45, qrStartConfig.jvm().heapSizeAsPercentageOfPhysicalMemory()); assertEquals("-XX:SoftRefLRUPolicyMSPerMB=2500", model.getContainerClusters().values().iterator().next().getContainers().get(0).getJvmOptions()); } @Test public void detect_conflicting_jvmgcoptions_in_jvmargs() { assertFalse(ContainerModelBuilder.incompatibleGCOptions("")); assertFalse(ContainerModelBuilder.incompatibleGCOptions("UseG1GC")); assertTrue(ContainerModelBuilder.incompatibleGCOptions("-XX:+UseG1GC")); assertTrue(ContainerModelBuilder.incompatibleGCOptions("abc -XX:+UseParNewGC xyz")); assertTrue(ContainerModelBuilder.incompatibleGCOptions("-XX:CMSInitiatingOccupancyFraction=19")); } @Test public void honours_jvm_gc_options() { Element clusterElem = DomBuilderTest.parse( "<container version='1.0'>", " <search/>", " <nodes jvm-gc-options='-XX:+UseG1GC'>", " <node hostalias='mockhost'/>", " </nodes>", "</container>" ); createModel(root, clusterElem); 
QrStartConfig.Builder qrStartBuilder = new QrStartConfig.Builder(); root.getConfig(qrStartBuilder, "container/container.0"); QrStartConfig qrStartConfig = new QrStartConfig(qrStartBuilder); assertEquals("-XX:+UseG1GC", qrStartConfig.jvm().gcopts()); } private static void verifyIgnoreJvmGCOptions(boolean isHosted) throws IOException, SAXException { verifyIgnoreJvmGCOptionsIfJvmArgs("jvmargs", ContainerCluster.G1GC, isHosted); verifyIgnoreJvmGCOptionsIfJvmArgs( "jvm-options", "-XX:+UseG1GC", isHosted); } private static void verifyIgnoreJvmGCOptionsIfJvmArgs(String jvmOptionsName, String expectedGC, boolean isHosted) throws IOException, SAXException { String servicesXml = "<container version='1.0'>" + " <nodes jvm-gc-options='-XX:+UseG1GC' " + jvmOptionsName + "='-XX:+UseParNewGC'>" + " <node hostalias='mockhost'/>" + " </nodes>" + "</container>"; ApplicationPackage applicationPackage = new MockApplicationPackage.Builder().withServices(servicesXml).build(); final TestLogger logger = new TestLogger(); VespaModel model = new VespaModel(new NullConfigModelRegistry(), new DeployState.Builder() .applicationPackage(applicationPackage) .deployLogger(logger) .properties(new TestProperties().setHostedVespa(isHosted)) .build()); QrStartConfig.Builder qrStartBuilder = new QrStartConfig.Builder(); model.getConfig(qrStartBuilder, "container/container.0"); QrStartConfig qrStartConfig = new QrStartConfig(qrStartBuilder); assertEquals(expectedGC, qrStartConfig.jvm().gcopts()); } @Test public void ignores_jvmgcoptions_on_conflicting_jvmargs() throws IOException, SAXException { verifyIgnoreJvmGCOptions(false); verifyIgnoreJvmGCOptions(true); } private void verifyJvmGCOptions(boolean isHosted, String featureFlagDefault, String override, String expected) throws IOException, SAXException { String servicesXml = "<container version='1.0'>" + " <nodes " + ((override == null) ? 
">" : ("jvm-gc-options='" + override + "'>")) + " <node hostalias='mockhost'/>" + " </nodes>" + "</container>"; ApplicationPackage applicationPackage = new MockApplicationPackage.Builder().withServices(servicesXml).build(); final TestLogger logger = new TestLogger(); VespaModel model = new VespaModel(new NullConfigModelRegistry(), new DeployState.Builder() .applicationPackage(applicationPackage) .deployLogger(logger) .properties(new TestProperties().setJvmGCOptions(featureFlagDefault).setHostedVespa(isHosted)) .build()); QrStartConfig.Builder qrStartBuilder = new QrStartConfig.Builder(); model.getConfig(qrStartBuilder, "container/container.0"); QrStartConfig qrStartConfig = new QrStartConfig(qrStartBuilder); assertEquals(expected, qrStartConfig.jvm().gcopts()); } @Test public void requireThatJvmGCOptionsIsHonoured() throws IOException, SAXException { verifyJvmGCOptions(false, null, null, ContainerCluster.G1GC); verifyJvmGCOptions(true, null, null, ContainerCluster.PARALLEL_GC); verifyJvmGCOptions(true, "", null, ContainerCluster.PARALLEL_GC); verifyJvmGCOptions(false, "-XX:+UseG1GC", null, "-XX:+UseG1GC"); verifyJvmGCOptions(true, "-XX:+UseG1GC", null, "-XX:+UseG1GC"); verifyJvmGCOptions(false, null, "-XX:+UseG1GC", "-XX:+UseG1GC"); verifyJvmGCOptions(false, "-XX:+UseParallelGC", "-XX:+UseG1GC", "-XX:+UseG1GC"); verifyJvmGCOptions(false, null, "-XX:+UseParallelGC", "-XX:+UseParallelGC"); } @Test public void requireThatInvalidJvmGcOptionsAreLogged() throws IOException, SAXException { verifyLoggingOfJvmGcOptions(true, "-XX:+ParallelGCThreads=8 foo bar", "foo", "bar"); verifyLoggingOfJvmGcOptions(true, "-XX:+UseCMSInitiatingOccupancyOnly foo bar", "-XX:+UseCMSInitiatingOccupancyOnly", "foo", "bar"); verifyLoggingOfJvmGcOptions(true, "-XX:+UseConcMarkSweepGC", "-XX:+UseConcMarkSweepGC"); verifyLoggingOfJvmGcOptions(true, "$(touch /tmp/hello-from-gc-options)", "$(touch", "/tmp/hello-from-gc-options)"); verifyLoggingOfJvmGcOptions(false, "$(touch 
/tmp/hello-from-gc-options)", "$(touch", "/tmp/hello-from-gc-options)"); verifyLoggingOfJvmGcOptions(true, "-XX:+ParallelGCThreads=8"); verifyLoggingOfJvmGcOptions(true, "-XX:MaxTenuringThreshold"); verifyLoggingOfJvmGcOptions(false, "-XX:+UseConcMarkSweepGC"); } @Test private void verifyLoggingOfJvmGcOptions(boolean isHosted, String override, String... invalidOptions) throws IOException, SAXException { verifyLoggingOfJvmOptions(isHosted, "gc-options", override, invalidOptions); } private void verifyLoggingOfJvmOptions(boolean isHosted, String optionName, String override, String... invalidOptions) throws IOException, SAXException { TestLogger logger = new TestLogger(); buildModelWithJvmOptions(isHosted, logger, optionName, override); List<String> strings = Arrays.asList(invalidOptions.clone()); if (strings.isEmpty()) return; Collections.sort(strings); Pair<Level, String> firstOption = logger.msgs.get(0); assertEquals(Level.WARNING, firstOption.getFirst()); assertEquals("Invalid JVM " + (optionName.equals("gc-options") ? 
"GC " : "") + "options from services.xml: " + String.join(",", strings), firstOption.getSecond()); } private void buildModelWithJvmOptions(boolean isHosted, TestLogger logger, String optionName, String override) throws IOException, SAXException { buildModelWithJvmOptions(new TestProperties().setHostedVespa(isHosted), logger, optionName, override); } private void buildModelWithJvmOptions(TestProperties properties, TestLogger logger, String optionName, String override) throws IOException, SAXException { String servicesXml = "<container version='1.0'>" + " <nodes>" + " <jvm " + optionName + "='" + override + "'/>" + " <node hostalias='mockhost'/>" + " </nodes>" + "</container>"; ApplicationPackage app = new MockApplicationPackage.Builder().withServices(servicesXml).build(); new VespaModel(new NullConfigModelRegistry(), new DeployState.Builder() .applicationPackage(app) .deployLogger(logger) .properties(properties) .build()); } @Test public void requireThatJvmOptionsAreLogged() throws IOException, SAXException { verifyLoggingOfJvmOptions(true, "options", "-Xms2G"); verifyLoggingOfJvmOptions(false, "options", "-Xms2G"); } }
```suggestion return grams.toArray(new String[grams.size()]); ``` Revert IntelliJ's silly suggestion of using empty array for `.toArray(...)`.
public String[] getContext(CharSequence document) { int[] normalized = normalizer.normalize(document).codePoints().map(Character::toLowerCase).toArray(); Set<String> grams = new HashSet<>(); for (int i = 0; i < normalized.length; i++) for (int j = minLength; j <= maxLength && i + j < normalized.length; j++) grams.add(new String(normalized, i, j)); return grams.toArray(new String[0]); }
return grams.toArray(new String[0]);
public String[] getContext(CharSequence document) { int[] normalized = normalizer.normalize(document).codePoints().map(Character::toLowerCase).toArray(); Set<String> grams = new HashSet<>(); for (int i = 0; i < normalized.length; i++) for (int j = minLength; j <= maxLength && i + j < normalized.length; j++) grams.add(new String(normalized, i, j)); return grams.toArray(new String[grams.size()]); }
class DefaultLanguageDetectorContextGenerator extends opennlp.tools.langdetect.DefaultLanguageDetectorContextGenerator { public DefaultLanguageDetectorContextGenerator(int minLength, int maxLength, CharSequenceNormalizer... normalizers) { super(minLength, maxLength, normalizers); } @Override }
class DefaultLanguageDetectorContextGenerator extends opennlp.tools.langdetect.DefaultLanguageDetectorContextGenerator { public DefaultLanguageDetectorContextGenerator(int minLength, int maxLength, CharSequenceNormalizer... normalizers) { super(minLength, maxLength, normalizers); } @Override }
Consider removing `append(' ')` as this is always removed by line 85 `return b.toString().trim()`
public String getJvmOptions() { StringBuilder b = new StringBuilder(); if (isHostedVespa) { if (hasDocproc()) { b.append(ApplicationContainer.defaultHostedJVMArgs).append(' '); } if (enableServerOcspStapling) { b.append("-Djdk.tls.server.enableStatusRequestExtension=true ") .append("-Djdk.tls.stapling.responseTimeout=2000 ") .append("-Djdk.tls.stapling.cacheSize=256 ") .append("-Djdk.tls.stapling.cacheLifetime=3600 "); } } String jvmArgs = super.getJvmOptions(); if (!jvmArgs.isBlank()) { b.append(jvmArgs.trim()).append(' '); } return b.toString().trim(); }
b.append(jvmArgs.trim()).append(' ');
public String getJvmOptions() { StringBuilder b = new StringBuilder(); if (isHostedVespa) { if (hasDocproc()) { b.append(ApplicationContainer.defaultHostedJVMArgs).append(' '); } if (enableServerOcspStapling) { b.append("-Djdk.tls.server.enableStatusRequestExtension=true ") .append("-Djdk.tls.stapling.responseTimeout=2000 ") .append("-Djdk.tls.stapling.cacheSize=256 ") .append("-Djdk.tls.stapling.cacheLifetime=3600 "); } } String jvmArgs = super.getJvmOptions(); if (!jvmArgs.isBlank()) { b.append(jvmArgs.trim()); } return b.toString().trim(); }
class ApplicationContainer extends Container implements QrStartConfig.Producer, ZookeeperServerConfig.Producer { private static final String defaultHostedJVMArgs = "-XX:+SuppressFatalErrorMessage"; private final boolean isHostedVespa; private final boolean enableServerOcspStapling; public ApplicationContainer(AbstractConfigProducer<?> parent, String name, int index, DeployState deployState) { this(parent, name, false, index, deployState); } public ApplicationContainer(AbstractConfigProducer<?> parent, String name, boolean retired, int index, DeployState deployState) { super(parent, name, retired, index, deployState); this.isHostedVespa = deployState.isHosted(); this.enableServerOcspStapling = deployState.featureFlags().enableServerOcspStapling(); addComponent(new SimpleComponent("com.yahoo.container.jdisc.messagebus.NetworkMultiplexerHolder")); addComponent(new SimpleComponent("com.yahoo.container.jdisc.messagebus.NetworkMultiplexerProvider")); addComponent(new SimpleComponent("com.yahoo.container.jdisc.messagebus.SessionCache")); addComponent(new SimpleComponent("com.yahoo.container.jdisc.SystemInfoProvider")); addComponent(new SimpleComponent("com.yahoo.container.jdisc.ZoneInfoProvider")); } @Override public void getConfig(QrStartConfig.Builder builder) { if (getHostResource() != null) { NodeResources nodeResources = getHostResource().realResources(); if ( ! 
nodeResources.isUnspecified()) { builder.jvm.availableProcessors(Math.max(2, (int)Math.ceil(nodeResources.vcpu()))); } } } @Override protected ContainerServiceType myServiceType() { if (parent instanceof ContainerCluster) { ContainerCluster<?> cluster = (ContainerCluster<?>)parent; if (cluster.getSearch() != null && cluster.getDocproc() == null && cluster.getDocumentApi() == null) { return ContainerServiceType.QRSERVER; } } return ContainerServiceType.CONTAINER; } /** Returns the jvm arguments this should start with */ @Override private boolean hasDocproc() { return (parent instanceof ContainerCluster) && (((ContainerCluster<?>)parent).getDocproc() != null); } @Override public void getConfig(ZookeeperServerConfig.Builder builder) { builder.myid(index()); } @Override protected String jvmOmitStackTraceInFastThrowOption(ModelContext.FeatureFlags featureFlags) { return featureFlags.jvmOmitStackTraceInFastThrowOption(ClusterSpec.Type.container); } }
class ApplicationContainer extends Container implements QrStartConfig.Producer, ZookeeperServerConfig.Producer { private static final String defaultHostedJVMArgs = "-XX:+SuppressFatalErrorMessage"; private final boolean isHostedVespa; private final boolean enableServerOcspStapling; public ApplicationContainer(AbstractConfigProducer<?> parent, String name, int index, DeployState deployState) { this(parent, name, false, index, deployState); } public ApplicationContainer(AbstractConfigProducer<?> parent, String name, boolean retired, int index, DeployState deployState) { super(parent, name, retired, index, deployState); this.isHostedVespa = deployState.isHosted(); this.enableServerOcspStapling = deployState.featureFlags().enableServerOcspStapling(); addComponent(new SimpleComponent("com.yahoo.container.jdisc.messagebus.NetworkMultiplexerHolder")); addComponent(new SimpleComponent("com.yahoo.container.jdisc.messagebus.NetworkMultiplexerProvider")); addComponent(new SimpleComponent("com.yahoo.container.jdisc.messagebus.SessionCache")); addComponent(new SimpleComponent("com.yahoo.container.jdisc.SystemInfoProvider")); addComponent(new SimpleComponent("com.yahoo.container.jdisc.ZoneInfoProvider")); } @Override public void getConfig(QrStartConfig.Builder builder) { if (getHostResource() != null) { NodeResources nodeResources = getHostResource().realResources(); if ( ! 
nodeResources.isUnspecified()) { builder.jvm.availableProcessors(Math.max(2, (int)Math.ceil(nodeResources.vcpu()))); } } } @Override protected ContainerServiceType myServiceType() { if (parent instanceof ContainerCluster) { ContainerCluster<?> cluster = (ContainerCluster<?>)parent; if (cluster.getSearch() != null && cluster.getDocproc() == null && cluster.getDocumentApi() == null) { return ContainerServiceType.QRSERVER; } } return ContainerServiceType.CONTAINER; } /** Returns the jvm arguments this should start with */ @Override private boolean hasDocproc() { return (parent instanceof ContainerCluster) && (((ContainerCluster<?>)parent).getDocproc() != null); } @Override public void getConfig(ZookeeperServerConfig.Builder builder) { builder.myid(index()); } @Override protected String jvmOmitStackTraceInFastThrowOption(ModelContext.FeatureFlags featureFlags) { return featureFlags.jvmOmitStackTraceInFastThrowOption(ClusterSpec.Type.container); } }
😎
public void testInvalidYqlQuery() throws IOException { IOUtils.copyDirectory(new File(testDir, "config_yql"), new File(tempDir), 1); generateComponentsConfigForActive(); configurer.reloadConfig(); SearchHandler newSearchHandler = fetchSearchHandler(configurer); assertTrue("Do I have a new instance of the search handler?", searchHandler != newSearchHandler); try (RequestHandlerTestDriver newDriver = new RequestHandlerTestDriver(newSearchHandler)) { ObjectNode json = jsonMapper.createObjectNode(); json.put("yql", "selectz * from foo where bar > 1453501295"); RequestHandlerTestDriver.MockResponseHandler responseHandler = newDriver.sendRequest(uri, com.yahoo.jdisc.http.HttpRequest.Method.POST, json.toString(), JSON_CONTENT_TYPE); responseHandler.readAll(); assertThat(responseHandler.getStatus(), is(400)); } }
json.put("yql", "selectz * from foo where bar > 1453501295");
public void testInvalidYqlQuery() throws IOException { IOUtils.copyDirectory(new File(testDir, "config_yql"), new File(tempDir), 1); generateComponentsConfigForActive(); configurer.reloadConfig(); SearchHandler newSearchHandler = fetchSearchHandler(configurer); assertTrue("Do I have a new instance of the search handler?", searchHandler != newSearchHandler); try (RequestHandlerTestDriver newDriver = new RequestHandlerTestDriver(newSearchHandler)) { ObjectNode json = jsonMapper.createObjectNode(); json.put("yql", "selectz * from foo where bar > 1453501295"); RequestHandlerTestDriver.MockResponseHandler responseHandler = newDriver.sendRequest(uri, com.yahoo.jdisc.http.HttpRequest.Method.POST, json.toString(), JSON_CONTENT_TYPE); responseHandler.readAll(); assertThat(responseHandler.getStatus(), is(400)); } }
class JSONSearchHandlerTestCase { private static final ObjectMapper jsonMapper = new ObjectMapper(); private static final String testDir = "src/test/java/com/yahoo/search/handler/test/config"; private static final String myHostnameHeader = "my-hostname-header"; private static final String selfHostname = HostName.getLocalhost(); private static String tempDir = ""; private static String configId = null; private static final String uri = "http: private static final String JSON_CONTENT_TYPE = "application/json"; @Rule public TemporaryFolder tempfolder = new TemporaryFolder(); private RequestHandlerTestDriver driver = null; private HandlersConfigurerTestWrapper configurer = null; private SearchHandler searchHandler; @Before public void startUp() throws IOException { File cfgDir = tempfolder.newFolder("SearchHandlerTestCase"); tempDir = cfgDir.getAbsolutePath(); configId = "dir:" + tempDir; IOUtils.copyDirectory(new File(testDir), cfgDir, 1); generateComponentsConfigForActive(); configurer = new HandlersConfigurerTestWrapper(new Container(), configId); searchHandler = (SearchHandler)configurer.getRequestHandlerRegistry().getComponent(SearchHandler.class.getName()); driver = new RequestHandlerTestDriver(searchHandler); } @After public void shutDown() { if (configurer != null) configurer.shutdown(); if (driver != null) driver.close(); } private void generateComponentsConfigForActive() throws IOException { File activeConfig = new File(tempDir); SearchChainConfigurerTestCase. 
createComponentsConfig(new File(activeConfig, "chains.cfg").getPath(), new File(activeConfig, "handlers.cfg").getPath(), new File(activeConfig, "components.cfg").getPath()); } private SearchHandler fetchSearchHandler(HandlersConfigurerTestWrapper configurer) { return (SearchHandler) configurer.getRequestHandlerRegistry().getComponent(SearchHandler.class.getName()); } @Test public void testBadJSON() { String json = "Not a valid JSON-string"; RequestHandlerTestDriver.MockResponseHandler responseHandler = driver.sendRequest(uri, com.yahoo.jdisc.http.HttpRequest.Method.POST, json, JSON_CONTENT_TYPE); String response = responseHandler.readAll(); assertThat(responseHandler.getStatus(), is(400)); assertThat(response, containsString("errors")); assertThat(response, containsString("\"code\":" + Error.ILLEGAL_QUERY.code)); } @Test public void testFailing() { ObjectNode json = jsonMapper.createObjectNode(); json.put("query", "test"); json.put("searchChain", "classLoadingError"); assertTrue(driver.sendRequest(uri, com.yahoo.jdisc.http.HttpRequest.Method.POST, json.toString(), JSON_CONTENT_TYPE).readAll().contains("NoClassDefFoundError")); } @Test public synchronized void testPluginError() { ObjectNode json = jsonMapper.createObjectNode(); json.put("query", "test"); json.put("searchChain", "exceptionInPlugin"); assertTrue(driver.sendRequest(uri, com.yahoo.jdisc.http.HttpRequest.Method.POST, json.toString(), JSON_CONTENT_TYPE).readAll().contains("NullPointerException")); } @Test public synchronized void testWorkingReconfiguration() throws IOException { ObjectNode json = jsonMapper.createObjectNode(); json.put("query", "abc"); assertJsonResult(json, driver); IOUtils.copyDirectory(new File(testDir, "handlers2"), new File(tempDir), 1); generateComponentsConfigForActive(); configurer.reloadConfig(); SearchHandler newSearchHandler = fetchSearchHandler(configurer); assertNotSame("Have a new instance of the search handler", searchHandler, newSearchHandler); assertNotNull("Have the new 
search chain", fetchSearchHandler(configurer).getSearchChainRegistry().getChain("hello")); assertNull("Don't have the new search chain", fetchSearchHandler(configurer).getSearchChainRegistry().getChain("classLoadingError")); try (RequestHandlerTestDriver newDriver = new RequestHandlerTestDriver(newSearchHandler)) { assertJsonResult(json, newDriver); } } @Test @Test public void testInvalidQueryParamWithQueryProfile() throws IOException { try (RequestHandlerTestDriver newDriver = driverWithConfig("config_invalid_param")) { testInvalidQueryParam(newDriver); } } private void testInvalidQueryParam(final RequestHandlerTestDriver testDriver) { ObjectNode json = jsonMapper.createObjectNode(); json.put("query", "status_code:0"); json.put("hits", 20); json.put("offset", -20); RequestHandlerTestDriver.MockResponseHandler responseHandler = testDriver.sendRequest(uri, com.yahoo.jdisc.http.HttpRequest.Method.POST, json.toString(), JSON_CONTENT_TYPE); String response = responseHandler.readAll(); assertThat(responseHandler.getStatus(), is(400)); assertThat(response, containsString("offset")); assertThat(response, containsString("\"code\":" + com.yahoo.container.protect.Error.ILLEGAL_QUERY.code)); } @Test public void testNormalResultJsonAliasRendering() { ObjectNode json = jsonMapper.createObjectNode(); json.put("format", "json"); json.put("query", "abc"); assertJsonResult(json, driver); } @Test public void testNullQuery() { ObjectNode json = jsonMapper.createObjectNode(); json.put("format", "xml"); assertEquals("<?xml version=\"1.0\" encoding=\"utf-8\" ?>\n" + "<result total-hit-count=\"0\">\n" + " <hit relevancy=\"1.0\">\n" + " <field name=\"relevancy\">1.0</field>\n" + " <field name=\"uri\">testHit</field>\n" + " </hit>\n" + "</result>\n", driver.sendRequest(uri, com.yahoo.jdisc.http.HttpRequest.Method.POST, json.toString(), JSON_CONTENT_TYPE).readAll()); } @Test public void testWebServiceStatus() { ObjectNode json = jsonMapper.createObjectNode(); json.put("query", 
"web_service_status_code"); RequestHandlerTestDriver.MockResponseHandler responseHandler = driver.sendRequest(uri, com.yahoo.jdisc.http.HttpRequest.Method.POST, json.toString(), JSON_CONTENT_TYPE); String response = responseHandler.readAll(); assertThat(responseHandler.getStatus(), is(406)); assertThat(response, containsString("\"code\":" + 406)); } @Test public void testNormalResultImplicitDefaultRendering() { ObjectNode json = jsonMapper.createObjectNode(); json.put("query", "abc"); assertJsonResult(json, driver); } @Test public void testNormalResultExplicitDefaultRendering() { ObjectNode json = jsonMapper.createObjectNode(); json.put("query", "abc"); json.put("format", "default"); assertJsonResult(json, driver); } @Test public void testNormalResultXmlAliasRendering() { ObjectNode json = jsonMapper.createObjectNode(); json.put("query", "abc"); json.put("format", "xml"); assertXmlResult(json, driver); } @Test public void testNormalResultExplicitDefaultRenderingFullRendererName1() { ObjectNode json = jsonMapper.createObjectNode(); json.put("query", "abc"); json.put("format", "XmlRenderer"); assertXmlResult(json, driver); } @Test public void testNormalResultExplicitDefaultRenderingFullRendererName2() { ObjectNode json = jsonMapper.createObjectNode(); json.put("query", "abc"); json.put("format", "JsonRenderer"); assertJsonResult(json, driver); } private static final String xmlResult = "<?xml version=\"1.0\" encoding=\"utf-8\" ?>\n" + "<result total-hit-count=\"0\">\n" + " <hit relevancy=\"1.0\">\n" + " <field name=\"relevancy\">1.0</field>\n" + " <field name=\"uri\">testHit</field>\n" + " </hit>\n" + "</result>\n"; private void assertXmlResult(JsonNode json, RequestHandlerTestDriver driver) { assertOkResult(driver.sendRequest(uri, com.yahoo.jdisc.http.HttpRequest.Method.POST, json.toString(), JSON_CONTENT_TYPE), xmlResult); } private static final String jsonResult = "{\"root\":{" + "\"id\":\"toplevel\",\"relevance\":1.0,\"fields\":{\"totalCount\":0}," + 
"\"children\":[" + "{\"id\":\"testHit\",\"relevance\":1.0,\"fields\":{\"uri\":\"testHit\"}}" + "]}}"; private void assertJsonResult(JsonNode json, RequestHandlerTestDriver driver) { assertOkResult(driver.sendRequest(uri, com.yahoo.jdisc.http.HttpRequest.Method.POST, json.toString(), JSON_CONTENT_TYPE), jsonResult); } private static final String pageResult = "<?xml version=\"1.0\" encoding=\"utf-8\" ?>\n" + "<page version=\"1.0\">\n" + "\n" + " <content>\n" + " <hit relevance=\"1.0\">\n" + " <id>testHit</id>\n" + " <uri>testHit</uri>\n" + " </hit>\n" + " </content>\n" + "\n" + "</page>\n"; private void assertOkResult(RequestHandlerTestDriver.MockResponseHandler response, String expected) { assertEquals(expected, response.readAll()); assertEquals(200, response.getStatus()); assertEquals(selfHostname, response.getResponse().headers().get(myHostnameHeader).get(0)); } private RequestHandlerTestDriver driverWithConfig(String configDirectory) throws IOException { IOUtils.copyDirectory(new File(testDir, configDirectory), new File(tempDir), 1); generateComponentsConfigForActive(); configurer.reloadConfig(); SearchHandler newSearchHandler = fetchSearchHandler(configurer); assertTrue("Do I have a new instance of the search handler?", searchHandler != newSearchHandler); return new RequestHandlerTestDriver(newSearchHandler); } @Test public void testSelectParameters() throws IOException { ObjectNode json = jsonMapper.createObjectNode(); ObjectNode select = jsonMapper.createObjectNode(); ObjectNode where = jsonMapper.createObjectNode(); where.put("where", "where"); ObjectNode grouping = jsonMapper.createObjectNode(); grouping.put("grouping", "grouping"); select.set("where", where); select.set("grouping", grouping); json.set("select", select); Inspector inspector = SlimeUtils.jsonToSlime(json.toString().getBytes(StandardCharsets.UTF_8)).get(); Map<String, String> map = new HashMap<>(); searchHandler.createRequestMapping(inspector, map, ""); JsonNode processedWhere = 
jsonMapper.readTree(map.get("select.where")); JsonTestHelper.assertJsonEquals(where.toString(), processedWhere.toString()); JsonNode processedGrouping = jsonMapper.readTree(map.get("select.grouping")); JsonTestHelper.assertJsonEquals(grouping.toString(), processedGrouping.toString()); } @Test public void testJsonQueryWithSelectWhere() { ObjectNode root = jsonMapper.createObjectNode(); ObjectNode select = jsonMapper.createObjectNode(); ObjectNode where = jsonMapper.createObjectNode(); ArrayNode term = jsonMapper.createArrayNode(); term.add("default"); term.add("bad"); where.set("contains", term); select.set("where", where); root.set("select", select); String result = driver.sendRequest(uri + "searchChain=echoingQuery", com.yahoo.jdisc.http.HttpRequest.Method.POST, root.toString(), JSON_CONTENT_TYPE).readAll(); assertEquals("{\"root\":{\"id\":\"toplevel\",\"relevance\":1.0,\"fields\":{\"totalCount\":0},\"children\":[{\"id\":\"Query\",\"relevance\":1.0,\"fields\":{\"query\":\"select * from sources * where default contains \\\"bad\\\";\"}}]}}", result); } @Test public void testJsonWithWhereAndGroupingUnderSelect() { String query = "{\n" + " \"select\": {\n" + " \"where\": {\n" + " \"contains\": [\n" + " \"field\",\n" + " \"term\"\n" + " ]\n" + " },\n" + " \"grouping\":[\n" + " {\n" + " \"all\": {\n" + " \"output\": \"count()\"\n" + " }\n" + " }\n" + " ]\n" + " }\n" + "}\n"; String result = driver.sendRequest(uri + "searchChain=echoingQuery", com.yahoo.jdisc.http.HttpRequest.Method.POST, query, JSON_CONTENT_TYPE).readAll(); String expected = "{\"root\":{\"id\":\"toplevel\",\"relevance\":1.0,\"fields\":{\"totalCount\":0},\"children\":[{\"id\":\"Query\",\"relevance\":1.0,\"fields\":{\"query\":\"select * from sources * where field contains \\\"term\\\" | all(output(count()));\"}}]}}"; assertEquals(expected, result); } @Test public void testJsonWithWhereAndGroupingSeparate() { String query = "{\n" + " \"select.where\": {\n" + " \"contains\": [\n" + " \"field\",\n" + " 
\"term\"\n" + " ]\n" + " },\n" + " \"select.grouping\":[\n" + " {\n" + " \"all\": {\n" + " \"output\": \"count()\"\n" + " }\n" + " }\n" + " ]\n" + "}\n"; String result = driver.sendRequest(uri + "searchChain=echoingQuery", com.yahoo.jdisc.http.HttpRequest.Method.POST, query, JSON_CONTENT_TYPE).readAll(); String expected = "{\"root\":{\"id\":\"toplevel\",\"relevance\":1.0,\"fields\":{\"totalCount\":0},\"children\":[{\"id\":\"Query\",\"relevance\":1.0,\"fields\":{\"query\":\"select * from sources * where field contains \\\"term\\\" | all(output(count()));\"}}]}}"; assertEquals(expected, result); } @Test public void testJsonQueryWithYQL() { ObjectNode root = jsonMapper.createObjectNode(); root.put("yql", "select * from sources * where default contains 'bad';"); String result = driver.sendRequest(uri + "searchChain=echoingQuery", com.yahoo.jdisc.http.HttpRequest.Method.POST, root.toString(), JSON_CONTENT_TYPE).readAll(); assertEquals("{\"root\":{\"id\":\"toplevel\",\"relevance\":1.0,\"fields\":{\"totalCount\":0},\"children\":[{\"id\":\"Query\",\"relevance\":1.0,\"fields\":{\"query\":\"select * from sources * where default contains \\\"bad\\\";\"}}]}}", result); } @Test public void testRequestMapping() { ObjectNode json = jsonMapper.createObjectNode(); json.put("yql", "select * from sources * where sddocname contains \"blog_post\" limit 0 | all(group(date) max(3) order(-count())each(output(count())));"); json.put("hits", 10); json.put("offset", 5); json.put("queryProfile", "foo"); json.put("nocache", false); json.put("groupingSessionCache", false); json.put("searchChain", "exceptionInPlugin"); json.put("timeout", 0); json.put("select", "_all"); ObjectNode model = jsonMapper.createObjectNode(); model.put("defaultIndex", 1); model.put("encoding", "json"); model.put("filter", "default"); model.put("language", "en"); model.put("queryString", "abc"); model.put("restrict", "_doc,json,xml"); model.put("searchPath", "node1"); model.put("sources", "source1,source2"); 
model.put("type", "yql"); json.set("model", model); ObjectNode ranking = jsonMapper.createObjectNode(); ranking.put("location", "123789.89123N;128123W"); ranking.put("features", "none"); ranking.put("listFeatures", false); ranking.put("profile", "1"); ranking.put("properties", "default"); ranking.put("sorting", "desc"); ranking.put("freshness", "0.05"); ranking.put("queryCache", false); ObjectNode matchPhase = jsonMapper.createObjectNode(); matchPhase.put("maxHits", "100"); matchPhase.put("attribute", "title"); matchPhase.put("ascending", true); ObjectNode diversity = jsonMapper.createObjectNode(); diversity.put("attribute", "title"); diversity.put("minGroups", 1); matchPhase.set("diversity", diversity); ranking.set("matchPhase", matchPhase); json.set("ranking", ranking); ObjectNode presentation = jsonMapper.createObjectNode(); presentation.put("bolding", true); presentation.put("format", "json"); presentation.put("summary", "none"); presentation.put("template", "json"); presentation.put("timing", false); json.set("presentation", presentation); ObjectNode collapse = jsonMapper.createObjectNode(); collapse.put("field", "none"); collapse.put("size", 2); collapse.put("summary", "default"); json.set("collapse", collapse); ObjectNode trace = jsonMapper.createObjectNode(); trace.put("level", 1); trace.put("timestamps", false); trace.put("rules", "none"); json.set("trace", trace); ObjectNode pos = jsonMapper.createObjectNode(); pos.put("ll", "1263123N;1231.9W"); pos.put("radius", "71234m"); pos.put("bb", "1237123W;123218N"); pos.put("attribute", "default"); json.set("pos", pos); ObjectNode streaming = jsonMapper.createObjectNode(); streaming.put("userid", 123); streaming.put("groupname", "abc"); streaming.put("selection", "none"); streaming.put("priority", 10); streaming.put("maxbucketspervisitor", 5); json.set("streaming", streaming); ObjectNode rules = jsonMapper.createObjectNode(); rules.put("off", false); rules.put("rulebase", "default"); json.set("rules", rules); 
ObjectNode metrics = jsonMapper.createObjectNode(); metrics.put("ignore", "_all"); json.set("metrics", metrics); json.put("recall", "none"); json.put("user", 123); json.put("nocachewrite", false); json.put("hitcountestimate", true); Inspector inspector = SlimeUtils.jsonToSlime(json.toString().getBytes(StandardCharsets.UTF_8)).get(); Map<String, String> map = new HashMap<>(); searchHandler.createRequestMapping(inspector, map, ""); String url = uri + "&model.sources=source1%2Csource2&select=_all&model.language=en&presentation.timing=false&pos.attribute=default&pos.radius=71234m&model.searchPath=node1&nocachewrite=false&ranking.matchPhase.maxHits=100&presentation.summary=none" + "&nocache=false&model.type=yql&collapse.summary=default&ranking.matchPhase.diversity.minGroups=1&ranking.location=123789.89123N%3B128123W&ranking.queryCache=false&offset=5&streaming.groupname=abc&groupingSessionCache=false" + "&presentation.template=json&trace.rules=none&rules.off=false&ranking.properties=default&searchChain=exceptionInPlugin&pos.ll=1263123N%3B1231.9W&ranking.sorting=desc&ranking.matchPhase.ascending=true&ranking.features=none&hitcountestimate=true" + "&model.filter=default&metrics.ignore=_all&collapse.field=none&ranking.profile=1&rules.rulebase=default&model.defaultIndex=1&trace.level=1&ranking.listFeatures=false&timeout=0&presentation.format=json" + "&yql=select+%2A+from+sources+%2A+where+sddocname+contains+%22blog_post%22+limit+0+%7C+all%28group%28date%29+max%283%29+order%28-count%28%29%29each%28output%28count%28%29%29%29%29%3B&recall=none&streaming.maxbucketspervisitor=5" + "&queryProfile=foo&presentation.bolding=true&model.encoding=json&model.queryString=abc&streaming.selection=none&trace.timestamps=false&collapse.size=2&streaming.priority=10&ranking.matchPhase.diversity.attribute=title" + "&ranking.matchPhase.attribute=title&hits=10&streaming.userid=123&pos.bb=1237123W%3B123218N&model.restrict=_doc%2Cjson%2Cxml&ranking.freshness=0.05&user=123"; HttpRequest request = 
HttpRequest.createTestRequest(url, GET); Map<String, String> propertyMap = request.propertyMap(); Assertions.assertThat(propertyMap).isEqualTo(map); } @Test public void testContentTypeParsing() { ObjectNode json = jsonMapper.createObjectNode(); json.put("query", "abc"); assertOkResult(driver.sendRequest(uri, com.yahoo.jdisc.http.HttpRequest.Method.POST, json.toString(), "Application/JSON; charset=utf-8"), jsonResult); } }
class JSONSearchHandlerTestCase { private static final ObjectMapper jsonMapper = new ObjectMapper(); private static final String testDir = "src/test/java/com/yahoo/search/handler/test/config"; private static final String myHostnameHeader = "my-hostname-header"; private static final String selfHostname = HostName.getLocalhost(); private static String tempDir = ""; private static String configId = null; private static final String uri = "http: private static final String JSON_CONTENT_TYPE = "application/json"; @Rule public TemporaryFolder tempfolder = new TemporaryFolder(); private RequestHandlerTestDriver driver = null; private HandlersConfigurerTestWrapper configurer = null; private SearchHandler searchHandler; @Before public void startUp() throws IOException { File cfgDir = tempfolder.newFolder("SearchHandlerTestCase"); tempDir = cfgDir.getAbsolutePath(); configId = "dir:" + tempDir; IOUtils.copyDirectory(new File(testDir), cfgDir, 1); generateComponentsConfigForActive(); configurer = new HandlersConfigurerTestWrapper(new Container(), configId); searchHandler = (SearchHandler)configurer.getRequestHandlerRegistry().getComponent(SearchHandler.class.getName()); driver = new RequestHandlerTestDriver(searchHandler); } @After public void shutDown() { if (configurer != null) configurer.shutdown(); if (driver != null) driver.close(); } private void generateComponentsConfigForActive() throws IOException { File activeConfig = new File(tempDir); SearchChainConfigurerTestCase. 
createComponentsConfig(new File(activeConfig, "chains.cfg").getPath(), new File(activeConfig, "handlers.cfg").getPath(), new File(activeConfig, "components.cfg").getPath()); } private SearchHandler fetchSearchHandler(HandlersConfigurerTestWrapper configurer) { return (SearchHandler) configurer.getRequestHandlerRegistry().getComponent(SearchHandler.class.getName()); } @Test public void testBadJSON() { String json = "Not a valid JSON-string"; RequestHandlerTestDriver.MockResponseHandler responseHandler = driver.sendRequest(uri, com.yahoo.jdisc.http.HttpRequest.Method.POST, json, JSON_CONTENT_TYPE); String response = responseHandler.readAll(); assertThat(responseHandler.getStatus(), is(400)); assertThat(response, containsString("errors")); assertThat(response, containsString("\"code\":" + Error.ILLEGAL_QUERY.code)); } @Test public void testFailing() { ObjectNode json = jsonMapper.createObjectNode(); json.put("query", "test"); json.put("searchChain", "classLoadingError"); assertTrue(driver.sendRequest(uri, com.yahoo.jdisc.http.HttpRequest.Method.POST, json.toString(), JSON_CONTENT_TYPE).readAll().contains("NoClassDefFoundError")); } @Test public synchronized void testPluginError() { ObjectNode json = jsonMapper.createObjectNode(); json.put("query", "test"); json.put("searchChain", "exceptionInPlugin"); assertTrue(driver.sendRequest(uri, com.yahoo.jdisc.http.HttpRequest.Method.POST, json.toString(), JSON_CONTENT_TYPE).readAll().contains("NullPointerException")); } @Test public synchronized void testWorkingReconfiguration() throws IOException { ObjectNode json = jsonMapper.createObjectNode(); json.put("query", "abc"); assertJsonResult(json, driver); IOUtils.copyDirectory(new File(testDir, "handlers2"), new File(tempDir), 1); generateComponentsConfigForActive(); configurer.reloadConfig(); SearchHandler newSearchHandler = fetchSearchHandler(configurer); assertNotSame("Have a new instance of the search handler", searchHandler, newSearchHandler); assertNotNull("Have the new 
search chain", fetchSearchHandler(configurer).getSearchChainRegistry().getChain("hello")); assertNull("Don't have the new search chain", fetchSearchHandler(configurer).getSearchChainRegistry().getChain("classLoadingError")); try (RequestHandlerTestDriver newDriver = new RequestHandlerTestDriver(newSearchHandler)) { assertJsonResult(json, newDriver); } } @Test @Test public void testInvalidQueryParamWithQueryProfile() throws IOException { try (RequestHandlerTestDriver newDriver = driverWithConfig("config_invalid_param")) { testInvalidQueryParam(newDriver); } } private void testInvalidQueryParam(final RequestHandlerTestDriver testDriver) { ObjectNode json = jsonMapper.createObjectNode(); json.put("query", "status_code:0"); json.put("hits", 20); json.put("offset", -20); RequestHandlerTestDriver.MockResponseHandler responseHandler = testDriver.sendRequest(uri, com.yahoo.jdisc.http.HttpRequest.Method.POST, json.toString(), JSON_CONTENT_TYPE); String response = responseHandler.readAll(); assertThat(responseHandler.getStatus(), is(400)); assertThat(response, containsString("offset")); assertThat(response, containsString("\"code\":" + com.yahoo.container.protect.Error.ILLEGAL_QUERY.code)); } @Test public void testNormalResultJsonAliasRendering() { ObjectNode json = jsonMapper.createObjectNode(); json.put("format", "json"); json.put("query", "abc"); assertJsonResult(json, driver); } @Test public void testNullQuery() { ObjectNode json = jsonMapper.createObjectNode(); json.put("format", "xml"); assertEquals("<?xml version=\"1.0\" encoding=\"utf-8\" ?>\n" + "<result total-hit-count=\"0\">\n" + " <hit relevancy=\"1.0\">\n" + " <field name=\"relevancy\">1.0</field>\n" + " <field name=\"uri\">testHit</field>\n" + " </hit>\n" + "</result>\n", driver.sendRequest(uri, com.yahoo.jdisc.http.HttpRequest.Method.POST, json.toString(), JSON_CONTENT_TYPE).readAll()); } @Test public void testWebServiceStatus() { ObjectNode json = jsonMapper.createObjectNode(); json.put("query", 
"web_service_status_code"); RequestHandlerTestDriver.MockResponseHandler responseHandler = driver.sendRequest(uri, com.yahoo.jdisc.http.HttpRequest.Method.POST, json.toString(), JSON_CONTENT_TYPE); String response = responseHandler.readAll(); assertThat(responseHandler.getStatus(), is(406)); assertThat(response, containsString("\"code\":" + 406)); } @Test public void testNormalResultImplicitDefaultRendering() { ObjectNode json = jsonMapper.createObjectNode(); json.put("query", "abc"); assertJsonResult(json, driver); } @Test public void testNormalResultExplicitDefaultRendering() { ObjectNode json = jsonMapper.createObjectNode(); json.put("query", "abc"); json.put("format", "default"); assertJsonResult(json, driver); } @Test public void testNormalResultXmlAliasRendering() { ObjectNode json = jsonMapper.createObjectNode(); json.put("query", "abc"); json.put("format", "xml"); assertXmlResult(json, driver); } @Test public void testNormalResultExplicitDefaultRenderingFullRendererName1() { ObjectNode json = jsonMapper.createObjectNode(); json.put("query", "abc"); json.put("format", "XmlRenderer"); assertXmlResult(json, driver); } @Test public void testNormalResultExplicitDefaultRenderingFullRendererName2() { ObjectNode json = jsonMapper.createObjectNode(); json.put("query", "abc"); json.put("format", "JsonRenderer"); assertJsonResult(json, driver); } private static final String xmlResult = "<?xml version=\"1.0\" encoding=\"utf-8\" ?>\n" + "<result total-hit-count=\"0\">\n" + " <hit relevancy=\"1.0\">\n" + " <field name=\"relevancy\">1.0</field>\n" + " <field name=\"uri\">testHit</field>\n" + " </hit>\n" + "</result>\n"; private void assertXmlResult(JsonNode json, RequestHandlerTestDriver driver) { assertOkResult(driver.sendRequest(uri, com.yahoo.jdisc.http.HttpRequest.Method.POST, json.toString(), JSON_CONTENT_TYPE), xmlResult); } private static final String jsonResult = "{\"root\":{" + "\"id\":\"toplevel\",\"relevance\":1.0,\"fields\":{\"totalCount\":0}," + 
"\"children\":[" + "{\"id\":\"testHit\",\"relevance\":1.0,\"fields\":{\"uri\":\"testHit\"}}" + "]}}"; private void assertJsonResult(JsonNode json, RequestHandlerTestDriver driver) { assertOkResult(driver.sendRequest(uri, com.yahoo.jdisc.http.HttpRequest.Method.POST, json.toString(), JSON_CONTENT_TYPE), jsonResult); } private static final String pageResult = "<?xml version=\"1.0\" encoding=\"utf-8\" ?>\n" + "<page version=\"1.0\">\n" + "\n" + " <content>\n" + " <hit relevance=\"1.0\">\n" + " <id>testHit</id>\n" + " <uri>testHit</uri>\n" + " </hit>\n" + " </content>\n" + "\n" + "</page>\n"; private void assertOkResult(RequestHandlerTestDriver.MockResponseHandler response, String expected) { assertEquals(expected, response.readAll()); assertEquals(200, response.getStatus()); assertEquals(selfHostname, response.getResponse().headers().get(myHostnameHeader).get(0)); } private RequestHandlerTestDriver driverWithConfig(String configDirectory) throws IOException { IOUtils.copyDirectory(new File(testDir, configDirectory), new File(tempDir), 1); generateComponentsConfigForActive(); configurer.reloadConfig(); SearchHandler newSearchHandler = fetchSearchHandler(configurer); assertTrue("Do I have a new instance of the search handler?", searchHandler != newSearchHandler); return new RequestHandlerTestDriver(newSearchHandler); } @Test public void testSelectParameters() throws IOException { ObjectNode json = jsonMapper.createObjectNode(); ObjectNode select = jsonMapper.createObjectNode(); ObjectNode where = jsonMapper.createObjectNode(); where.put("where", "where"); ObjectNode grouping = jsonMapper.createObjectNode(); grouping.put("grouping", "grouping"); select.set("where", where); select.set("grouping", grouping); json.set("select", select); Inspector inspector = SlimeUtils.jsonToSlime(json.toString().getBytes(StandardCharsets.UTF_8)).get(); Map<String, String> map = new HashMap<>(); searchHandler.createRequestMapping(inspector, map, ""); JsonNode processedWhere = 
jsonMapper.readTree(map.get("select.where")); JsonTestHelper.assertJsonEquals(where.toString(), processedWhere.toString()); JsonNode processedGrouping = jsonMapper.readTree(map.get("select.grouping")); JsonTestHelper.assertJsonEquals(grouping.toString(), processedGrouping.toString()); } @Test public void testJsonQueryWithSelectWhere() { ObjectNode root = jsonMapper.createObjectNode(); ObjectNode select = jsonMapper.createObjectNode(); ObjectNode where = jsonMapper.createObjectNode(); ArrayNode term = jsonMapper.createArrayNode(); term.add("default"); term.add("bad"); where.set("contains", term); select.set("where", where); root.set("select", select); String result = driver.sendRequest(uri + "searchChain=echoingQuery", com.yahoo.jdisc.http.HttpRequest.Method.POST, root.toString(), JSON_CONTENT_TYPE).readAll(); assertEquals("{\"root\":{\"id\":\"toplevel\",\"relevance\":1.0,\"fields\":{\"totalCount\":0},\"children\":[{\"id\":\"Query\",\"relevance\":1.0,\"fields\":{\"query\":\"select * from sources * where default contains \\\"bad\\\";\"}}]}}", result); } @Test public void testJsonWithWhereAndGroupingUnderSelect() { String query = "{\n" + " \"select\": {\n" + " \"where\": {\n" + " \"contains\": [\n" + " \"field\",\n" + " \"term\"\n" + " ]\n" + " },\n" + " \"grouping\":[\n" + " {\n" + " \"all\": {\n" + " \"output\": \"count()\"\n" + " }\n" + " }\n" + " ]\n" + " }\n" + "}\n"; String result = driver.sendRequest(uri + "searchChain=echoingQuery", com.yahoo.jdisc.http.HttpRequest.Method.POST, query, JSON_CONTENT_TYPE).readAll(); String expected = "{\"root\":{\"id\":\"toplevel\",\"relevance\":1.0,\"fields\":{\"totalCount\":0},\"children\":[{\"id\":\"Query\",\"relevance\":1.0,\"fields\":{\"query\":\"select * from sources * where field contains \\\"term\\\" | all(output(count()));\"}}]}}"; assertEquals(expected, result); } @Test public void testJsonWithWhereAndGroupingSeparate() { String query = "{\n" + " \"select.where\": {\n" + " \"contains\": [\n" + " \"field\",\n" + " 
\"term\"\n" + " ]\n" + " },\n" + " \"select.grouping\":[\n" + " {\n" + " \"all\": {\n" + " \"output\": \"count()\"\n" + " }\n" + " }\n" + " ]\n" + "}\n"; String result = driver.sendRequest(uri + "searchChain=echoingQuery", com.yahoo.jdisc.http.HttpRequest.Method.POST, query, JSON_CONTENT_TYPE).readAll(); String expected = "{\"root\":{\"id\":\"toplevel\",\"relevance\":1.0,\"fields\":{\"totalCount\":0},\"children\":[{\"id\":\"Query\",\"relevance\":1.0,\"fields\":{\"query\":\"select * from sources * where field contains \\\"term\\\" | all(output(count()));\"}}]}}"; assertEquals(expected, result); } @Test public void testJsonQueryWithYQL() { ObjectNode root = jsonMapper.createObjectNode(); root.put("yql", "select * from sources * where default contains 'bad';"); String result = driver.sendRequest(uri + "searchChain=echoingQuery", com.yahoo.jdisc.http.HttpRequest.Method.POST, root.toString(), JSON_CONTENT_TYPE).readAll(); assertEquals("{\"root\":{\"id\":\"toplevel\",\"relevance\":1.0,\"fields\":{\"totalCount\":0},\"children\":[{\"id\":\"Query\",\"relevance\":1.0,\"fields\":{\"query\":\"select * from sources * where default contains \\\"bad\\\";\"}}]}}", result); } @Test public void testRequestMapping() { ObjectNode json = jsonMapper.createObjectNode(); json.put("yql", "select * from sources * where sddocname contains \"blog_post\" limit 0 | all(group(date) max(3) order(-count())each(output(count())));"); json.put("hits", 10); json.put("offset", 5); json.put("queryProfile", "foo"); json.put("nocache", false); json.put("groupingSessionCache", false); json.put("searchChain", "exceptionInPlugin"); json.put("timeout", 0); json.put("select", "_all"); ObjectNode model = jsonMapper.createObjectNode(); model.put("defaultIndex", 1); model.put("encoding", "json"); model.put("filter", "default"); model.put("language", "en"); model.put("queryString", "abc"); model.put("restrict", "_doc,json,xml"); model.put("searchPath", "node1"); model.put("sources", "source1,source2"); 
model.put("type", "yql"); json.set("model", model); ObjectNode ranking = jsonMapper.createObjectNode(); ranking.put("location", "123789.89123N;128123W"); ranking.put("features", "none"); ranking.put("listFeatures", false); ranking.put("profile", "1"); ranking.put("properties", "default"); ranking.put("sorting", "desc"); ranking.put("freshness", "0.05"); ranking.put("queryCache", false); ObjectNode matchPhase = jsonMapper.createObjectNode(); matchPhase.put("maxHits", "100"); matchPhase.put("attribute", "title"); matchPhase.put("ascending", true); ObjectNode diversity = jsonMapper.createObjectNode(); diversity.put("attribute", "title"); diversity.put("minGroups", 1); matchPhase.set("diversity", diversity); ranking.set("matchPhase", matchPhase); json.set("ranking", ranking); ObjectNode presentation = jsonMapper.createObjectNode(); presentation.put("bolding", true); presentation.put("format", "json"); presentation.put("summary", "none"); presentation.put("template", "json"); presentation.put("timing", false); json.set("presentation", presentation); ObjectNode collapse = jsonMapper.createObjectNode(); collapse.put("field", "none"); collapse.put("size", 2); collapse.put("summary", "default"); json.set("collapse", collapse); ObjectNode trace = jsonMapper.createObjectNode(); trace.put("level", 1); trace.put("timestamps", false); trace.put("rules", "none"); json.set("trace", trace); ObjectNode pos = jsonMapper.createObjectNode(); pos.put("ll", "1263123N;1231.9W"); pos.put("radius", "71234m"); pos.put("bb", "1237123W;123218N"); pos.put("attribute", "default"); json.set("pos", pos); ObjectNode streaming = jsonMapper.createObjectNode(); streaming.put("userid", 123); streaming.put("groupname", "abc"); streaming.put("selection", "none"); streaming.put("priority", 10); streaming.put("maxbucketspervisitor", 5); json.set("streaming", streaming); ObjectNode rules = jsonMapper.createObjectNode(); rules.put("off", false); rules.put("rulebase", "default"); json.set("rules", rules); 
ObjectNode metrics = jsonMapper.createObjectNode(); metrics.put("ignore", "_all"); json.set("metrics", metrics); json.put("recall", "none"); json.put("user", 123); json.put("nocachewrite", false); json.put("hitcountestimate", true); Inspector inspector = SlimeUtils.jsonToSlime(json.toString().getBytes(StandardCharsets.UTF_8)).get(); Map<String, String> map = new HashMap<>(); searchHandler.createRequestMapping(inspector, map, ""); String url = uri + "&model.sources=source1%2Csource2&select=_all&model.language=en&presentation.timing=false&pos.attribute=default&pos.radius=71234m&model.searchPath=node1&nocachewrite=false&ranking.matchPhase.maxHits=100&presentation.summary=none" + "&nocache=false&model.type=yql&collapse.summary=default&ranking.matchPhase.diversity.minGroups=1&ranking.location=123789.89123N%3B128123W&ranking.queryCache=false&offset=5&streaming.groupname=abc&groupingSessionCache=false" + "&presentation.template=json&trace.rules=none&rules.off=false&ranking.properties=default&searchChain=exceptionInPlugin&pos.ll=1263123N%3B1231.9W&ranking.sorting=desc&ranking.matchPhase.ascending=true&ranking.features=none&hitcountestimate=true" + "&model.filter=default&metrics.ignore=_all&collapse.field=none&ranking.profile=1&rules.rulebase=default&model.defaultIndex=1&trace.level=1&ranking.listFeatures=false&timeout=0&presentation.format=json" + "&yql=select+%2A+from+sources+%2A+where+sddocname+contains+%22blog_post%22+limit+0+%7C+all%28group%28date%29+max%283%29+order%28-count%28%29%29each%28output%28count%28%29%29%29%29%3B&recall=none&streaming.maxbucketspervisitor=5" + "&queryProfile=foo&presentation.bolding=true&model.encoding=json&model.queryString=abc&streaming.selection=none&trace.timestamps=false&collapse.size=2&streaming.priority=10&ranking.matchPhase.diversity.attribute=title" + "&ranking.matchPhase.attribute=title&hits=10&streaming.userid=123&pos.bb=1237123W%3B123218N&model.restrict=_doc%2Cjson%2Cxml&ranking.freshness=0.05&user=123"; HttpRequest request = 
HttpRequest.createTestRequest(url, GET); Map<String, String> propertyMap = request.propertyMap(); Assertions.assertThat(propertyMap).isEqualTo(map); } @Test public void testContentTypeParsing() { ObjectNode json = jsonMapper.createObjectNode(); json.put("query", "abc"); assertOkResult(driver.sendRequest(uri, com.yahoo.jdisc.http.HttpRequest.Method.POST, json.toString(), "Application/JSON; charset=utf-8"), jsonResult); } }
I guess you should return here if invalidOptions is empty. If not, you will throw an exception or log a warning whose list of invalid options is empty.
/**
 * Validates the given space-separated JVM options against {@code validPattern}.
 * If any option is invalid, either fails the deployment (when the
 * {@code failDeploymentWithInvalidJvmOptions} feature flag is set) or logs a warning
 * to the application package log.
 *
 * @param jvmOptions space-separated JVM options from services.xml; may be null or empty
 * @throws IllegalArgumentException if there are invalid options and the feature flag requires failing
 */
private void validateJvmOptions(String jvmOptions) {
    if (jvmOptions == null || jvmOptions.isEmpty()) return;

    String[] optionList = jvmOptions.split(" ");
    List<String> invalidOptions = Arrays.stream(optionList)
                                        .filter(option -> !option.isEmpty())
                                        .filter(option -> !Pattern.matches(validPattern.pattern(), option))
                                        .sorted()
                                        .collect(Collectors.toList());
    // All options are valid: return early instead of throwing/logging a message
    // with an empty option list.
    if (invalidOptions.isEmpty()) return;

    String message = "Invalid JVM options in services.xml: " + String.join(",", invalidOptions);
    if (failDeploymentWithInvalidJvmOptions)
        throw new IllegalArgumentException(message);
    else
        logger.logApplicationPackage(WARNING, message);
}
String message = "Invalid JVM options in services.xml: " + String.join(",", invalidOptions);
private void validateJvmOptions(String jvmOptions) { if (jvmOptions == null || jvmOptions.isEmpty()) return; String[] optionList = jvmOptions.split(" "); List<String> invalidOptions = Arrays.stream(optionList) .filter(option -> !option.isEmpty()) .filter(option -> !Pattern.matches(validPattern.pattern(), option)) .sorted() .collect(Collectors.toList()); if (invalidOptions.isEmpty()) return; String message = "Invalid JVM options in services.xml: " + String.join(",", invalidOptions); if (failDeploymentWithInvalidJvmOptions) throw new IllegalArgumentException(message); else logger.logApplicationPackage(WARNING, message); }
class JvmOptions { private static final Pattern validPattern = Pattern.compile("-[a-zA-z0-9=:]+"); private final ContainerCluster<?> cluster; private final Element nodesElement; private final DeployLogger logger; private final boolean legacyOptions; private final boolean failDeploymentWithInvalidJvmOptions; public JvmOptions(ContainerCluster<?> cluster, Element nodesElement, DeployState deployState, boolean legacyOptions) { this.cluster = cluster; this.nodesElement = nodesElement; this.logger = deployState.getDeployLogger(); this.legacyOptions = legacyOptions; this.failDeploymentWithInvalidJvmOptions = deployState.featureFlags().failDeploymentWithInvalidJvmOptions(); } String build() { if (legacyOptions) return buildLegacyOptions(); Element jvmElement = XML.getChild(nodesElement, "jvm"); if (jvmElement == null) return ""; String jvmOptions = jvmElement.getAttribute(VespaDomBuilder.OPTIONS); if (jvmOptions == null) return ""; validateJvmOptions(jvmOptions); return jvmOptions; } String buildLegacyOptions() { String jvmOptions; if (nodesElement.hasAttribute(VespaDomBuilder.JVM_OPTIONS)) { jvmOptions = nodesElement.getAttribute(VespaDomBuilder.JVM_OPTIONS); validateJvmOptions(jvmOptions); if (nodesElement.hasAttribute(VespaDomBuilder.JVMARGS_ATTRIB_NAME)) { String jvmArgs = nodesElement.getAttribute(VespaDomBuilder.JVMARGS_ATTRIB_NAME); throw new IllegalArgumentException("You have specified both jvm-options='" + jvmOptions + "'" + " and deprecated jvmargs='" + jvmArgs + "'. Merge jvmargs into 'options' in 'jvm' element." + " See https: } } else { jvmOptions = nodesElement.getAttribute(VespaDomBuilder.JVMARGS_ATTRIB_NAME); validateJvmOptions(jvmOptions); if (incompatibleGCOptions(jvmOptions)) { logger.logApplicationPackage(WARNING, "You need to move your GC-related options from deprecated 'jvmargs' to 'gc-options' in 'jvm' element." + " See https: cluster.setJvmGCOptions(ContainerCluster.G1GC); } } return jvmOptions; } }
class JvmOptions { private static final Pattern validPattern = Pattern.compile("-[a-zA-z0-9=:]+"); private final ContainerCluster<?> cluster; private final Element nodesElement; private final DeployLogger logger; private final boolean legacyOptions; private final boolean failDeploymentWithInvalidJvmOptions; public JvmOptions(ContainerCluster<?> cluster, Element nodesElement, DeployState deployState, boolean legacyOptions) { this.cluster = cluster; this.nodesElement = nodesElement; this.logger = deployState.getDeployLogger(); this.legacyOptions = legacyOptions; this.failDeploymentWithInvalidJvmOptions = deployState.featureFlags().failDeploymentWithInvalidJvmOptions(); } String build() { if (legacyOptions) return buildLegacyOptions(); Element jvmElement = XML.getChild(nodesElement, "jvm"); if (jvmElement == null) return ""; String jvmOptions = jvmElement.getAttribute(VespaDomBuilder.OPTIONS); if (jvmOptions == null) return ""; validateJvmOptions(jvmOptions); return jvmOptions; } String buildLegacyOptions() { String jvmOptions; if (nodesElement.hasAttribute(VespaDomBuilder.JVM_OPTIONS)) { jvmOptions = nodesElement.getAttribute(VespaDomBuilder.JVM_OPTIONS); validateJvmOptions(jvmOptions); if (nodesElement.hasAttribute(VespaDomBuilder.JVMARGS_ATTRIB_NAME)) { String jvmArgs = nodesElement.getAttribute(VespaDomBuilder.JVMARGS_ATTRIB_NAME); throw new IllegalArgumentException("You have specified both jvm-options='" + jvmOptions + "'" + " and deprecated jvmargs='" + jvmArgs + "'. Merge jvmargs into 'options' in 'jvm' element." + " See https: } } else { jvmOptions = nodesElement.getAttribute(VespaDomBuilder.JVMARGS_ATTRIB_NAME); validateJvmOptions(jvmOptions); if (incompatibleGCOptions(jvmOptions)) { logger.logApplicationPackage(WARNING, "You need to move your GC-related options from deprecated 'jvmargs' to 'gc-options' in 'jvm' element." + " See https: cluster.setJvmGCOptions(ContainerCluster.G1GC); } } return jvmOptions; } }
Right, thanks, will fix
/**
 * Validates the given space-separated JVM options against {@code validPattern}.
 * If any option is invalid, either fails the deployment (when the
 * {@code failDeploymentWithInvalidJvmOptions} feature flag is set) or logs a warning
 * to the application package log.
 *
 * @param jvmOptions space-separated JVM options from services.xml; may be null or empty
 * @throws IllegalArgumentException if there are invalid options and the feature flag requires failing
 */
private void validateJvmOptions(String jvmOptions) {
    if (jvmOptions == null || jvmOptions.isEmpty()) return;

    String[] optionList = jvmOptions.split(" ");
    List<String> invalidOptions = Arrays.stream(optionList)
                                        .filter(option -> !option.isEmpty())
                                        .filter(option -> !Pattern.matches(validPattern.pattern(), option))
                                        .sorted()
                                        .collect(Collectors.toList());
    // All options are valid: return early instead of throwing/logging a message
    // with an empty option list.
    if (invalidOptions.isEmpty()) return;

    String message = "Invalid JVM options in services.xml: " + String.join(",", invalidOptions);
    if (failDeploymentWithInvalidJvmOptions)
        throw new IllegalArgumentException(message);
    else
        logger.logApplicationPackage(WARNING, message);
}
String message = "Invalid JVM options in services.xml: " + String.join(",", invalidOptions);
private void validateJvmOptions(String jvmOptions) { if (jvmOptions == null || jvmOptions.isEmpty()) return; String[] optionList = jvmOptions.split(" "); List<String> invalidOptions = Arrays.stream(optionList) .filter(option -> !option.isEmpty()) .filter(option -> !Pattern.matches(validPattern.pattern(), option)) .sorted() .collect(Collectors.toList()); if (invalidOptions.isEmpty()) return; String message = "Invalid JVM options in services.xml: " + String.join(",", invalidOptions); if (failDeploymentWithInvalidJvmOptions) throw new IllegalArgumentException(message); else logger.logApplicationPackage(WARNING, message); }
class JvmOptions { private static final Pattern validPattern = Pattern.compile("-[a-zA-z0-9=:]+"); private final ContainerCluster<?> cluster; private final Element nodesElement; private final DeployLogger logger; private final boolean legacyOptions; private final boolean failDeploymentWithInvalidJvmOptions; public JvmOptions(ContainerCluster<?> cluster, Element nodesElement, DeployState deployState, boolean legacyOptions) { this.cluster = cluster; this.nodesElement = nodesElement; this.logger = deployState.getDeployLogger(); this.legacyOptions = legacyOptions; this.failDeploymentWithInvalidJvmOptions = deployState.featureFlags().failDeploymentWithInvalidJvmOptions(); } String build() { if (legacyOptions) return buildLegacyOptions(); Element jvmElement = XML.getChild(nodesElement, "jvm"); if (jvmElement == null) return ""; String jvmOptions = jvmElement.getAttribute(VespaDomBuilder.OPTIONS); if (jvmOptions == null) return ""; validateJvmOptions(jvmOptions); return jvmOptions; } String buildLegacyOptions() { String jvmOptions; if (nodesElement.hasAttribute(VespaDomBuilder.JVM_OPTIONS)) { jvmOptions = nodesElement.getAttribute(VespaDomBuilder.JVM_OPTIONS); validateJvmOptions(jvmOptions); if (nodesElement.hasAttribute(VespaDomBuilder.JVMARGS_ATTRIB_NAME)) { String jvmArgs = nodesElement.getAttribute(VespaDomBuilder.JVMARGS_ATTRIB_NAME); throw new IllegalArgumentException("You have specified both jvm-options='" + jvmOptions + "'" + " and deprecated jvmargs='" + jvmArgs + "'. Merge jvmargs into 'options' in 'jvm' element." + " See https: } } else { jvmOptions = nodesElement.getAttribute(VespaDomBuilder.JVMARGS_ATTRIB_NAME); validateJvmOptions(jvmOptions); if (incompatibleGCOptions(jvmOptions)) { logger.logApplicationPackage(WARNING, "You need to move your GC-related options from deprecated 'jvmargs' to 'gc-options' in 'jvm' element." + " See https: cluster.setJvmGCOptions(ContainerCluster.G1GC); } } return jvmOptions; } }
class JvmOptions { private static final Pattern validPattern = Pattern.compile("-[a-zA-z0-9=:]+"); private final ContainerCluster<?> cluster; private final Element nodesElement; private final DeployLogger logger; private final boolean legacyOptions; private final boolean failDeploymentWithInvalidJvmOptions; public JvmOptions(ContainerCluster<?> cluster, Element nodesElement, DeployState deployState, boolean legacyOptions) { this.cluster = cluster; this.nodesElement = nodesElement; this.logger = deployState.getDeployLogger(); this.legacyOptions = legacyOptions; this.failDeploymentWithInvalidJvmOptions = deployState.featureFlags().failDeploymentWithInvalidJvmOptions(); } String build() { if (legacyOptions) return buildLegacyOptions(); Element jvmElement = XML.getChild(nodesElement, "jvm"); if (jvmElement == null) return ""; String jvmOptions = jvmElement.getAttribute(VespaDomBuilder.OPTIONS); if (jvmOptions == null) return ""; validateJvmOptions(jvmOptions); return jvmOptions; } String buildLegacyOptions() { String jvmOptions; if (nodesElement.hasAttribute(VespaDomBuilder.JVM_OPTIONS)) { jvmOptions = nodesElement.getAttribute(VespaDomBuilder.JVM_OPTIONS); validateJvmOptions(jvmOptions); if (nodesElement.hasAttribute(VespaDomBuilder.JVMARGS_ATTRIB_NAME)) { String jvmArgs = nodesElement.getAttribute(VespaDomBuilder.JVMARGS_ATTRIB_NAME); throw new IllegalArgumentException("You have specified both jvm-options='" + jvmOptions + "'" + " and deprecated jvmargs='" + jvmArgs + "'. Merge jvmargs into 'options' in 'jvm' element." + " See https: } } else { jvmOptions = nodesElement.getAttribute(VespaDomBuilder.JVMARGS_ATTRIB_NAME); validateJvmOptions(jvmOptions); if (incompatibleGCOptions(jvmOptions)) { logger.logApplicationPackage(WARNING, "You need to move your GC-related options from deprecated 'jvmargs' to 'gc-options' in 'jvm' element." + " See https: cluster.setJvmGCOptions(ContainerCluster.G1GC); } } return jvmOptions; } }
Nit: Since INFO is always logged, there is no need to pass a String supplier.
/**
 * Deletes expired local sessions for this tenant: sessions that have passed their
 * lifetime and can be deleted, and sessions older than one day that are no longer
 * the active session for their application. Newly created sessions are never deleted.
 *
 * @param activeSessions map from application id to its currently active session id
 */
public void deleteExpiredSessions(Map<ApplicationId, Long> activeSessions) {
    log.log(Level.FINE, () -> "Purging old sessions for tenant '" + tenantName + "'");
    Set<LocalSession> toDelete = new HashSet<>();
    Set<Long> newSessions = findNewSessionsInFileSystem();
    try {
        for (LocalSession candidate : getLocalSessionsFromFileSystem()) {
            // Never delete sessions that were created very recently.
            if (newSessions.contains(candidate.getSessionId())) {
                // INFO is always logged, so a plain String is fine here (no supplier needed),
                // and the message no longer ends with a dangling ": ".
                log.log(Level.INFO, "Skipping session " + candidate.getSessionId() + ", newly created");
                continue;
            }
            Instant createTime = candidate.getCreateTime();
            log.log(Level.FINE, () -> "Candidate session for deletion: " + candidate.getSessionId() + ", created: " + createTime);
            if (hasExpired(candidate) && canBeDeleted(candidate)) {
                toDelete.add(candidate);
            } else if (createTime.plus(Duration.ofDays(1)).isBefore(clock.instant())) {
                Optional<ApplicationId> applicationId = candidate.getOptionalApplicationId();
                if (applicationId.isEmpty()) continue;
                Long activeSession = activeSessions.get(applicationId.get());
                // Delete sessions older than a day that are not the active session for their application.
                if (activeSession == null || activeSession != candidate.getSessionId()) {
                    toDelete.add(candidate);
                    // Log the application id itself, not Optional's "Optional[...]" toString().
                    log.log(Level.INFO, "Deleted inactive session " + candidate.getSessionId() + " created " +
                                        createTime + " for '" + applicationId.get() + "'");
                }
            }
        }
        toDelete.forEach(this::deleteLocalSession);
    } catch (Throwable e) {
        log.log(Level.WARNING, "Error when purging old sessions ", e);
    }
    log.log(Level.FINE, () -> "Done purging old sessions");
}
log.log(Level.INFO, () -> "Skipping session " + candidate.getSessionId() + ", newly created: ");
/**
 * Deletes expired local sessions for this tenant: sessions that have passed their
 * lifetime and can be deleted, and sessions older than one day that are no longer
 * the active session for their application. Newly created sessions are never deleted.
 *
 * @param activeSessions map from application id to its currently active session id
 */
public void deleteExpiredSessions(Map<ApplicationId, Long> activeSessions) {
    log.log(Level.FINE, () -> "Purging old sessions for tenant '" + tenantName + "'");
    Set<LocalSession> toDelete = new HashSet<>();
    Set<Long> newSessions = findNewSessionsInFileSystem();
    try {
        for (LocalSession candidate : getLocalSessionsFromFileSystem()) {
            // Never delete sessions that were created very recently.
            if (newSessions.contains(candidate.getSessionId())) {
                // INFO is always logged, so a plain String is fine here (no supplier needed),
                // and the message no longer ends with a dangling ": ".
                log.log(Level.INFO, "Skipping session " + candidate.getSessionId() + ", newly created");
                continue;
            }
            Instant createTime = candidate.getCreateTime();
            log.log(Level.FINE, () -> "Candidate session for deletion: " + candidate.getSessionId() + ", created: " + createTime);
            if (hasExpired(candidate) && canBeDeleted(candidate)) {
                toDelete.add(candidate);
            } else if (createTime.plus(Duration.ofDays(1)).isBefore(clock.instant())) {
                Optional<ApplicationId> applicationId = candidate.getOptionalApplicationId();
                if (applicationId.isEmpty()) continue;
                Long activeSession = activeSessions.get(applicationId.get());
                // Delete sessions older than a day that are not the active session for their application.
                if (activeSession == null || activeSession != candidate.getSessionId()) {
                    toDelete.add(candidate);
                    // Log the application id itself, not Optional's "Optional[...]" toString().
                    log.log(Level.INFO, "Deleted inactive session " + candidate.getSessionId() + " created " +
                                        createTime + " for '" + applicationId.get() + "'");
                }
            }
        }
        toDelete.forEach(this::deleteLocalSession);
    } catch (Throwable e) {
        log.log(Level.WARNING, "Error when purging old sessions ", e);
    }
    log.log(Level.FINE, () -> "Done purging old sessions");
}
class SessionRepository { private static final Logger log = Logger.getLogger(SessionRepository.class.getName()); private static final FilenameFilter sessionApplicationsFilter = (dir, name) -> name.matches("\\d+"); private static final long nonExistingActiveSessionId = 0; private final Object monitor = new Object(); private final Map<Long, LocalSession> localSessionCache = Collections.synchronizedMap(new HashMap<>()); private final Map<Long, RemoteSession> remoteSessionCache = Collections.synchronizedMap(new HashMap<>()); private final Map<Long, SessionStateWatcher> sessionStateWatchers = Collections.synchronizedMap(new HashMap<>()); private final Duration sessionLifetime; private final Clock clock; private final Curator curator; private final Executor zkWatcherExecutor; private final FileDistributionFactory fileDistributionFactory; private final PermanentApplicationPackage permanentApplicationPackage; private final FlagSource flagSource; private final TenantFileSystemDirs tenantFileSystemDirs; private final Metrics metrics; private final MetricUpdater metricUpdater; private final Curator.DirectoryCache directoryCache; private final TenantApplications applicationRepo; private final SessionPreparer sessionPreparer; private final Path sessionsPath; private final TenantName tenantName; private final SessionCounter sessionCounter; private final SecretStore secretStore; private final HostProvisionerProvider hostProvisionerProvider; private final ConfigserverConfig configserverConfig; private final ConfigServerDB configServerDB; private final Zone zone; private final ModelFactoryRegistry modelFactoryRegistry; private final ConfigDefinitionRepo configDefinitionRepo; private final int maxNodeSize; public SessionRepository(TenantName tenantName, TenantApplications applicationRepo, SessionPreparer sessionPreparer, Curator curator, Metrics metrics, StripedExecutor<TenantName> zkWatcherExecutor, FileDistributionFactory fileDistributionFactory, PermanentApplicationPackage 
permanentApplicationPackage, FlagSource flagSource, ExecutorService zkCacheExecutor, SecretStore secretStore, HostProvisionerProvider hostProvisionerProvider, ConfigserverConfig configserverConfig, ConfigServerDB configServerDB, Zone zone, Clock clock, ModelFactoryRegistry modelFactoryRegistry, ConfigDefinitionRepo configDefinitionRepo, int maxNodeSize) { this.tenantName = tenantName; sessionCounter = new SessionCounter(curator, tenantName); this.sessionsPath = TenantRepository.getSessionsPath(tenantName); this.clock = clock; this.curator = curator; this.sessionLifetime = Duration.ofSeconds(configserverConfig.sessionLifetime()); this.zkWatcherExecutor = command -> zkWatcherExecutor.execute(tenantName, command); this.fileDistributionFactory = fileDistributionFactory; this.permanentApplicationPackage = permanentApplicationPackage; this.flagSource = flagSource; this.tenantFileSystemDirs = new TenantFileSystemDirs(configServerDB, tenantName); this.applicationRepo = applicationRepo; this.sessionPreparer = sessionPreparer; this.metrics = metrics; this.metricUpdater = metrics.getOrCreateMetricUpdater(Metrics.createDimensions(tenantName)); this.secretStore = secretStore; this.hostProvisionerProvider = hostProvisionerProvider; this.configserverConfig = configserverConfig; this.configServerDB = configServerDB; this.zone = zone; this.modelFactoryRegistry = modelFactoryRegistry; this.configDefinitionRepo = configDefinitionRepo; this.maxNodeSize = maxNodeSize; loadSessions(); this.directoryCache = curator.createDirectoryCache(sessionsPath.getAbsolute(), false, false, zkCacheExecutor); this.directoryCache.addListener(this::childEvent); this.directoryCache.start(); } private void loadSessions() { ExecutorService executor = Executors.newFixedThreadPool(Math.max(8, Runtime.getRuntime().availableProcessors()), new DaemonThreadFactory("load-sessions-")); loadSessions(executor); } void loadSessions(ExecutorService executor) { loadRemoteSessions(executor); try { executor.shutdown(); if 
( ! executor.awaitTermination(1, TimeUnit.MINUTES)) log.log(Level.INFO, "Executor did not terminate"); } catch (InterruptedException e) { log.log(Level.WARNING, "Shutdown of executor for loading sessions failed: " + Exceptions.toMessageString(e)); } } public void addLocalSession(LocalSession session) { long sessionId = session.getSessionId(); localSessionCache.put(sessionId, session); if (remoteSessionCache.get(sessionId) == null) createRemoteSession(sessionId); } public LocalSession getLocalSession(long sessionId) { return localSessionCache.get(sessionId); } /** Returns a copy of local sessions */ public Collection<LocalSession> getLocalSessions() { return List.copyOf(localSessionCache.values()); } public Set<LocalSession> getLocalSessionsFromFileSystem() { File[] sessions = tenantFileSystemDirs.sessionsPath().listFiles(sessionApplicationsFilter); if (sessions == null) return Set.of(); Set<LocalSession> sessionIds = new HashSet<>(); for (File session : sessions) { long sessionId = Long.parseLong(session.getName()); SessionZooKeeperClient sessionZKClient = createSessionZooKeeperClient(sessionId); File sessionDir = getAndValidateExistingSessionAppDir(sessionId); ApplicationPackage applicationPackage = FilesApplicationPackage.fromFile(sessionDir); LocalSession localSession = new LocalSession(tenantName, sessionId, applicationPackage, sessionZKClient); sessionIds.add(localSession); } return sessionIds; } public ConfigChangeActions prepareLocalSession(Session session, DeployLogger logger, PrepareParams params, Instant now) { params.vespaVersion().ifPresent(version -> { if ( ! params.isBootstrap() && ! 
modelFactoryRegistry.allVersions().contains(version)) throw new UnknownVespaVersionException("Vespa version '" + version + "' not known by this configserver"); }); applicationRepo.createApplication(params.getApplicationId()); logger.log(Level.FINE, "Created application " + params.getApplicationId()); long sessionId = session.getSessionId(); SessionZooKeeperClient sessionZooKeeperClient = createSessionZooKeeperClient(sessionId); Optional<CompletionWaiter> waiter = params.isDryRun() ? Optional.empty() : Optional.of(sessionZooKeeperClient.createPrepareWaiter()); Optional<ApplicationSet> activeApplicationSet = getActiveApplicationSet(params.getApplicationId()); ConfigChangeActions actions = sessionPreparer.prepare(applicationRepo.getHostValidator(), logger, params, activeApplicationSet, now, getSessionAppDir(sessionId), session.getApplicationPackage(), sessionZooKeeperClient) .getConfigChangeActions(); setPrepared(session); waiter.ifPresent(w -> w.awaitCompletion(params.getTimeoutBudget().timeLeft())); return actions; } /** * Creates a new deployment session from an already existing session. * * @param existingSession the session to use as base * @param internalRedeploy whether this session is for a system internal redeploy — not an application package change * @param timeoutBudget timeout for creating session and waiting for other servers. 
* @return a new session */ public LocalSession createSessionFromExisting(Session existingSession, boolean internalRedeploy, TimeoutBudget timeoutBudget) { ApplicationId existingApplicationId = existingSession.getApplicationId(); File existingApp = getSessionAppDir(existingSession.getSessionId()); LocalSession session = createSessionFromApplication(existingApp, existingApplicationId, internalRedeploy, timeoutBudget); session.setApplicationId(existingApplicationId); session.setApplicationPackageReference(existingSession.getApplicationPackageReference()); session.setVespaVersion(existingSession.getVespaVersion()); session.setDockerImageRepository(existingSession.getDockerImageRepository()); session.setAthenzDomain(existingSession.getAthenzDomain()); session.setTenantSecretStores(existingSession.getTenantSecretStores()); session.setOperatorCertificates(existingSession.getOperatorCertificates()); return session; } /** * Creates a new deployment session from an application package. * * @param applicationDirectory a File pointing to an application. * @param applicationId application id for this new session. * @param timeoutBudget Timeout for creating session and waiting for other servers. * @return a new session */ public LocalSession createSessionFromApplicationPackage(File applicationDirectory, ApplicationId applicationId, TimeoutBudget timeoutBudget) { applicationRepo.createApplication(applicationId); return createSessionFromApplication(applicationDirectory, applicationId, false, timeoutBudget); } /** * Creates a local session based on a remote session and the distributed application package. * Does not wait for session being created on other servers. 
*/ private void createLocalSession(File applicationFile, ApplicationId applicationId, long sessionId) { try { ApplicationPackage applicationPackage = createApplicationPackage(applicationFile, applicationId, sessionId, false); createLocalSession(sessionId, applicationPackage); } catch (Exception e) { throw new RuntimeException("Error creating session " + sessionId, e); } } public void deleteLocalSession(LocalSession session) { long sessionId = session.getSessionId(); log.log(Level.FINE, () -> "Deleting local session " + sessionId); SessionStateWatcher watcher = sessionStateWatchers.remove(sessionId); if (watcher != null) watcher.close(); localSessionCache.remove(sessionId); NestedTransaction transaction = new NestedTransaction(); transaction.add(FileTransaction.from(FileOperations.delete(getSessionAppDir(sessionId).getAbsolutePath()))); transaction.commit(); } private void deleteAllSessions() { for (LocalSession session : getLocalSessions()) { deleteLocalSession(session); } } public RemoteSession getRemoteSession(long sessionId) { return remoteSessionCache.get(sessionId); } /** Returns a copy of remote sessions */ public Collection<RemoteSession> getRemoteSessions() { return List.copyOf(remoteSessionCache.values()); } public List<Long> getRemoteSessionsFromZooKeeper() { return getSessionList(curator.getChildren(sessionsPath)); } public RemoteSession createRemoteSession(long sessionId) { SessionZooKeeperClient sessionZKClient = createSessionZooKeeperClient(sessionId); RemoteSession session = new RemoteSession(tenantName, sessionId, sessionZKClient); RemoteSession newSession = loadSessionIfActive(session).orElse(session); remoteSessionCache.put(sessionId, newSession); updateSessionStateWatcher(sessionId, newSession); return newSession; } public int deleteExpiredRemoteSessions(Clock clock, Duration expiryTime) { List<Long> remoteSessionsFromZooKeeper = getRemoteSessionsFromZooKeeper(); log.log(Level.FINE, () -> "Remote sessions for tenant " + tenantName + ": " + 
remoteSessionsFromZooKeeper); int deleted = 0; for (long sessionId : remoteSessionsFromZooKeeper) { Session session = remoteSessionCache.get(sessionId); if (session == null) { log.log(Level.FINE, () -> "Remote session " + sessionId + " is null, creating a new one"); session = new RemoteSession(tenantName, sessionId, createSessionZooKeeperClient(sessionId)); } if (session.getStatus() == Session.Status.ACTIVATE) continue; if (sessionHasExpired(session.getCreateTime(), expiryTime, clock)) { log.log(Level.FINE, () -> "Remote session " + sessionId + " for " + tenantName + " has expired, deleting it"); deleteRemoteSessionFromZooKeeper(session); deleted++; } if (deleted >= 2) break; } return deleted; } public void deactivateAndUpdateCache(RemoteSession remoteSession) { RemoteSession session = remoteSession.deactivated(); remoteSessionCache.put(session.getSessionId(), session); } public void deleteRemoteSessionFromZooKeeper(Session session) { SessionZooKeeperClient sessionZooKeeperClient = createSessionZooKeeperClient(session.getSessionId()); Transaction transaction = sessionZooKeeperClient.deleteTransaction(); transaction.commit(); transaction.close(); } private boolean sessionHasExpired(Instant created, Duration expiryTime, Clock clock) { return (created.plus(expiryTime).isBefore(clock.instant())); } private List<Long> getSessionListFromDirectoryCache(List<ChildData> children) { return getSessionList(children.stream() .map(child -> Path.fromString(child.getPath()).getName()) .collect(Collectors.toList())); } private List<Long> getSessionList(List<String> children) { return children.stream().map(Long::parseLong).collect(Collectors.toList()); } private void loadRemoteSessions(ExecutorService executor) throws NumberFormatException { Map<Long, Future<?>> futures = new HashMap<>(); for (long sessionId : getRemoteSessionsFromZooKeeper()) { futures.put(sessionId, executor.submit(() -> sessionAdded(sessionId))); } futures.forEach((sessionId, future) -> { try { future.get(); 
log.log(Level.FINE, () -> "Remote session " + sessionId + " loaded"); } catch (ExecutionException | InterruptedException e) { throw new RuntimeException("Could not load remote session " + sessionId, e); } }); } /** * A session for which we don't have a watcher, i.e. hitherto unknown to us. * * @param sessionId session id for the new session */ public void sessionAdded(long sessionId) { if (hasStatusDeleted(sessionId)) return; log.log(Level.FINE, () -> "Adding remote session " + sessionId); Session session = createRemoteSession(sessionId); if (session.getStatus() == Session.Status.NEW) { log.log(Level.FINE, () -> session.logPre() + "Confirming upload for session " + sessionId); confirmUpload(session); } createLocalSessionFromDistributedApplicationPackage(sessionId); } private boolean hasStatusDeleted(long sessionId) { SessionZooKeeperClient sessionZKClient = createSessionZooKeeperClient(sessionId); RemoteSession session = new RemoteSession(tenantName, sessionId, sessionZKClient); return session.getStatus() == Session.Status.DELETE; } void activate(RemoteSession session) { long sessionId = session.getSessionId(); CompletionWaiter waiter = createSessionZooKeeperClient(sessionId).getActiveWaiter(); log.log(Level.FINE, () -> session.logPre() + "Activating " + sessionId); applicationRepo.activateApplication(ensureApplicationLoaded(session), sessionId); log.log(Level.FINE, () -> session.logPre() + "Notifying " + waiter); notifyCompletion(waiter); log.log(Level.INFO, session.logPre() + "Session activated: " + sessionId); } private Optional<RemoteSession> loadSessionIfActive(RemoteSession session) { for (ApplicationId applicationId : applicationRepo.activeApplications()) { Optional<Long> activeSession = applicationRepo.activeSessionOf(applicationId); if (activeSession.isPresent() && activeSession.get() == session.getSessionId()) { log.log(Level.FINE, () -> "Found active application for session " + session.getSessionId() + " , loading it"); 
applicationRepo.activateApplication(ensureApplicationLoaded(session), session.getSessionId()); log.log(Level.INFO, session.logPre() + "Application activated successfully: " + applicationId + " (generation " + session.getSessionId() + ")"); return Optional.ofNullable(remoteSessionCache.get(session.getSessionId())); } } return Optional.empty(); } void prepareRemoteSession(RemoteSession session) { SessionZooKeeperClient sessionZooKeeperClient = createSessionZooKeeperClient(session.getSessionId()); CompletionWaiter waiter = sessionZooKeeperClient.getPrepareWaiter(); ensureApplicationLoaded(session); notifyCompletion(waiter); } public ApplicationSet ensureApplicationLoaded(RemoteSession session) { if (session.applicationSet().isPresent()) { return session.applicationSet().get(); } Optional<Long> activeSessionId = getActiveSessionId(session.getApplicationId()); Optional<ApplicationSet> previousApplicationSet = activeSessionId.filter(session::isNewerThan) .flatMap(this::getApplicationSet); ApplicationSet applicationSet = loadApplication(session, previousApplicationSet); RemoteSession activated = session.activated(applicationSet); long sessionId = activated.getSessionId(); remoteSessionCache.put(sessionId, activated); updateSessionStateWatcher(sessionId, activated); return applicationSet; } void confirmUpload(Session session) { CompletionWaiter waiter = session.getSessionZooKeeperClient().getUploadWaiter(); long sessionId = session.getSessionId(); log.log(Level.FINE, () -> "Notifying upload waiter for session " + sessionId); notifyCompletion(waiter); log.log(Level.FINE, () -> "Done notifying upload for session " + sessionId); } void notifyCompletion(CompletionWaiter completionWaiter) { try { completionWaiter.notifyCompletion(); } catch (RuntimeException e) { Set<Class<? extends KeeperException>> acceptedExceptions = Set.of(KeeperException.NoNodeException.class, KeeperException.NodeExistsException.class); Class<? 
extends Throwable> exceptionClass = e.getCause().getClass(); if (acceptedExceptions.contains(exceptionClass)) log.log(Level.FINE, () -> "Not able to notify completion for session (" + completionWaiter + ")," + " node " + (exceptionClass.equals(KeeperException.NoNodeException.class) ? "has been deleted" : "already exists")); else throw e; } } private ApplicationSet loadApplication(Session session, Optional<ApplicationSet> previousApplicationSet) { log.log(Level.FINE, () -> "Loading application for " + session); SessionZooKeeperClient sessionZooKeeperClient = createSessionZooKeeperClient(session.getSessionId()); ApplicationPackage applicationPackage = sessionZooKeeperClient.loadApplicationPackage(); ActivatedModelsBuilder builder = new ActivatedModelsBuilder(session.getTenantName(), session.getSessionId(), sessionZooKeeperClient, previousApplicationSet, sessionPreparer.getExecutor(), curator, metrics, permanentApplicationPackage, flagSource, secretStore, hostProvisionerProvider, configserverConfig, zone, modelFactoryRegistry, configDefinitionRepo); SettableOptional<AllocatedHosts> allocatedHosts = new SettableOptional<>(applicationPackage.getAllocatedHosts()); return ApplicationSet.fromList(builder.buildModels(session.getApplicationId(), sessionZooKeeperClient.readDockerImageRepository(), sessionZooKeeperClient.readVespaVersion(), applicationPackage, allocatedHosts, clock.instant())); } private void nodeChanged() { zkWatcherExecutor.execute(() -> { Multiset<Session.Status> sessionMetrics = HashMultiset.create(); getRemoteSessions().forEach(session -> sessionMetrics.add(session.getStatus())); metricUpdater.setNewSessions(sessionMetrics.count(Session.Status.NEW)); metricUpdater.setPreparedSessions(sessionMetrics.count(Session.Status.PREPARE)); metricUpdater.setActivatedSessions(sessionMetrics.count(Session.Status.ACTIVATE)); metricUpdater.setDeactivatedSessions(sessionMetrics.count(Session.Status.DEACTIVATE)); }); } @SuppressWarnings("unused") private void 
childEvent(CuratorFramework ignored, PathChildrenCacheEvent event) { zkWatcherExecutor.execute(() -> { log.log(Level.FINE, () -> "Got child event: " + event); switch (event.getType()) { case CHILD_ADDED: case CHILD_REMOVED: case CONNECTION_RECONNECTED: sessionsChanged(); break; default: break; } }); } private boolean hasExpired(LocalSession candidate) { return candidate.getCreateTime().plus(sessionLifetime).isBefore(clock.instant()); } private boolean canBeDeleted(LocalSession candidate) { return ! List.of(Session.Status.UNKNOWN, Session.Status.ACTIVATE).contains(candidate.getStatus()) || oldSessionDirWithNonExistingSession(candidate); } private boolean oldSessionDirWithNonExistingSession(LocalSession session) { File sessionDir = tenantFileSystemDirs.getUserApplicationDir(session.getSessionId()); return sessionDir.exists() && session.getStatus() == Session.Status.UNKNOWN && created(sessionDir).plus(Duration.ofDays(30)).isBefore(clock.instant()); } private Set<Long> findNewSessionsInFileSystem() { File[] sessions = tenantFileSystemDirs.sessionsPath().listFiles(sessionApplicationsFilter); Set<Long> newSessions = new HashSet<>(); if (sessions != null) { for (File session : sessions) { try { if (Files.getLastModifiedTime(session.toPath()).toInstant() .isAfter(clock.instant().minus(Duration.ofSeconds(30)))) newSessions.add(Long.parseLong(session.getName())); } catch (IOException e) { log.log(Level.INFO, "Unable to find last modified time for " + session.toPath()); }; } } return newSessions; } private Instant created(File file) { BasicFileAttributes fileAttributes; try { fileAttributes = readAttributes(file.toPath(), BasicFileAttributes.class); return fileAttributes.creationTime().toInstant(); } catch (IOException e) { throw new UncheckedIOException(e); } } private void ensureSessionPathDoesNotExist(long sessionId) { Path sessionPath = getSessionPath(sessionId); if (curator.exists(sessionPath)) { throw new IllegalArgumentException("Path " + sessionPath.getAbsolute() + " 
already exists in ZooKeeper"); } } private ApplicationPackage createApplication(File userDir, File configApplicationDir, ApplicationId applicationId, long sessionId, Optional<Long> currentlyActiveSessionId, boolean internalRedeploy) { long deployTimestamp = System.currentTimeMillis(); String user = System.getenv("USER"); if (user == null) { user = "unknown"; } DeployData deployData = new DeployData(user, userDir.getAbsolutePath(), applicationId, deployTimestamp, internalRedeploy, sessionId, currentlyActiveSessionId.orElse(nonExistingActiveSessionId)); return FilesApplicationPackage.fromFileWithDeployData(configApplicationDir, deployData); } private LocalSession createSessionFromApplication(File applicationFile, ApplicationId applicationId, boolean internalRedeploy, TimeoutBudget timeoutBudget) { long sessionId = getNextSessionId(); try { ensureSessionPathDoesNotExist(sessionId); ApplicationPackage app = createApplicationPackage(applicationFile, applicationId, sessionId, internalRedeploy); log.log(Level.FINE, () -> TenantRepository.logPre(tenantName) + "Creating session " + sessionId + " in ZooKeeper"); SessionZooKeeperClient sessionZKClient = createSessionZooKeeperClient(sessionId); sessionZKClient.createNewSession(clock.instant()); CompletionWaiter waiter = sessionZKClient.getUploadWaiter(); LocalSession session = new LocalSession(tenantName, sessionId, app, sessionZKClient); waiter.awaitCompletion(Duration.ofSeconds(Math.min(120, timeoutBudget.timeLeft().getSeconds()))); addLocalSession(session); return session; } catch (Exception e) { throw new RuntimeException("Error creating session " + sessionId, e); } } private ApplicationPackage createApplicationPackage(File applicationFile, ApplicationId applicationId, long sessionId, boolean internalRedeploy) throws IOException { synchronized (monitor) { Optional<Long> activeSessionId = getActiveSessionId(applicationId); File userApplicationDir = getSessionAppDir(sessionId); copyApp(applicationFile, userApplicationDir); 
ApplicationPackage applicationPackage = createApplication(applicationFile, userApplicationDir, applicationId, sessionId, activeSessionId, internalRedeploy); applicationPackage.writeMetaData(); return applicationPackage; } } public Optional<ApplicationSet> getActiveApplicationSet(ApplicationId appId) { return applicationRepo.activeSessionOf(appId).flatMap(this::getApplicationSet); } private Optional<ApplicationSet> getApplicationSet(long sessionId) { Optional<ApplicationSet> applicationSet = Optional.empty(); try { applicationSet = Optional.ofNullable(getRemoteSession(sessionId)).map(this::ensureApplicationLoaded); } catch (IllegalArgumentException e) { } return applicationSet; } private void copyApp(File sourceDir, File destinationDir) throws IOException { if (destinationDir.exists()) { log.log(Level.INFO, "Destination dir " + destinationDir + " already exists, app has already been copied"); return; } if (! sourceDir.isDirectory()) throw new IllegalArgumentException(sourceDir.getAbsolutePath() + " is not a directory"); java.nio.file.Path tempDestinationDir = null; try { tempDestinationDir = Files.createTempDirectory(destinationDir.getParentFile().toPath(), "app-package"); log.log(Level.FINE, "Copying dir " + sourceDir.getAbsolutePath() + " to " + tempDestinationDir.toFile().getAbsolutePath()); IOUtils.copyDirectory(sourceDir, tempDestinationDir.toFile()); moveSearchDefinitionsToSchemasDir(tempDestinationDir); log.log(Level.FINE, "Moving " + tempDestinationDir + " to " + destinationDir.getAbsolutePath()); Files.move(tempDestinationDir, destinationDir.toPath(), StandardCopyOption.ATOMIC_MOVE); } finally { if (tempDestinationDir != null) IOUtils.recursiveDeleteDir(tempDestinationDir.toFile()); } } private void moveSearchDefinitionsToSchemasDir(java.nio.file.Path applicationDir) throws IOException { File schemasDir = applicationDir.resolve(ApplicationPackage.SCHEMAS_DIR.getRelative()).toFile(); File sdDir = 
applicationDir.resolve(ApplicationPackage.SEARCH_DEFINITIONS_DIR.getRelative()).toFile(); if (sdDir.exists() && sdDir.isDirectory()) { File[] sdFiles = sdDir.listFiles(); if (sdFiles != null) { Files.createDirectories(schemasDir.toPath()); Arrays.asList(sdFiles).forEach(file -> Exceptions.uncheck( () -> Files.move(file.toPath(), schemasDir.toPath().resolve(file.toPath().getFileName()), StandardCopyOption.REPLACE_EXISTING))); } Files.delete(sdDir.toPath()); } } /** * Returns a new session instance for the given session id. */ void createSessionFromId(long sessionId) { File sessionDir = getAndValidateExistingSessionAppDir(sessionId); ApplicationPackage applicationPackage = FilesApplicationPackage.fromFile(sessionDir); createLocalSession(sessionId, applicationPackage); } void createLocalSession(long sessionId, ApplicationPackage applicationPackage) { SessionZooKeeperClient sessionZKClient = createSessionZooKeeperClient(sessionId); LocalSession session = new LocalSession(tenantName, sessionId, applicationPackage, sessionZKClient); addLocalSession(session); } /** * Returns a new local session for the given session id if it does not already exist. 
* Will also add the session to the local session cache if necessary */ public void createLocalSessionFromDistributedApplicationPackage(long sessionId) { if (applicationRepo.sessionExistsInFileSystem(sessionId)) { log.log(Level.FINE, () -> "Local session for session id " + sessionId + " already exists"); createSessionFromId(sessionId); return; } SessionZooKeeperClient sessionZKClient = createSessionZooKeeperClient(sessionId); FileReference fileReference = sessionZKClient.readApplicationPackageReference(); log.log(Level.FINE, () -> "File reference for session id " + sessionId + ": " + fileReference); if (fileReference != null) { File rootDir = new File(Defaults.getDefaults().underVespaHome(configserverConfig.fileReferencesDir())); File sessionDir; FileDirectory fileDirectory = new FileDirectory(rootDir); try { sessionDir = fileDirectory.getFile(fileReference); } catch (IllegalArgumentException e) { log.log(Level.FINE, () -> "File reference for session id " + sessionId + ": " + fileReference + " not found in " + fileDirectory); return; } ApplicationId applicationId = sessionZKClient.readApplicationId() .orElseThrow(() -> new RuntimeException("Could not find application id for session " + sessionId)); log.log(Level.FINE, () -> "Creating local session for tenant '" + tenantName + "' with session id " + sessionId); createLocalSession(sessionDir, applicationId, sessionId); } } private Optional<Long> getActiveSessionId(ApplicationId applicationId) { List<ApplicationId> applicationIds = applicationRepo.activeApplications(); return applicationIds.contains(applicationId) ? 
Optional.of(applicationRepo.requireActiveSessionOf(applicationId)) : Optional.empty(); } private long getNextSessionId() { return sessionCounter.nextSessionId(); } public Path getSessionPath(long sessionId) { return sessionsPath.append(String.valueOf(sessionId)); } Path getSessionStatePath(long sessionId) { return getSessionPath(sessionId).append(ZKApplication.SESSIONSTATE_ZK_SUBPATH); } private SessionZooKeeperClient createSessionZooKeeperClient(long sessionId) { return new SessionZooKeeperClient(curator, tenantName, sessionId, configserverConfig.serverId(), fileDistributionFactory.createFileManager(getSessionAppDir(sessionId)), maxNodeSize); } private File getAndValidateExistingSessionAppDir(long sessionId) { File appDir = getSessionAppDir(sessionId); if (!appDir.exists() || !appDir.isDirectory()) { throw new IllegalArgumentException("Unable to find correct application directory for session " + sessionId); } return appDir; } private File getSessionAppDir(long sessionId) { return new TenantFileSystemDirs(configServerDB, tenantName).getUserApplicationDir(sessionId); } private void updateSessionStateWatcher(long sessionId, RemoteSession remoteSession) { SessionStateWatcher sessionStateWatcher = sessionStateWatchers.get(sessionId); if (sessionStateWatcher == null) { Curator.FileCache fileCache = curator.createFileCache(getSessionStatePath(sessionId).getAbsolute(), false); fileCache.addListener(this::nodeChanged); sessionStateWatchers.put(sessionId, new SessionStateWatcher(fileCache, remoteSession, metricUpdater, zkWatcherExecutor, this)); } else { sessionStateWatcher.updateRemoteSession(remoteSession); } } @Override public String toString() { return getLocalSessions().toString(); } public Clock clock() { return clock; } public void close() { deleteAllSessions(); tenantFileSystemDirs.delete(); try { if (directoryCache != null) { directoryCache.close(); } } catch (Exception e) { log.log(Level.WARNING, "Exception when closing path cache", e); } finally { 
checkForRemovedSessions(new ArrayList<>()); } } private void sessionsChanged() throws NumberFormatException { List<Long> sessions = getSessionListFromDirectoryCache(directoryCache.getCurrentData()); checkForRemovedSessions(sessions); checkForAddedSessions(sessions); } private void checkForRemovedSessions(List<Long> existingSessions) { for (Iterator<RemoteSession> it = remoteSessionCache.values().iterator(); it.hasNext(); ) { long sessionId = it.next().sessionId; if (existingSessions.contains(sessionId)) continue; SessionStateWatcher watcher = sessionStateWatchers.remove(sessionId); if (watcher != null) watcher.close(); it.remove(); metricUpdater.incRemovedSessions(); } } private void checkForAddedSessions(List<Long> sessions) { for (Long sessionId : sessions) if (remoteSessionCache.get(sessionId) == null) sessionAdded(sessionId); } public Transaction createActivateTransaction(Session session) { Transaction transaction = createSetStatusTransaction(session, Session.Status.ACTIVATE); transaction.add(applicationRepo.createPutTransaction(session.getApplicationId(), session.getSessionId()).operations()); return transaction; } public Transaction createSetStatusTransaction(Session session, Session.Status status) { return session.sessionZooKeeperClient.createWriteStatusTransaction(status); } void setPrepared(Session session) { session.setStatus(Session.Status.PREPARE); } private static class FileTransaction extends AbstractTransaction { public static FileTransaction from(FileOperation operation) { FileTransaction transaction = new FileTransaction(); transaction.add(operation); return transaction; } @Override public void prepare() { } @Override public void commit() { for (Operation operation : operations()) ((FileOperation)operation).commit(); } } /** Factory for file operations */ private static class FileOperations { /** Creates an operation which recursively deletes the given path */ public static DeleteOperation delete(String pathToDelete) { return new 
DeleteOperation(pathToDelete); } } private interface FileOperation extends Transaction.Operation { void commit(); } /** * Recursively deletes this path and everything below. * Succeeds with no action if the path does not exist. */ private static class DeleteOperation implements FileOperation { private final String pathToDelete; DeleteOperation(String pathToDelete) { this.pathToDelete = pathToDelete; } @Override public void commit() { IOUtils.recursiveDeleteDir(new File(pathToDelete)); } } }
/**
 * Keeps track of the deployment sessions for a tenant: local sessions (backed by
 * application directories on the file system) and remote sessions (backed by
 * ZooKeeper via Curator), plus watchers that react to session state changes.
 * Caches are synchronized maps; creation of application packages is additionally
 * guarded by {@code monitor}.
 */
class SessionRepository {

    private static final Logger log = Logger.getLogger(SessionRepository.class.getName());
    // Session directories are named by their numeric session id
    private static final FilenameFilter sessionApplicationsFilter = (dir, name) -> name.matches("\\d+");
    private static final long nonExistingActiveSessionId = 0;

    private final Object monitor = new Object();
    private final Map<Long, LocalSession> localSessionCache = Collections.synchronizedMap(new HashMap<>());
    private final Map<Long, RemoteSession> remoteSessionCache = Collections.synchronizedMap(new HashMap<>());
    private final Map<Long, SessionStateWatcher> sessionStateWatchers = Collections.synchronizedMap(new HashMap<>());
    private final Duration sessionLifetime;
    private final Clock clock;
    private final Curator curator;
    private final Executor zkWatcherExecutor;
    private final FileDistributionFactory fileDistributionFactory;
    private final PermanentApplicationPackage permanentApplicationPackage;
    private final FlagSource flagSource;
    private final TenantFileSystemDirs tenantFileSystemDirs;
    private final Metrics metrics;
    private final MetricUpdater metricUpdater;
    private final Curator.DirectoryCache directoryCache;
    private final TenantApplications applicationRepo;
    private final SessionPreparer sessionPreparer;
    private final Path sessionsPath;
    private final TenantName tenantName;
    private final SessionCounter sessionCounter;
    private final SecretStore secretStore;
    private final HostProvisionerProvider hostProvisionerProvider;
    private final ConfigserverConfig configserverConfig;
    private final ConfigServerDB configServerDB;
    private final Zone zone;
    private final ModelFactoryRegistry modelFactoryRegistry;
    private final ConfigDefinitionRepo configDefinitionRepo;
    private final int maxNodeSize;

    public SessionRepository(TenantName tenantName,
                             TenantApplications applicationRepo,
                             SessionPreparer sessionPreparer,
                             Curator curator,
                             Metrics metrics,
                             StripedExecutor<TenantName> zkWatcherExecutor,
                             FileDistributionFactory fileDistributionFactory,
                             PermanentApplicationPackage permanentApplicationPackage,
                             FlagSource flagSource,
                             ExecutorService zkCacheExecutor,
                             SecretStore secretStore,
                             HostProvisionerProvider hostProvisionerProvider,
                             ConfigserverConfig configserverConfig,
                             ConfigServerDB configServerDB,
                             Zone zone,
                             Clock clock,
                             ModelFactoryRegistry modelFactoryRegistry,
                             ConfigDefinitionRepo configDefinitionRepo,
                             int maxNodeSize) {
        this.tenantName = tenantName;
        sessionCounter = new SessionCounter(curator, tenantName);
        this.sessionsPath = TenantRepository.getSessionsPath(tenantName);
        this.clock = clock;
        this.curator = curator;
        this.sessionLifetime = Duration.ofSeconds(configserverConfig.sessionLifetime());
        // ZooKeeper watcher callbacks are serialized per tenant on the striped executor
        this.zkWatcherExecutor = command -> zkWatcherExecutor.execute(tenantName, command);
        this.fileDistributionFactory = fileDistributionFactory;
        this.permanentApplicationPackage = permanentApplicationPackage;
        this.flagSource = flagSource;
        this.tenantFileSystemDirs = new TenantFileSystemDirs(configServerDB, tenantName);
        this.applicationRepo = applicationRepo;
        this.sessionPreparer = sessionPreparer;
        this.metrics = metrics;
        this.metricUpdater = metrics.getOrCreateMetricUpdater(Metrics.createDimensions(tenantName));
        this.secretStore = secretStore;
        this.hostProvisionerProvider = hostProvisionerProvider;
        this.configserverConfig = configserverConfig;
        this.configServerDB = configServerDB;
        this.zone = zone;
        this.modelFactoryRegistry = modelFactoryRegistry;
        this.configDefinitionRepo = configDefinitionRepo;
        this.maxNodeSize = maxNodeSize;

        // Existing sessions are loaded before the directory cache below is created and started
        loadSessions();
        this.directoryCache = curator.createDirectoryCache(sessionsPath.getAbsolute(), false, false, zkCacheExecutor);
        this.directoryCache.addListener(this::childEvent);
        this.directoryCache.start();
    }

    private void loadSessions() {
        ExecutorService executor = Executors.newFixedThreadPool(Math.max(8, Runtime.getRuntime().availableProcessors()),
                                                                new DaemonThreadFactory("load-sessions-"));
        loadSessions(executor);
    }

    /** Loads all remote sessions in parallel on the given executor, then shuts the executor down. */
    void loadSessions(ExecutorService executor) {
        loadRemoteSessions(executor);
        try {
            executor.shutdown();
            if ( ! executor.awaitTermination(1, TimeUnit.MINUTES))
                log.log(Level.INFO, "Executor did not terminate");
        } catch (InterruptedException e) {
            log.log(Level.WARNING, "Shutdown of executor for loading sessions failed: " + Exceptions.toMessageString(e));
        }
    }

    /** Adds a local session to the cache, and creates its remote counterpart if missing. */
    public void addLocalSession(LocalSession session) {
        long sessionId = session.getSessionId();
        localSessionCache.put(sessionId, session);
        if (remoteSessionCache.get(sessionId) == null)
            createRemoteSession(sessionId);
    }

    public LocalSession getLocalSession(long sessionId) {
        return localSessionCache.get(sessionId);
    }

    /** Returns a copy of local sessions */
    public Collection<LocalSession> getLocalSessions() {
        return List.copyOf(localSessionCache.values());
    }

    /** Builds local sessions from the numeric session directories found on disk. */
    public Set<LocalSession> getLocalSessionsFromFileSystem() {
        File[] sessions = tenantFileSystemDirs.sessionsPath().listFiles(sessionApplicationsFilter);
        if (sessions == null) return Set.of();

        Set<LocalSession> sessionIds = new HashSet<>();
        for (File session : sessions) {
            long sessionId = Long.parseLong(session.getName());
            SessionZooKeeperClient sessionZKClient = createSessionZooKeeperClient(sessionId);
            File sessionDir = getAndValidateExistingSessionAppDir(sessionId);
            ApplicationPackage applicationPackage = FilesApplicationPackage.fromFile(sessionDir);
            LocalSession localSession = new LocalSession(tenantName, sessionId, applicationPackage, sessionZKClient);
            sessionIds.add(localSession);
        }
        return sessionIds;
    }

    /**
     * Prepares a local session: validates the requested Vespa version, runs the session
     * preparer, marks the session PREPARE and (unless a dry run) waits for the prepare
     * waiter to complete within the given timeout budget.
     */
    public ConfigChangeActions prepareLocalSession(Session session, DeployLogger logger, PrepareParams params, Instant now) {
        params.vespaVersion().ifPresent(version -> {
            if ( ! params.isBootstrap() && ! modelFactoryRegistry.allVersions().contains(version))
                throw new UnknownVespaVersionException("Vespa version '" + version + "' not known by this configserver");
        });

        applicationRepo.createApplication(params.getApplicationId());
        logger.log(Level.FINE, "Created application " + params.getApplicationId());
        long sessionId = session.getSessionId();
        SessionZooKeeperClient sessionZooKeeperClient = createSessionZooKeeperClient(sessionId);
        Optional<CompletionWaiter> waiter = params.isDryRun()
                ? Optional.empty()
                : Optional.of(sessionZooKeeperClient.createPrepareWaiter());
        Optional<ApplicationSet> activeApplicationSet = getActiveApplicationSet(params.getApplicationId());
        ConfigChangeActions actions = sessionPreparer.prepare(applicationRepo.getHostValidator(), logger, params,
                                                              activeApplicationSet, now, getSessionAppDir(sessionId),
                                                              session.getApplicationPackage(), sessionZooKeeperClient)
                .getConfigChangeActions();
        setPrepared(session);
        waiter.ifPresent(w -> w.awaitCompletion(params.getTimeoutBudget().timeLeft()));
        return actions;
    }

    /**
     * Creates a new deployment session from an already existing session.
     *
     * @param existingSession the session to use as base
     * @param internalRedeploy whether this session is for a system internal redeploy — not an application package change
     * @param timeoutBudget timeout for creating session and waiting for other servers.
     * @return a new session
     */
    public LocalSession createSessionFromExisting(Session existingSession, boolean internalRedeploy, TimeoutBudget timeoutBudget) {
        ApplicationId existingApplicationId = existingSession.getApplicationId();
        File existingApp = getSessionAppDir(existingSession.getSessionId());
        LocalSession session = createSessionFromApplication(existingApp, existingApplicationId, internalRedeploy, timeoutBudget);
        // Carry over all deployment metadata from the session we are based on
        session.setApplicationId(existingApplicationId);
        session.setApplicationPackageReference(existingSession.getApplicationPackageReference());
        session.setVespaVersion(existingSession.getVespaVersion());
        session.setDockerImageRepository(existingSession.getDockerImageRepository());
        session.setAthenzDomain(existingSession.getAthenzDomain());
        session.setTenantSecretStores(existingSession.getTenantSecretStores());
        session.setOperatorCertificates(existingSession.getOperatorCertificates());
        return session;
    }

    /**
     * Creates a new deployment session from an application package.
     *
     * @param applicationDirectory a File pointing to an application.
     * @param applicationId application id for this new session.
     * @param timeoutBudget Timeout for creating session and waiting for other servers.
     * @return a new session
     */
    public LocalSession createSessionFromApplicationPackage(File applicationDirectory, ApplicationId applicationId, TimeoutBudget timeoutBudget) {
        applicationRepo.createApplication(applicationId);
        return createSessionFromApplication(applicationDirectory, applicationId, false, timeoutBudget);
    }

    /**
     * Creates a local session based on a remote session and the distributed application package.
     * Does not wait for session being created on other servers.
     */
    private void createLocalSession(File applicationFile, ApplicationId applicationId, long sessionId) {
        try {
            ApplicationPackage applicationPackage = createApplicationPackage(applicationFile, applicationId, sessionId, false);
            createLocalSession(sessionId, applicationPackage);
        } catch (Exception e) {
            throw new RuntimeException("Error creating session " + sessionId, e);
        }
    }

    /** Removes the session from cache and watchers, and deletes its application directory. */
    public void deleteLocalSession(LocalSession session) {
        long sessionId = session.getSessionId();
        log.log(Level.FINE, () -> "Deleting local session " + sessionId);
        SessionStateWatcher watcher = sessionStateWatchers.remove(sessionId);
        if (watcher != null) watcher.close();
        localSessionCache.remove(sessionId);
        NestedTransaction transaction = new NestedTransaction();
        transaction.add(FileTransaction.from(FileOperations.delete(getSessionAppDir(sessionId).getAbsolutePath())));
        transaction.commit();
    }

    private void deleteAllSessions() {
        for (LocalSession session : getLocalSessions()) {
            deleteLocalSession(session);
        }
    }

    public RemoteSession getRemoteSession(long sessionId) {
        return remoteSessionCache.get(sessionId);
    }

    /** Returns a copy of remote sessions */
    public Collection<RemoteSession> getRemoteSessions() {
        return List.copyOf(remoteSessionCache.values());
    }

    public List<Long> getRemoteSessionsFromZooKeeper() {
        return getSessionList(curator.getChildren(sessionsPath));
    }

    /** Creates (or reloads, if active) a remote session, caches it and registers a state watcher. */
    public RemoteSession createRemoteSession(long sessionId) {
        SessionZooKeeperClient sessionZKClient = createSessionZooKeeperClient(sessionId);
        RemoteSession session = new RemoteSession(tenantName, sessionId, sessionZKClient);
        RemoteSession newSession = loadSessionIfActive(session).orElse(session);
        remoteSessionCache.put(sessionId, newSession);
        updateSessionStateWatcher(sessionId, newSession);
        return newSession;
    }

    /**
     * Deletes expired, non-active remote sessions from ZooKeeper.
     *
     * @return the number of sessions deleted
     */
    public int deleteExpiredRemoteSessions(Clock clock, Duration expiryTime) {
        List<Long> remoteSessionsFromZooKeeper = getRemoteSessionsFromZooKeeper();
        log.log(Level.FINE, () -> "Remote sessions for tenant " + tenantName + ": " + remoteSessionsFromZooKeeper);

        int deleted = 0;
        for (long sessionId : remoteSessionsFromZooKeeper) {
            Session session = remoteSessionCache.get(sessionId);
            if (session == null) {
                log.log(Level.FINE, () -> "Remote session " + sessionId + " is null, creating a new one");
                session = new RemoteSession(tenantName, sessionId, createSessionZooKeeperClient(sessionId));
            }
            if (session.getStatus() == Session.Status.ACTIVATE) continue;
            if (sessionHasExpired(session.getCreateTime(), expiryTime, clock)) {
                log.log(Level.FINE, () -> "Remote session " + sessionId + " for " + tenantName + " has expired, deleting it");
                deleteRemoteSessionFromZooKeeper(session);
                deleted++;
            }
            // NOTE(review): at most 2 deletions per invocation — presumably to limit load per run; confirm
            if (deleted >= 2) break;
        }
        return deleted;
    }

    public void deactivateAndUpdateCache(RemoteSession remoteSession) {
        RemoteSession session = remoteSession.deactivated();
        remoteSessionCache.put(session.getSessionId(), session);
    }

    public void deleteRemoteSessionFromZooKeeper(Session session) {
        SessionZooKeeperClient sessionZooKeeperClient = createSessionZooKeeperClient(session.getSessionId());
        Transaction transaction = sessionZooKeeperClient.deleteTransaction();
        transaction.commit();
        transaction.close();
    }

    private boolean sessionHasExpired(Instant created, Duration expiryTime, Clock clock) {
        return (created.plus(expiryTime).isBefore(clock.instant()));
    }

    private List<Long> getSessionListFromDirectoryCache(List<ChildData> children) {
        return getSessionList(children.stream()
                                      .map(child -> Path.fromString(child.getPath()).getName())
                                      .collect(Collectors.toList()));
    }

    private List<Long> getSessionList(List<String> children) {
        return children.stream().map(Long::parseLong).collect(Collectors.toList());
    }

    // Submits one sessionAdded task per session id found in ZooKeeper and waits for all of them
    private void loadRemoteSessions(ExecutorService executor) throws NumberFormatException {
        Map<Long, Future<?>> futures = new HashMap<>();
        for (long sessionId : getRemoteSessionsFromZooKeeper()) {
            futures.put(sessionId, executor.submit(() -> sessionAdded(sessionId)));
        }
        futures.forEach((sessionId, future) -> {
            try {
                future.get();
                log.log(Level.FINE, () -> "Remote session " + sessionId + " loaded");
            } catch (ExecutionException | InterruptedException e) {
                throw new RuntimeException("Could not load remote session " + sessionId, e);
            }
        });
    }

    /**
     * A session for which we don't have a watcher, i.e. hitherto unknown to us.
     *
     * @param sessionId session id for the new session
     */
    public void sessionAdded(long sessionId) {
        if (hasStatusDeleted(sessionId)) return;

        log.log(Level.FINE, () -> "Adding remote session " + sessionId);
        Session session = createRemoteSession(sessionId);
        if (session.getStatus() == Session.Status.NEW) {
            log.log(Level.FINE, () -> session.logPre() + "Confirming upload for session " + sessionId);
            confirmUpload(session);
        }
        createLocalSessionFromDistributedApplicationPackage(sessionId);
    }

    private boolean hasStatusDeleted(long sessionId) {
        SessionZooKeeperClient sessionZKClient = createSessionZooKeeperClient(sessionId);
        RemoteSession session = new RemoteSession(tenantName, sessionId, sessionZKClient);
        return session.getStatus() == Session.Status.DELETE;
    }

    /** Activates the application of the given session and notifies the active waiter. */
    void activate(RemoteSession session) {
        long sessionId = session.getSessionId();
        CompletionWaiter waiter = createSessionZooKeeperClient(sessionId).getActiveWaiter();
        log.log(Level.FINE, () -> session.logPre() + "Activating " + sessionId);
        applicationRepo.activateApplication(ensureApplicationLoaded(session), sessionId);
        log.log(Level.FINE, () -> session.logPre() + "Notifying " + waiter);
        notifyCompletion(waiter);
        log.log(Level.INFO, session.logPre() + "Session activated: " + sessionId);
    }

    // If this session is the active session of some application, activate it and return the cached entry
    private Optional<RemoteSession> loadSessionIfActive(RemoteSession session) {
        for (ApplicationId applicationId : applicationRepo.activeApplications()) {
            Optional<Long> activeSession = applicationRepo.activeSessionOf(applicationId);
            if (activeSession.isPresent() && activeSession.get() == session.getSessionId()) {
                log.log(Level.FINE, () -> "Found active application for session " + session.getSessionId() + " , loading it");
                applicationRepo.activateApplication(ensureApplicationLoaded(session), session.getSessionId());
                log.log(Level.INFO, session.logPre() + "Application activated successfully: " + applicationId + " (generation " + session.getSessionId() + ")");
                return Optional.ofNullable(remoteSessionCache.get(session.getSessionId()));
            }
        }
        return Optional.empty();
    }

    void prepareRemoteSession(RemoteSession session) {
        SessionZooKeeperClient sessionZooKeeperClient = createSessionZooKeeperClient(session.getSessionId());
        CompletionWaiter waiter = sessionZooKeeperClient.getPrepareWaiter();
        ensureApplicationLoaded(session);
        notifyCompletion(waiter);
    }

    /** Returns the session's application set, loading and caching it first if not already loaded. */
    public ApplicationSet ensureApplicationLoaded(RemoteSession session) {
        if (session.applicationSet().isPresent()) {
            return session.applicationSet().get();
        }
        Optional<Long> activeSessionId = getActiveSessionId(session.getApplicationId());
        Optional<ApplicationSet> previousApplicationSet = activeSessionId.filter(session::isNewerThan)
                                                                         .flatMap(this::getApplicationSet);
        ApplicationSet applicationSet = loadApplication(session, previousApplicationSet);
        RemoteSession activated = session.activated(applicationSet);
        long sessionId = activated.getSessionId();
        remoteSessionCache.put(sessionId, activated);
        updateSessionStateWatcher(sessionId, activated);

        return applicationSet;
    }

    void confirmUpload(Session session) {
        CompletionWaiter waiter = session.getSessionZooKeeperClient().getUploadWaiter();
        long sessionId = session.getSessionId();
        log.log(Level.FINE, () -> "Notifying upload waiter for session " + sessionId);
        notifyCompletion(waiter);
        log.log(Level.FINE, () -> "Done notifying upload for session " + sessionId);
    }

    // Notifies the waiter; tolerates the waiter node having been deleted or already created
    void notifyCompletion(CompletionWaiter completionWaiter) {
        try {
            completionWaiter.notifyCompletion();
        } catch (RuntimeException e) {
            Set<Class<? extends KeeperException>> acceptedExceptions = Set.of(KeeperException.NoNodeException.class,
                                                                              KeeperException.NodeExistsException.class);
            Class<? extends Throwable> exceptionClass = e.getCause().getClass();
            if (acceptedExceptions.contains(exceptionClass))
                log.log(Level.FINE, () -> "Not able to notify completion for session (" + completionWaiter + ")," +
                                          " node " + (exceptionClass.equals(KeeperException.NoNodeException.class)
                                                      ? "has been deleted"
                                                      : "already exists"));
            else
                throw e;
        }
    }

    // Builds the application models for a session from its application package in ZooKeeper
    private ApplicationSet loadApplication(Session session, Optional<ApplicationSet> previousApplicationSet) {
        log.log(Level.FINE, () -> "Loading application for " + session);
        SessionZooKeeperClient sessionZooKeeperClient = createSessionZooKeeperClient(session.getSessionId());
        ApplicationPackage applicationPackage = sessionZooKeeperClient.loadApplicationPackage();
        ActivatedModelsBuilder builder = new ActivatedModelsBuilder(session.getTenantName(),
                                                                    session.getSessionId(),
                                                                    sessionZooKeeperClient,
                                                                    previousApplicationSet,
                                                                    sessionPreparer.getExecutor(),
                                                                    curator,
                                                                    metrics,
                                                                    permanentApplicationPackage,
                                                                    flagSource,
                                                                    secretStore,
                                                                    hostProvisionerProvider,
                                                                    configserverConfig,
                                                                    zone,
                                                                    modelFactoryRegistry,
                                                                    configDefinitionRepo);
        SettableOptional<AllocatedHosts> allocatedHosts = new SettableOptional<>(applicationPackage.getAllocatedHosts());

        return ApplicationSet.fromList(builder.buildModels(session.getApplicationId(),
                                                           sessionZooKeeperClient.readDockerImageRepository(),
                                                           sessionZooKeeperClient.readVespaVersion(),
                                                           applicationPackage,
                                                           allocatedHosts,
                                                           clock.instant()));
    }

    // Recomputes per-status session count metrics from the remote session cache
    private void nodeChanged() {
        zkWatcherExecutor.execute(() -> {
            Multiset<Session.Status> sessionMetrics = HashMultiset.create();
            getRemoteSessions().forEach(session -> sessionMetrics.add(session.getStatus()));
            metricUpdater.setNewSessions(sessionMetrics.count(Session.Status.NEW));
            metricUpdater.setPreparedSessions(sessionMetrics.count(Session.Status.PREPARE));
            metricUpdater.setActivatedSessions(sessionMetrics.count(Session.Status.ACTIVATE));
            metricUpdater.setDeactivatedSessions(sessionMetrics.count(Session.Status.DEACTIVATE));
        });
    }

    @SuppressWarnings("unused")
    private void childEvent(CuratorFramework ignored, PathChildrenCacheEvent event) {
        zkWatcherExecutor.execute(() -> {
            log.log(Level.FINE, () -> "Got child event: " + event);
            switch (event.getType()) {
                case CHILD_ADDED:
                case CHILD_REMOVED:
                case CONNECTION_RECONNECTED:
                    sessionsChanged();
                    break;
                default:
                    break;
            }
        });
    }

    private boolean hasExpired(LocalSession candidate) {
        return candidate.getCreateTime().plus(sessionLifetime).isBefore(clock.instant());
    }

    private boolean canBeDeleted(LocalSession candidate) {
        // Sessions in UNKNOWN or ACTIVATE status are kept, unless their directory is very old
        return ! List.of(Session.Status.UNKNOWN, Session.Status.ACTIVATE).contains(candidate.getStatus())
                || oldSessionDirWithNonExistingSession(candidate);
    }

    private boolean oldSessionDirWithNonExistingSession(LocalSession session) {
        File sessionDir = tenantFileSystemDirs.getUserApplicationDir(session.getSessionId());
        return sessionDir.exists()
                && session.getStatus() == Session.Status.UNKNOWN
                && created(sessionDir).plus(Duration.ofDays(30)).isBefore(clock.instant());
    }

    // Session directories modified within the last 30 seconds are considered "new"
    private Set<Long> findNewSessionsInFileSystem() {
        File[] sessions = tenantFileSystemDirs.sessionsPath().listFiles(sessionApplicationsFilter);
        Set<Long> newSessions = new HashSet<>();
        if (sessions != null) {
            for (File session : sessions) {
                try {
                    if (Files.getLastModifiedTime(session.toPath()).toInstant()
                             .isAfter(clock.instant().minus(Duration.ofSeconds(30))))
                        newSessions.add(Long.parseLong(session.getName()));
                } catch (IOException e) {
                    log.log(Level.INFO, "Unable to find last modified time for " + session.toPath());
                };
            }
        }
        return newSessions;
    }

    private Instant created(File file) {
        BasicFileAttributes fileAttributes;
        try {
            fileAttributes = readAttributes(file.toPath(), BasicFileAttributes.class);
            return fileAttributes.creationTime().toInstant();
        } catch (IOException e) {
            throw new UncheckedIOException(e);
        }
    }

    private void ensureSessionPathDoesNotExist(long sessionId) {
        Path sessionPath = getSessionPath(sessionId);
        if (curator.exists(sessionPath)) {
            throw new IllegalArgumentException("Path " + sessionPath.getAbsolute() + " already exists in ZooKeeper");
        }
    }

    private ApplicationPackage createApplication(File userDir,
                                                 File configApplicationDir,
                                                 ApplicationId applicationId,
                                                 long sessionId,
                                                 Optional<Long> currentlyActiveSessionId,
                                                 boolean internalRedeploy) {
        long deployTimestamp = System.currentTimeMillis();
        String user = System.getenv("USER");
        if (user == null) {
            user = "unknown";
        }
        DeployData deployData = new DeployData(user, userDir.getAbsolutePath(), applicationId, deployTimestamp,
                                               internalRedeploy, sessionId,
                                               currentlyActiveSessionId.orElse(nonExistingActiveSessionId));
        return FilesApplicationPackage.fromFileWithDeployData(configApplicationDir, deployData);
    }

    // Allocates the next session id, creates the session in ZooKeeper, and waits for upload completion
    private LocalSession createSessionFromApplication(File applicationFile,
                                                      ApplicationId applicationId,
                                                      boolean internalRedeploy,
                                                      TimeoutBudget timeoutBudget) {
        long sessionId = getNextSessionId();
        try {
            ensureSessionPathDoesNotExist(sessionId);
            ApplicationPackage app = createApplicationPackage(applicationFile, applicationId, sessionId, internalRedeploy);
            log.log(Level.FINE, () -> TenantRepository.logPre(tenantName) + "Creating session " + sessionId + " in ZooKeeper");
            SessionZooKeeperClient sessionZKClient = createSessionZooKeeperClient(sessionId);
            sessionZKClient.createNewSession(clock.instant());
            CompletionWaiter waiter = sessionZKClient.getUploadWaiter();
            LocalSession session = new LocalSession(tenantName, sessionId, app, sessionZKClient);
            // Wait at most 120 seconds, or whatever is left of the timeout budget if less
            waiter.awaitCompletion(Duration.ofSeconds(Math.min(120, timeoutBudget.timeLeft().getSeconds())));
            addLocalSession(session);
            return session;
        } catch (Exception e) {
            throw new RuntimeException("Error creating session " + sessionId, e);
        }
    }

    private ApplicationPackage createApplicationPackage(File applicationFile,
                                                        ApplicationId applicationId,
                                                        long sessionId,
                                                        boolean internalRedeploy) throws IOException {
        // Guarded by monitor so concurrent deployments do not interleave copy + metadata writes
        synchronized (monitor) {
            Optional<Long> activeSessionId = getActiveSessionId(applicationId);
            File userApplicationDir = getSessionAppDir(sessionId);
            copyApp(applicationFile, userApplicationDir);
            ApplicationPackage applicationPackage = createApplication(applicationFile,
                                                                      userApplicationDir,
                                                                      applicationId,
                                                                      sessionId,
                                                                      activeSessionId,
                                                                      internalRedeploy);
            applicationPackage.writeMetaData();
            return applicationPackage;
        }
    }

    public Optional<ApplicationSet> getActiveApplicationSet(ApplicationId appId) {
        return applicationRepo.activeSessionOf(appId).flatMap(this::getApplicationSet);
    }

    private Optional<ApplicationSet> getApplicationSet(long sessionId) {
        Optional<ApplicationSet> applicationSet = Optional.empty();
        try {
            applicationSet = Optional.ofNullable(getRemoteSession(sessionId)).map(this::ensureApplicationLoaded);
        } catch (IllegalArgumentException e) {
            // Intentionally ignored: fall through and return empty
        }
        return applicationSet;
    }

    // Copies into a temp dir first, then moves atomically, so a partial copy is never visible
    private void copyApp(File sourceDir, File destinationDir) throws IOException {
        if (destinationDir.exists()) {
            log.log(Level.INFO, "Destination dir " + destinationDir + " already exists, app has already been copied");
            return;
        }
        if (! sourceDir.isDirectory())
            throw new IllegalArgumentException(sourceDir.getAbsolutePath() + " is not a directory");

        java.nio.file.Path tempDestinationDir = null;
        try {
            tempDestinationDir = Files.createTempDirectory(destinationDir.getParentFile().toPath(), "app-package");
            log.log(Level.FINE, "Copying dir " + sourceDir.getAbsolutePath() + " to " + tempDestinationDir.toFile().getAbsolutePath());
            IOUtils.copyDirectory(sourceDir, tempDestinationDir.toFile());
            moveSearchDefinitionsToSchemasDir(tempDestinationDir);
            log.log(Level.FINE, "Moving " + tempDestinationDir + " to " + destinationDir.getAbsolutePath());
            Files.move(tempDestinationDir, destinationDir.toPath(), StandardCopyOption.ATOMIC_MOVE);
        } finally {
            // No-op after a successful move (the temp dir no longer exists); cleans up on failure
            if (tempDestinationDir != null)
                IOUtils.recursiveDeleteDir(tempDestinationDir.toFile());
        }
    }

    // Moves files from the legacy search definitions dir into the schemas dir, then deletes the old dir
    private void moveSearchDefinitionsToSchemasDir(java.nio.file.Path applicationDir) throws IOException {
        File schemasDir = applicationDir.resolve(ApplicationPackage.SCHEMAS_DIR.getRelative()).toFile();
        File sdDir = applicationDir.resolve(ApplicationPackage.SEARCH_DEFINITIONS_DIR.getRelative()).toFile();
        if (sdDir.exists() && sdDir.isDirectory()) {
            File[] sdFiles = sdDir.listFiles();
            if (sdFiles != null) {
                Files.createDirectories(schemasDir.toPath());
                Arrays.asList(sdFiles).forEach(file -> Exceptions.uncheck(
                        () -> Files.move(file.toPath(),
                                         schemasDir.toPath().resolve(file.toPath().getFileName()),
                                         StandardCopyOption.REPLACE_EXISTING)));
            }
            Files.delete(sdDir.toPath());
        }
    }

    /**
     * Returns a new session instance for the given session id.
     */
    void createSessionFromId(long sessionId) {
        File sessionDir = getAndValidateExistingSessionAppDir(sessionId);
        ApplicationPackage applicationPackage = FilesApplicationPackage.fromFile(sessionDir);
        createLocalSession(sessionId, applicationPackage);
    }

    void createLocalSession(long sessionId, ApplicationPackage applicationPackage) {
        SessionZooKeeperClient sessionZKClient = createSessionZooKeeperClient(sessionId);
        LocalSession session = new LocalSession(tenantName, sessionId, applicationPackage, sessionZKClient);
        addLocalSession(session);
    }

    /**
     * Returns a new local session for the given session id if it does not already exist.
     * Will also add the session to the local session cache if necessary
     */
    public void createLocalSessionFromDistributedApplicationPackage(long sessionId) {
        if (applicationRepo.sessionExistsInFileSystem(sessionId)) {
            log.log(Level.FINE, () -> "Local session for session id " + sessionId + " already exists");
            createSessionFromId(sessionId);
            return;
        }

        SessionZooKeeperClient sessionZKClient = createSessionZooKeeperClient(sessionId);
        FileReference fileReference = sessionZKClient.readApplicationPackageReference();
        log.log(Level.FINE, () -> "File reference for session id " + sessionId + ": " + fileReference);
        if (fileReference != null) {
            File rootDir = new File(Defaults.getDefaults().underVespaHome(configserverConfig.fileReferencesDir()));
            File sessionDir;
            FileDirectory fileDirectory = new FileDirectory(rootDir);
            try {
                sessionDir = fileDirectory.getFile(fileReference);
            } catch (IllegalArgumentException e) {
                // The file reference may not have been distributed to this server yet; just return
                log.log(Level.FINE, () -> "File reference for session id " + sessionId + ": " + fileReference + " not found in " + fileDirectory);
                return;
            }
            ApplicationId applicationId = sessionZKClient.readApplicationId()
                    .orElseThrow(() -> new RuntimeException("Could not find application id for session " + sessionId));
            log.log(Level.FINE, () -> "Creating local session for tenant '" + tenantName + "' with session id " + sessionId);
            createLocalSession(sessionDir, applicationId, sessionId);
        }
    }

    private Optional<Long> getActiveSessionId(ApplicationId applicationId) {
        List<ApplicationId> applicationIds = applicationRepo.activeApplications();
        return applicationIds.contains(applicationId)
                ? Optional.of(applicationRepo.requireActiveSessionOf(applicationId))
                : Optional.empty();
    }

    private long getNextSessionId() {
        return sessionCounter.nextSessionId();
    }

    public Path getSessionPath(long sessionId) {
        return sessionsPath.append(String.valueOf(sessionId));
    }

    Path getSessionStatePath(long sessionId) {
        return getSessionPath(sessionId).append(ZKApplication.SESSIONSTATE_ZK_SUBPATH);
    }

    private SessionZooKeeperClient createSessionZooKeeperClient(long sessionId) {
        return new SessionZooKeeperClient(curator,
                                          tenantName,
                                          sessionId,
                                          configserverConfig.serverId(),
                                          fileDistributionFactory.createFileManager(getSessionAppDir(sessionId)),
                                          maxNodeSize);
    }

    private File getAndValidateExistingSessionAppDir(long sessionId) {
        File appDir = getSessionAppDir(sessionId);
        if (!appDir.exists() || !appDir.isDirectory()) {
            throw new IllegalArgumentException("Unable to find correct application directory for session " + sessionId);
        }
        return appDir;
    }

    private File getSessionAppDir(long sessionId) {
        return new TenantFileSystemDirs(configServerDB, tenantName).getUserApplicationDir(sessionId);
    }

    // Creates a state watcher for the session if we have none, otherwise points the existing one at it
    private void updateSessionStateWatcher(long sessionId, RemoteSession remoteSession) {
        SessionStateWatcher sessionStateWatcher = sessionStateWatchers.get(sessionId);
        if (sessionStateWatcher == null) {
            Curator.FileCache fileCache = curator.createFileCache(getSessionStatePath(sessionId).getAbsolute(), false);
            fileCache.addListener(this::nodeChanged);
            sessionStateWatchers.put(sessionId, new SessionStateWatcher(fileCache, remoteSession, metricUpdater, zkWatcherExecutor, this));
        } else {
            sessionStateWatcher.updateRemoteSession(remoteSession);
        }
    }

    @Override
    public String toString() {
        return getLocalSessions().toString();
    }

    public Clock clock() { return clock; }

    /** Deletes all local sessions and tenant directories, and closes the ZooKeeper directory cache. */
    public void close() {
        deleteAllSessions();
        tenantFileSystemDirs.delete();
        try {
            if (directoryCache != null) {
                directoryCache.close();
            }
        } catch (Exception e) {
            log.log(Level.WARNING, "Exception when closing path cache", e);
        } finally {
            // Passing an empty list removes all cached remote sessions and their watchers
            checkForRemovedSessions(new ArrayList<>());
        }
    }

    private void sessionsChanged() throws NumberFormatException {
        List<Long> sessions = getSessionListFromDirectoryCache(directoryCache.getCurrentData());
        checkForRemovedSessions(sessions);
        checkForAddedSessions(sessions);
    }

    // Evicts cached remote sessions (and closes their watchers) that are not in the given list
    private void checkForRemovedSessions(List<Long> existingSessions) {
        for (Iterator<RemoteSession> it = remoteSessionCache.values().iterator(); it.hasNext(); ) {
            long sessionId = it.next().sessionId;
            if (existingSessions.contains(sessionId)) continue;

            SessionStateWatcher watcher = sessionStateWatchers.remove(sessionId);
            if (watcher != null) watcher.close();
            it.remove();
            metricUpdater.incRemovedSessions();
        }
    }

    private void checkForAddedSessions(List<Long> sessions) {
        for (Long sessionId : sessions)
            if (remoteSessionCache.get(sessionId) == null)
                sessionAdded(sessionId);
    }

    public Transaction createActivateTransaction(Session session) {
        Transaction transaction = createSetStatusTransaction(session, Session.Status.ACTIVATE);
        transaction.add(applicationRepo.createPutTransaction(session.getApplicationId(), session.getSessionId()).operations());
        return transaction;
    }

    public Transaction createSetStatusTransaction(Session session, Session.Status status) {
        return session.sessionZooKeeperClient.createWriteStatusTransaction(status);
    }

    void setPrepared(Session session) {
        session.setStatus(Session.Status.PREPARE);
    }

    private static class FileTransaction extends AbstractTransaction {

        public static FileTransaction from(FileOperation operation) {
            FileTransaction transaction = new FileTransaction();
            transaction.add(operation);
            return transaction;
        }

        @Override
        public void prepare() { }

        @Override
        public void commit() {
            for (Operation operation : operations())
                ((FileOperation)operation).commit();
        }

    }

    /** Factory for file operations */
    private static class FileOperations {

        /** Creates an operation which recursively deletes the given path */
        public static DeleteOperation delete(String pathToDelete) {
            return new DeleteOperation(pathToDelete);
        }

    }

    private interface FileOperation extends Transaction.Operation {
        void commit();
    }

    /**
     * Recursively deletes this path and everything below.
     * Succeeds with no action if the path does not exist.
     */
    private static class DeleteOperation implements FileOperation {

        private final String pathToDelete;

        DeleteOperation(String pathToDelete) {
            this.pathToDelete = pathToDelete;
        }

        @Override
        public void commit() {
            IOUtils.recursiveDeleteDir(new File(pathToDelete));
        }

    }

}
This was originally at level FINE, and I forgot to adjust it when making this change. I'll change it back to FINE in a few days if this change works as expected.
/**
 * Deletes expired local sessions for this tenant, based on the sessions found on the file system.
 * A session is deleted when it has expired and is in a deletable state, or when it is more than
 * one day old and is no longer the active session for its application.
 *
 * @param activeSessions map from application id to the currently active session id for that application
 */
public void deleteExpiredSessions(Map<ApplicationId, Long> activeSessions) {
    log.log(Level.FINE, () -> "Purging old sessions for tenant '" + tenantName + "'");
    Set<LocalSession> toDelete = new HashSet<>();
    Set<Long> newSessions = findNewSessionsInFileSystem();
    try {
        for (LocalSession candidate : getLocalSessionsFromFileSystem()) {
            // Skip sessions created very recently: they may not be fully written/registered yet
            if (newSessions.contains(candidate.getSessionId())) {
                // TODO: Lower to Level.FINE once this change is verified in production
                log.log(Level.INFO, () -> "Skipping session " + candidate.getSessionId() + ", newly created");
                continue;
            }
            Instant createTime = candidate.getCreateTime();
            log.log(Level.FINE, () -> "Candidate session for deletion: " + candidate.getSessionId() + ", created: " + createTime);
            if (hasExpired(candidate) && canBeDeleted(candidate)) {
                toDelete.add(candidate);
            } else if (createTime.plus(Duration.ofDays(1)).isBefore(clock.instant())) {
                // More than a day old: delete unless it is the active session for its application
                Optional<ApplicationId> applicationId = candidate.getOptionalApplicationId();
                if (applicationId.isEmpty()) continue;
                Long activeSession = activeSessions.get(applicationId.get());
                if (activeSession == null || activeSession != candidate.getSessionId()) {
                    toDelete.add(candidate);
                    // Safe to call get(): isEmpty() was checked above. Logging the contained id,
                    // not the Optional wrapper (which would render as "Optional[...]").
                    log.log(Level.INFO, "Deleted inactive session " + candidate.getSessionId() + " created " +
                                        createTime + " for '" + applicationId.get() + "'");
                }
            }
        }
        toDelete.forEach(this::deleteLocalSession);
    } catch (Throwable e) {
        // Deliberately broad: purging is best-effort and must never propagate to the caller
        log.log(Level.WARNING, "Error when purging old sessions ", e);
    }
    log.log(Level.FINE, () -> "Done purging old sessions");
}
log.log(Level.INFO, () -> "Skipping session " + candidate.getSessionId() + ", newly created: ");
/**
 * Deletes expired local sessions for this tenant, based on the sessions found on the file system.
 * A session is deleted when it has expired and is in a deletable state, or when it is more than
 * one day old and is no longer the active session for its application.
 *
 * @param activeSessions map from application id to the currently active session id for that application
 */
public void deleteExpiredSessions(Map<ApplicationId, Long> activeSessions) {
    log.log(Level.FINE, () -> "Purging old sessions for tenant '" + tenantName + "'");
    Set<LocalSession> toDelete = new HashSet<>();
    Set<Long> newSessions = findNewSessionsInFileSystem();
    try {
        for (LocalSession candidate : getLocalSessionsFromFileSystem()) {
            // Skip sessions created very recently: they may not be fully written/registered yet
            if (newSessions.contains(candidate.getSessionId())) {
                // TODO: Lower to Level.FINE once this change is verified in production
                log.log(Level.INFO, () -> "Skipping session " + candidate.getSessionId() + ", newly created");
                continue;
            }
            Instant createTime = candidate.getCreateTime();
            log.log(Level.FINE, () -> "Candidate session for deletion: " + candidate.getSessionId() + ", created: " + createTime);
            if (hasExpired(candidate) && canBeDeleted(candidate)) {
                toDelete.add(candidate);
            } else if (createTime.plus(Duration.ofDays(1)).isBefore(clock.instant())) {
                // More than a day old: delete unless it is the active session for its application
                Optional<ApplicationId> applicationId = candidate.getOptionalApplicationId();
                if (applicationId.isEmpty()) continue;
                Long activeSession = activeSessions.get(applicationId.get());
                if (activeSession == null || activeSession != candidate.getSessionId()) {
                    toDelete.add(candidate);
                    // Safe to call get(): isEmpty() was checked above. Logging the contained id,
                    // not the Optional wrapper (which would render as "Optional[...]").
                    log.log(Level.INFO, "Deleted inactive session " + candidate.getSessionId() + " created " +
                                        createTime + " for '" + applicationId.get() + "'");
                }
            }
        }
        toDelete.forEach(this::deleteLocalSession);
    } catch (Throwable e) {
        // Deliberately broad: purging is best-effort and must never propagate to the caller
        log.log(Level.WARNING, "Error when purging old sessions ", e);
    }
    log.log(Level.FINE, () -> "Done purging old sessions");
}
class SessionRepository { private static final Logger log = Logger.getLogger(SessionRepository.class.getName()); private static final FilenameFilter sessionApplicationsFilter = (dir, name) -> name.matches("\\d+"); private static final long nonExistingActiveSessionId = 0; private final Object monitor = new Object(); private final Map<Long, LocalSession> localSessionCache = Collections.synchronizedMap(new HashMap<>()); private final Map<Long, RemoteSession> remoteSessionCache = Collections.synchronizedMap(new HashMap<>()); private final Map<Long, SessionStateWatcher> sessionStateWatchers = Collections.synchronizedMap(new HashMap<>()); private final Duration sessionLifetime; private final Clock clock; private final Curator curator; private final Executor zkWatcherExecutor; private final FileDistributionFactory fileDistributionFactory; private final PermanentApplicationPackage permanentApplicationPackage; private final FlagSource flagSource; private final TenantFileSystemDirs tenantFileSystemDirs; private final Metrics metrics; private final MetricUpdater metricUpdater; private final Curator.DirectoryCache directoryCache; private final TenantApplications applicationRepo; private final SessionPreparer sessionPreparer; private final Path sessionsPath; private final TenantName tenantName; private final SessionCounter sessionCounter; private final SecretStore secretStore; private final HostProvisionerProvider hostProvisionerProvider; private final ConfigserverConfig configserverConfig; private final ConfigServerDB configServerDB; private final Zone zone; private final ModelFactoryRegistry modelFactoryRegistry; private final ConfigDefinitionRepo configDefinitionRepo; private final int maxNodeSize; public SessionRepository(TenantName tenantName, TenantApplications applicationRepo, SessionPreparer sessionPreparer, Curator curator, Metrics metrics, StripedExecutor<TenantName> zkWatcherExecutor, FileDistributionFactory fileDistributionFactory, PermanentApplicationPackage 
permanentApplicationPackage, FlagSource flagSource, ExecutorService zkCacheExecutor, SecretStore secretStore, HostProvisionerProvider hostProvisionerProvider, ConfigserverConfig configserverConfig, ConfigServerDB configServerDB, Zone zone, Clock clock, ModelFactoryRegistry modelFactoryRegistry, ConfigDefinitionRepo configDefinitionRepo, int maxNodeSize) { this.tenantName = tenantName; sessionCounter = new SessionCounter(curator, tenantName); this.sessionsPath = TenantRepository.getSessionsPath(tenantName); this.clock = clock; this.curator = curator; this.sessionLifetime = Duration.ofSeconds(configserverConfig.sessionLifetime()); this.zkWatcherExecutor = command -> zkWatcherExecutor.execute(tenantName, command); this.fileDistributionFactory = fileDistributionFactory; this.permanentApplicationPackage = permanentApplicationPackage; this.flagSource = flagSource; this.tenantFileSystemDirs = new TenantFileSystemDirs(configServerDB, tenantName); this.applicationRepo = applicationRepo; this.sessionPreparer = sessionPreparer; this.metrics = metrics; this.metricUpdater = metrics.getOrCreateMetricUpdater(Metrics.createDimensions(tenantName)); this.secretStore = secretStore; this.hostProvisionerProvider = hostProvisionerProvider; this.configserverConfig = configserverConfig; this.configServerDB = configServerDB; this.zone = zone; this.modelFactoryRegistry = modelFactoryRegistry; this.configDefinitionRepo = configDefinitionRepo; this.maxNodeSize = maxNodeSize; loadSessions(); this.directoryCache = curator.createDirectoryCache(sessionsPath.getAbsolute(), false, false, zkCacheExecutor); this.directoryCache.addListener(this::childEvent); this.directoryCache.start(); } private void loadSessions() { ExecutorService executor = Executors.newFixedThreadPool(Math.max(8, Runtime.getRuntime().availableProcessors()), new DaemonThreadFactory("load-sessions-")); loadSessions(executor); } void loadSessions(ExecutorService executor) { loadRemoteSessions(executor); try { executor.shutdown(); if 
( ! executor.awaitTermination(1, TimeUnit.MINUTES)) log.log(Level.INFO, "Executor did not terminate"); } catch (InterruptedException e) { log.log(Level.WARNING, "Shutdown of executor for loading sessions failed: " + Exceptions.toMessageString(e)); } } public void addLocalSession(LocalSession session) { long sessionId = session.getSessionId(); localSessionCache.put(sessionId, session); if (remoteSessionCache.get(sessionId) == null) createRemoteSession(sessionId); } public LocalSession getLocalSession(long sessionId) { return localSessionCache.get(sessionId); } /** Returns a copy of local sessions */ public Collection<LocalSession> getLocalSessions() { return List.copyOf(localSessionCache.values()); } public Set<LocalSession> getLocalSessionsFromFileSystem() { File[] sessions = tenantFileSystemDirs.sessionsPath().listFiles(sessionApplicationsFilter); if (sessions == null) return Set.of(); Set<LocalSession> sessionIds = new HashSet<>(); for (File session : sessions) { long sessionId = Long.parseLong(session.getName()); SessionZooKeeperClient sessionZKClient = createSessionZooKeeperClient(sessionId); File sessionDir = getAndValidateExistingSessionAppDir(sessionId); ApplicationPackage applicationPackage = FilesApplicationPackage.fromFile(sessionDir); LocalSession localSession = new LocalSession(tenantName, sessionId, applicationPackage, sessionZKClient); sessionIds.add(localSession); } return sessionIds; } public ConfigChangeActions prepareLocalSession(Session session, DeployLogger logger, PrepareParams params, Instant now) { params.vespaVersion().ifPresent(version -> { if ( ! params.isBootstrap() && ! 
modelFactoryRegistry.allVersions().contains(version)) throw new UnknownVespaVersionException("Vespa version '" + version + "' not known by this configserver"); }); applicationRepo.createApplication(params.getApplicationId()); logger.log(Level.FINE, "Created application " + params.getApplicationId()); long sessionId = session.getSessionId(); SessionZooKeeperClient sessionZooKeeperClient = createSessionZooKeeperClient(sessionId); Optional<CompletionWaiter> waiter = params.isDryRun() ? Optional.empty() : Optional.of(sessionZooKeeperClient.createPrepareWaiter()); Optional<ApplicationSet> activeApplicationSet = getActiveApplicationSet(params.getApplicationId()); ConfigChangeActions actions = sessionPreparer.prepare(applicationRepo.getHostValidator(), logger, params, activeApplicationSet, now, getSessionAppDir(sessionId), session.getApplicationPackage(), sessionZooKeeperClient) .getConfigChangeActions(); setPrepared(session); waiter.ifPresent(w -> w.awaitCompletion(params.getTimeoutBudget().timeLeft())); return actions; } /** * Creates a new deployment session from an already existing session. * * @param existingSession the session to use as base * @param internalRedeploy whether this session is for a system internal redeploy — not an application package change * @param timeoutBudget timeout for creating session and waiting for other servers. 
* @return a new session */ public LocalSession createSessionFromExisting(Session existingSession, boolean internalRedeploy, TimeoutBudget timeoutBudget) { ApplicationId existingApplicationId = existingSession.getApplicationId(); File existingApp = getSessionAppDir(existingSession.getSessionId()); LocalSession session = createSessionFromApplication(existingApp, existingApplicationId, internalRedeploy, timeoutBudget); session.setApplicationId(existingApplicationId); session.setApplicationPackageReference(existingSession.getApplicationPackageReference()); session.setVespaVersion(existingSession.getVespaVersion()); session.setDockerImageRepository(existingSession.getDockerImageRepository()); session.setAthenzDomain(existingSession.getAthenzDomain()); session.setTenantSecretStores(existingSession.getTenantSecretStores()); session.setOperatorCertificates(existingSession.getOperatorCertificates()); return session; } /** * Creates a new deployment session from an application package. * * @param applicationDirectory a File pointing to an application. * @param applicationId application id for this new session. * @param timeoutBudget Timeout for creating session and waiting for other servers. * @return a new session */ public LocalSession createSessionFromApplicationPackage(File applicationDirectory, ApplicationId applicationId, TimeoutBudget timeoutBudget) { applicationRepo.createApplication(applicationId); return createSessionFromApplication(applicationDirectory, applicationId, false, timeoutBudget); } /** * Creates a local session based on a remote session and the distributed application package. * Does not wait for session being created on other servers. 
*/ private void createLocalSession(File applicationFile, ApplicationId applicationId, long sessionId) { try { ApplicationPackage applicationPackage = createApplicationPackage(applicationFile, applicationId, sessionId, false); createLocalSession(sessionId, applicationPackage); } catch (Exception e) { throw new RuntimeException("Error creating session " + sessionId, e); } } public void deleteLocalSession(LocalSession session) { long sessionId = session.getSessionId(); log.log(Level.FINE, () -> "Deleting local session " + sessionId); SessionStateWatcher watcher = sessionStateWatchers.remove(sessionId); if (watcher != null) watcher.close(); localSessionCache.remove(sessionId); NestedTransaction transaction = new NestedTransaction(); transaction.add(FileTransaction.from(FileOperations.delete(getSessionAppDir(sessionId).getAbsolutePath()))); transaction.commit(); } private void deleteAllSessions() { for (LocalSession session : getLocalSessions()) { deleteLocalSession(session); } } public RemoteSession getRemoteSession(long sessionId) { return remoteSessionCache.get(sessionId); } /** Returns a copy of remote sessions */ public Collection<RemoteSession> getRemoteSessions() { return List.copyOf(remoteSessionCache.values()); } public List<Long> getRemoteSessionsFromZooKeeper() { return getSessionList(curator.getChildren(sessionsPath)); } public RemoteSession createRemoteSession(long sessionId) { SessionZooKeeperClient sessionZKClient = createSessionZooKeeperClient(sessionId); RemoteSession session = new RemoteSession(tenantName, sessionId, sessionZKClient); RemoteSession newSession = loadSessionIfActive(session).orElse(session); remoteSessionCache.put(sessionId, newSession); updateSessionStateWatcher(sessionId, newSession); return newSession; } public int deleteExpiredRemoteSessions(Clock clock, Duration expiryTime) { List<Long> remoteSessionsFromZooKeeper = getRemoteSessionsFromZooKeeper(); log.log(Level.FINE, () -> "Remote sessions for tenant " + tenantName + ": " + 
remoteSessionsFromZooKeeper); int deleted = 0; for (long sessionId : remoteSessionsFromZooKeeper) { Session session = remoteSessionCache.get(sessionId); if (session == null) { log.log(Level.FINE, () -> "Remote session " + sessionId + " is null, creating a new one"); session = new RemoteSession(tenantName, sessionId, createSessionZooKeeperClient(sessionId)); } if (session.getStatus() == Session.Status.ACTIVATE) continue; if (sessionHasExpired(session.getCreateTime(), expiryTime, clock)) { log.log(Level.FINE, () -> "Remote session " + sessionId + " for " + tenantName + " has expired, deleting it"); deleteRemoteSessionFromZooKeeper(session); deleted++; } if (deleted >= 2) break; } return deleted; } public void deactivateAndUpdateCache(RemoteSession remoteSession) { RemoteSession session = remoteSession.deactivated(); remoteSessionCache.put(session.getSessionId(), session); } public void deleteRemoteSessionFromZooKeeper(Session session) { SessionZooKeeperClient sessionZooKeeperClient = createSessionZooKeeperClient(session.getSessionId()); Transaction transaction = sessionZooKeeperClient.deleteTransaction(); transaction.commit(); transaction.close(); } private boolean sessionHasExpired(Instant created, Duration expiryTime, Clock clock) { return (created.plus(expiryTime).isBefore(clock.instant())); } private List<Long> getSessionListFromDirectoryCache(List<ChildData> children) { return getSessionList(children.stream() .map(child -> Path.fromString(child.getPath()).getName()) .collect(Collectors.toList())); } private List<Long> getSessionList(List<String> children) { return children.stream().map(Long::parseLong).collect(Collectors.toList()); } private void loadRemoteSessions(ExecutorService executor) throws NumberFormatException { Map<Long, Future<?>> futures = new HashMap<>(); for (long sessionId : getRemoteSessionsFromZooKeeper()) { futures.put(sessionId, executor.submit(() -> sessionAdded(sessionId))); } futures.forEach((sessionId, future) -> { try { future.get(); 
log.log(Level.FINE, () -> "Remote session " + sessionId + " loaded"); } catch (ExecutionException | InterruptedException e) { throw new RuntimeException("Could not load remote session " + sessionId, e); } }); } /** * A session for which we don't have a watcher, i.e. hitherto unknown to us. * * @param sessionId session id for the new session */ public void sessionAdded(long sessionId) { if (hasStatusDeleted(sessionId)) return; log.log(Level.FINE, () -> "Adding remote session " + sessionId); Session session = createRemoteSession(sessionId); if (session.getStatus() == Session.Status.NEW) { log.log(Level.FINE, () -> session.logPre() + "Confirming upload for session " + sessionId); confirmUpload(session); } createLocalSessionFromDistributedApplicationPackage(sessionId); } private boolean hasStatusDeleted(long sessionId) { SessionZooKeeperClient sessionZKClient = createSessionZooKeeperClient(sessionId); RemoteSession session = new RemoteSession(tenantName, sessionId, sessionZKClient); return session.getStatus() == Session.Status.DELETE; } void activate(RemoteSession session) { long sessionId = session.getSessionId(); CompletionWaiter waiter = createSessionZooKeeperClient(sessionId).getActiveWaiter(); log.log(Level.FINE, () -> session.logPre() + "Activating " + sessionId); applicationRepo.activateApplication(ensureApplicationLoaded(session), sessionId); log.log(Level.FINE, () -> session.logPre() + "Notifying " + waiter); notifyCompletion(waiter); log.log(Level.INFO, session.logPre() + "Session activated: " + sessionId); } private Optional<RemoteSession> loadSessionIfActive(RemoteSession session) { for (ApplicationId applicationId : applicationRepo.activeApplications()) { Optional<Long> activeSession = applicationRepo.activeSessionOf(applicationId); if (activeSession.isPresent() && activeSession.get() == session.getSessionId()) { log.log(Level.FINE, () -> "Found active application for session " + session.getSessionId() + " , loading it"); 
applicationRepo.activateApplication(ensureApplicationLoaded(session), session.getSessionId()); log.log(Level.INFO, session.logPre() + "Application activated successfully: " + applicationId + " (generation " + session.getSessionId() + ")"); return Optional.ofNullable(remoteSessionCache.get(session.getSessionId())); } } return Optional.empty(); } void prepareRemoteSession(RemoteSession session) { SessionZooKeeperClient sessionZooKeeperClient = createSessionZooKeeperClient(session.getSessionId()); CompletionWaiter waiter = sessionZooKeeperClient.getPrepareWaiter(); ensureApplicationLoaded(session); notifyCompletion(waiter); } public ApplicationSet ensureApplicationLoaded(RemoteSession session) { if (session.applicationSet().isPresent()) { return session.applicationSet().get(); } Optional<Long> activeSessionId = getActiveSessionId(session.getApplicationId()); Optional<ApplicationSet> previousApplicationSet = activeSessionId.filter(session::isNewerThan) .flatMap(this::getApplicationSet); ApplicationSet applicationSet = loadApplication(session, previousApplicationSet); RemoteSession activated = session.activated(applicationSet); long sessionId = activated.getSessionId(); remoteSessionCache.put(sessionId, activated); updateSessionStateWatcher(sessionId, activated); return applicationSet; } void confirmUpload(Session session) { CompletionWaiter waiter = session.getSessionZooKeeperClient().getUploadWaiter(); long sessionId = session.getSessionId(); log.log(Level.FINE, () -> "Notifying upload waiter for session " + sessionId); notifyCompletion(waiter); log.log(Level.FINE, () -> "Done notifying upload for session " + sessionId); } void notifyCompletion(CompletionWaiter completionWaiter) { try { completionWaiter.notifyCompletion(); } catch (RuntimeException e) { Set<Class<? extends KeeperException>> acceptedExceptions = Set.of(KeeperException.NoNodeException.class, KeeperException.NodeExistsException.class); Class<? 
extends Throwable> exceptionClass = e.getCause().getClass(); if (acceptedExceptions.contains(exceptionClass)) log.log(Level.FINE, () -> "Not able to notify completion for session (" + completionWaiter + ")," + " node " + (exceptionClass.equals(KeeperException.NoNodeException.class) ? "has been deleted" : "already exists")); else throw e; } } private ApplicationSet loadApplication(Session session, Optional<ApplicationSet> previousApplicationSet) { log.log(Level.FINE, () -> "Loading application for " + session); SessionZooKeeperClient sessionZooKeeperClient = createSessionZooKeeperClient(session.getSessionId()); ApplicationPackage applicationPackage = sessionZooKeeperClient.loadApplicationPackage(); ActivatedModelsBuilder builder = new ActivatedModelsBuilder(session.getTenantName(), session.getSessionId(), sessionZooKeeperClient, previousApplicationSet, sessionPreparer.getExecutor(), curator, metrics, permanentApplicationPackage, flagSource, secretStore, hostProvisionerProvider, configserverConfig, zone, modelFactoryRegistry, configDefinitionRepo); SettableOptional<AllocatedHosts> allocatedHosts = new SettableOptional<>(applicationPackage.getAllocatedHosts()); return ApplicationSet.fromList(builder.buildModels(session.getApplicationId(), sessionZooKeeperClient.readDockerImageRepository(), sessionZooKeeperClient.readVespaVersion(), applicationPackage, allocatedHosts, clock.instant())); } private void nodeChanged() { zkWatcherExecutor.execute(() -> { Multiset<Session.Status> sessionMetrics = HashMultiset.create(); getRemoteSessions().forEach(session -> sessionMetrics.add(session.getStatus())); metricUpdater.setNewSessions(sessionMetrics.count(Session.Status.NEW)); metricUpdater.setPreparedSessions(sessionMetrics.count(Session.Status.PREPARE)); metricUpdater.setActivatedSessions(sessionMetrics.count(Session.Status.ACTIVATE)); metricUpdater.setDeactivatedSessions(sessionMetrics.count(Session.Status.DEACTIVATE)); }); } @SuppressWarnings("unused") private void 
childEvent(CuratorFramework ignored, PathChildrenCacheEvent event) { zkWatcherExecutor.execute(() -> { log.log(Level.FINE, () -> "Got child event: " + event); switch (event.getType()) { case CHILD_ADDED: case CHILD_REMOVED: case CONNECTION_RECONNECTED: sessionsChanged(); break; default: break; } }); } private boolean hasExpired(LocalSession candidate) { return candidate.getCreateTime().plus(sessionLifetime).isBefore(clock.instant()); } private boolean canBeDeleted(LocalSession candidate) { return ! List.of(Session.Status.UNKNOWN, Session.Status.ACTIVATE).contains(candidate.getStatus()) || oldSessionDirWithNonExistingSession(candidate); } private boolean oldSessionDirWithNonExistingSession(LocalSession session) { File sessionDir = tenantFileSystemDirs.getUserApplicationDir(session.getSessionId()); return sessionDir.exists() && session.getStatus() == Session.Status.UNKNOWN && created(sessionDir).plus(Duration.ofDays(30)).isBefore(clock.instant()); } private Set<Long> findNewSessionsInFileSystem() { File[] sessions = tenantFileSystemDirs.sessionsPath().listFiles(sessionApplicationsFilter); Set<Long> newSessions = new HashSet<>(); if (sessions != null) { for (File session : sessions) { try { if (Files.getLastModifiedTime(session.toPath()).toInstant() .isAfter(clock.instant().minus(Duration.ofSeconds(30)))) newSessions.add(Long.parseLong(session.getName())); } catch (IOException e) { log.log(Level.INFO, "Unable to find last modified time for " + session.toPath()); }; } } return newSessions; } private Instant created(File file) { BasicFileAttributes fileAttributes; try { fileAttributes = readAttributes(file.toPath(), BasicFileAttributes.class); return fileAttributes.creationTime().toInstant(); } catch (IOException e) { throw new UncheckedIOException(e); } } private void ensureSessionPathDoesNotExist(long sessionId) { Path sessionPath = getSessionPath(sessionId); if (curator.exists(sessionPath)) { throw new IllegalArgumentException("Path " + sessionPath.getAbsolute() + " 
already exists in ZooKeeper"); } } private ApplicationPackage createApplication(File userDir, File configApplicationDir, ApplicationId applicationId, long sessionId, Optional<Long> currentlyActiveSessionId, boolean internalRedeploy) { long deployTimestamp = System.currentTimeMillis(); String user = System.getenv("USER"); if (user == null) { user = "unknown"; } DeployData deployData = new DeployData(user, userDir.getAbsolutePath(), applicationId, deployTimestamp, internalRedeploy, sessionId, currentlyActiveSessionId.orElse(nonExistingActiveSessionId)); return FilesApplicationPackage.fromFileWithDeployData(configApplicationDir, deployData); } private LocalSession createSessionFromApplication(File applicationFile, ApplicationId applicationId, boolean internalRedeploy, TimeoutBudget timeoutBudget) { long sessionId = getNextSessionId(); try { ensureSessionPathDoesNotExist(sessionId); ApplicationPackage app = createApplicationPackage(applicationFile, applicationId, sessionId, internalRedeploy); log.log(Level.FINE, () -> TenantRepository.logPre(tenantName) + "Creating session " + sessionId + " in ZooKeeper"); SessionZooKeeperClient sessionZKClient = createSessionZooKeeperClient(sessionId); sessionZKClient.createNewSession(clock.instant()); CompletionWaiter waiter = sessionZKClient.getUploadWaiter(); LocalSession session = new LocalSession(tenantName, sessionId, app, sessionZKClient); waiter.awaitCompletion(Duration.ofSeconds(Math.min(120, timeoutBudget.timeLeft().getSeconds()))); addLocalSession(session); return session; } catch (Exception e) { throw new RuntimeException("Error creating session " + sessionId, e); } } private ApplicationPackage createApplicationPackage(File applicationFile, ApplicationId applicationId, long sessionId, boolean internalRedeploy) throws IOException { synchronized (monitor) { Optional<Long> activeSessionId = getActiveSessionId(applicationId); File userApplicationDir = getSessionAppDir(sessionId); copyApp(applicationFile, userApplicationDir); 
ApplicationPackage applicationPackage = createApplication(applicationFile, userApplicationDir, applicationId, sessionId, activeSessionId, internalRedeploy); applicationPackage.writeMetaData(); return applicationPackage; } } public Optional<ApplicationSet> getActiveApplicationSet(ApplicationId appId) { return applicationRepo.activeSessionOf(appId).flatMap(this::getApplicationSet); } private Optional<ApplicationSet> getApplicationSet(long sessionId) { Optional<ApplicationSet> applicationSet = Optional.empty(); try { applicationSet = Optional.ofNullable(getRemoteSession(sessionId)).map(this::ensureApplicationLoaded); } catch (IllegalArgumentException e) { } return applicationSet; } private void copyApp(File sourceDir, File destinationDir) throws IOException { if (destinationDir.exists()) { log.log(Level.INFO, "Destination dir " + destinationDir + " already exists, app has already been copied"); return; } if (! sourceDir.isDirectory()) throw new IllegalArgumentException(sourceDir.getAbsolutePath() + " is not a directory"); java.nio.file.Path tempDestinationDir = null; try { tempDestinationDir = Files.createTempDirectory(destinationDir.getParentFile().toPath(), "app-package"); log.log(Level.FINE, "Copying dir " + sourceDir.getAbsolutePath() + " to " + tempDestinationDir.toFile().getAbsolutePath()); IOUtils.copyDirectory(sourceDir, tempDestinationDir.toFile()); moveSearchDefinitionsToSchemasDir(tempDestinationDir); log.log(Level.FINE, "Moving " + tempDestinationDir + " to " + destinationDir.getAbsolutePath()); Files.move(tempDestinationDir, destinationDir.toPath(), StandardCopyOption.ATOMIC_MOVE); } finally { if (tempDestinationDir != null) IOUtils.recursiveDeleteDir(tempDestinationDir.toFile()); } } private void moveSearchDefinitionsToSchemasDir(java.nio.file.Path applicationDir) throws IOException { File schemasDir = applicationDir.resolve(ApplicationPackage.SCHEMAS_DIR.getRelative()).toFile(); File sdDir = 
applicationDir.resolve(ApplicationPackage.SEARCH_DEFINITIONS_DIR.getRelative()).toFile(); if (sdDir.exists() && sdDir.isDirectory()) { File[] sdFiles = sdDir.listFiles(); if (sdFiles != null) { Files.createDirectories(schemasDir.toPath()); Arrays.asList(sdFiles).forEach(file -> Exceptions.uncheck( () -> Files.move(file.toPath(), schemasDir.toPath().resolve(file.toPath().getFileName()), StandardCopyOption.REPLACE_EXISTING))); } Files.delete(sdDir.toPath()); } } /** * Returns a new session instance for the given session id. */ void createSessionFromId(long sessionId) { File sessionDir = getAndValidateExistingSessionAppDir(sessionId); ApplicationPackage applicationPackage = FilesApplicationPackage.fromFile(sessionDir); createLocalSession(sessionId, applicationPackage); } void createLocalSession(long sessionId, ApplicationPackage applicationPackage) { SessionZooKeeperClient sessionZKClient = createSessionZooKeeperClient(sessionId); LocalSession session = new LocalSession(tenantName, sessionId, applicationPackage, sessionZKClient); addLocalSession(session); } /** * Returns a new local session for the given session id if it does not already exist. 
* Will also add the session to the local session cache if necessary */ public void createLocalSessionFromDistributedApplicationPackage(long sessionId) { if (applicationRepo.sessionExistsInFileSystem(sessionId)) { log.log(Level.FINE, () -> "Local session for session id " + sessionId + " already exists"); createSessionFromId(sessionId); return; } SessionZooKeeperClient sessionZKClient = createSessionZooKeeperClient(sessionId); FileReference fileReference = sessionZKClient.readApplicationPackageReference(); log.log(Level.FINE, () -> "File reference for session id " + sessionId + ": " + fileReference); if (fileReference != null) { File rootDir = new File(Defaults.getDefaults().underVespaHome(configserverConfig.fileReferencesDir())); File sessionDir; FileDirectory fileDirectory = new FileDirectory(rootDir); try { sessionDir = fileDirectory.getFile(fileReference); } catch (IllegalArgumentException e) { log.log(Level.FINE, () -> "File reference for session id " + sessionId + ": " + fileReference + " not found in " + fileDirectory); return; } ApplicationId applicationId = sessionZKClient.readApplicationId() .orElseThrow(() -> new RuntimeException("Could not find application id for session " + sessionId)); log.log(Level.FINE, () -> "Creating local session for tenant '" + tenantName + "' with session id " + sessionId); createLocalSession(sessionDir, applicationId, sessionId); } } private Optional<Long> getActiveSessionId(ApplicationId applicationId) { List<ApplicationId> applicationIds = applicationRepo.activeApplications(); return applicationIds.contains(applicationId) ? 
Optional.of(applicationRepo.requireActiveSessionOf(applicationId)) : Optional.empty(); } private long getNextSessionId() { return sessionCounter.nextSessionId(); } public Path getSessionPath(long sessionId) { return sessionsPath.append(String.valueOf(sessionId)); } Path getSessionStatePath(long sessionId) { return getSessionPath(sessionId).append(ZKApplication.SESSIONSTATE_ZK_SUBPATH); } private SessionZooKeeperClient createSessionZooKeeperClient(long sessionId) { return new SessionZooKeeperClient(curator, tenantName, sessionId, configserverConfig.serverId(), fileDistributionFactory.createFileManager(getSessionAppDir(sessionId)), maxNodeSize); } private File getAndValidateExistingSessionAppDir(long sessionId) { File appDir = getSessionAppDir(sessionId); if (!appDir.exists() || !appDir.isDirectory()) { throw new IllegalArgumentException("Unable to find correct application directory for session " + sessionId); } return appDir; } private File getSessionAppDir(long sessionId) { return new TenantFileSystemDirs(configServerDB, tenantName).getUserApplicationDir(sessionId); } private void updateSessionStateWatcher(long sessionId, RemoteSession remoteSession) { SessionStateWatcher sessionStateWatcher = sessionStateWatchers.get(sessionId); if (sessionStateWatcher == null) { Curator.FileCache fileCache = curator.createFileCache(getSessionStatePath(sessionId).getAbsolute(), false); fileCache.addListener(this::nodeChanged); sessionStateWatchers.put(sessionId, new SessionStateWatcher(fileCache, remoteSession, metricUpdater, zkWatcherExecutor, this)); } else { sessionStateWatcher.updateRemoteSession(remoteSession); } } @Override public String toString() { return getLocalSessions().toString(); } public Clock clock() { return clock; } public void close() { deleteAllSessions(); tenantFileSystemDirs.delete(); try { if (directoryCache != null) { directoryCache.close(); } } catch (Exception e) { log.log(Level.WARNING, "Exception when closing path cache", e); } finally { 
checkForRemovedSessions(new ArrayList<>()); } } private void sessionsChanged() throws NumberFormatException { List<Long> sessions = getSessionListFromDirectoryCache(directoryCache.getCurrentData()); checkForRemovedSessions(sessions); checkForAddedSessions(sessions); } private void checkForRemovedSessions(List<Long> existingSessions) { for (Iterator<RemoteSession> it = remoteSessionCache.values().iterator(); it.hasNext(); ) { long sessionId = it.next().sessionId; if (existingSessions.contains(sessionId)) continue; SessionStateWatcher watcher = sessionStateWatchers.remove(sessionId); if (watcher != null) watcher.close(); it.remove(); metricUpdater.incRemovedSessions(); } } private void checkForAddedSessions(List<Long> sessions) { for (Long sessionId : sessions) if (remoteSessionCache.get(sessionId) == null) sessionAdded(sessionId); } public Transaction createActivateTransaction(Session session) { Transaction transaction = createSetStatusTransaction(session, Session.Status.ACTIVATE); transaction.add(applicationRepo.createPutTransaction(session.getApplicationId(), session.getSessionId()).operations()); return transaction; } public Transaction createSetStatusTransaction(Session session, Session.Status status) { return session.sessionZooKeeperClient.createWriteStatusTransaction(status); } void setPrepared(Session session) { session.setStatus(Session.Status.PREPARE); } private static class FileTransaction extends AbstractTransaction { public static FileTransaction from(FileOperation operation) { FileTransaction transaction = new FileTransaction(); transaction.add(operation); return transaction; } @Override public void prepare() { } @Override public void commit() { for (Operation operation : operations()) ((FileOperation)operation).commit(); } } /** Factory for file operations */ private static class FileOperations { /** Creates an operation which recursively deletes the given path */ public static DeleteOperation delete(String pathToDelete) { return new 
DeleteOperation(pathToDelete); } } private interface FileOperation extends Transaction.Operation { void commit(); } /** * Recursively deletes this path and everything below. * Succeeds with no action if the path does not exist. */ private static class DeleteOperation implements FileOperation { private final String pathToDelete; DeleteOperation(String pathToDelete) { this.pathToDelete = pathToDelete; } @Override public void commit() { IOUtils.recursiveDeleteDir(new File(pathToDelete)); } } }
/**
 * Keeps track of the deployment sessions (local on-disk sessions and remote ZooKeeper-backed
 * sessions) for one tenant. Sessions are mirrored between a local cache, the file system under
 * {@code tenantFileSystemDirs}, and ZooKeeper under {@code sessionsPath}; a Curator directory
 * cache keeps the remote cache in sync with ZooKeeper.
 *
 * NOTE(review): assumed to be one instance per tenant, owned by the tenant repository — confirm
 * against the creating code, which is not visible here.
 */
class SessionRepository {

    private static final Logger log = Logger.getLogger(SessionRepository.class.getName());
    // Session directories on disk are named by their numeric session id.
    private static final FilenameFilter sessionApplicationsFilter = (dir, name) -> name.matches("\\d+");
    // Sentinel used in DeployData when the application has no currently active session.
    private static final long nonExistingActiveSessionId = 0;

    // Guards createApplicationPackage (copy + metadata write must not interleave).
    private final Object monitor = new Object();
    // Caches are synchronized maps; note that iteration over them still requires external
    // synchronization per Collections.synchronizedMap — see checkForRemovedSessions.
    private final Map<Long, LocalSession> localSessionCache = Collections.synchronizedMap(new HashMap<>());
    private final Map<Long, RemoteSession> remoteSessionCache = Collections.synchronizedMap(new HashMap<>());
    private final Map<Long, SessionStateWatcher> sessionStateWatchers = Collections.synchronizedMap(new HashMap<>());
    private final Duration sessionLifetime;
    private final Clock clock;
    private final Curator curator;
    private final Executor zkWatcherExecutor;
    private final FileDistributionFactory fileDistributionFactory;
    private final PermanentApplicationPackage permanentApplicationPackage;
    private final FlagSource flagSource;
    private final TenantFileSystemDirs tenantFileSystemDirs;
    private final Metrics metrics;
    private final MetricUpdater metricUpdater;
    private final Curator.DirectoryCache directoryCache;
    private final TenantApplications applicationRepo;
    private final SessionPreparer sessionPreparer;
    private final Path sessionsPath;
    private final TenantName tenantName;
    private final SessionCounter sessionCounter;
    private final SecretStore secretStore;
    private final HostProvisionerProvider hostProvisionerProvider;
    private final ConfigserverConfig configserverConfig;
    private final ConfigServerDB configServerDB;
    private final Zone zone;
    private final ModelFactoryRegistry modelFactoryRegistry;
    private final ConfigDefinitionRepo configDefinitionRepo;
    private final int maxNodeSize;

    public SessionRepository(TenantName tenantName,
                             TenantApplications applicationRepo,
                             SessionPreparer sessionPreparer,
                             Curator curator,
                             Metrics metrics,
                             StripedExecutor<TenantName> zkWatcherExecutor,
                             FileDistributionFactory fileDistributionFactory,
                             PermanentApplicationPackage permanentApplicationPackage,
                             FlagSource flagSource,
                             ExecutorService zkCacheExecutor,
                             SecretStore secretStore,
                             HostProvisionerProvider hostProvisionerProvider,
                             ConfigserverConfig configserverConfig,
                             ConfigServerDB configServerDB,
                             Zone zone,
                             Clock clock,
                             ModelFactoryRegistry modelFactoryRegistry,
                             ConfigDefinitionRepo configDefinitionRepo,
                             int maxNodeSize) {
        this.tenantName = tenantName;
        sessionCounter = new SessionCounter(curator, tenantName);
        this.sessionsPath = TenantRepository.getSessionsPath(tenantName);
        this.clock = clock;
        this.curator = curator;
        this.sessionLifetime = Duration.ofSeconds(configserverConfig.sessionLifetime());
        // All ZK watcher callbacks for this tenant run serialized on the striped executor.
        this.zkWatcherExecutor = command -> zkWatcherExecutor.execute(tenantName, command);
        this.fileDistributionFactory = fileDistributionFactory;
        this.permanentApplicationPackage = permanentApplicationPackage;
        this.flagSource = flagSource;
        this.tenantFileSystemDirs = new TenantFileSystemDirs(configServerDB, tenantName);
        this.applicationRepo = applicationRepo;
        this.sessionPreparer = sessionPreparer;
        this.metrics = metrics;
        this.metricUpdater = metrics.getOrCreateMetricUpdater(Metrics.createDimensions(tenantName));
        this.secretStore = secretStore;
        this.hostProvisionerProvider = hostProvisionerProvider;
        this.configserverConfig = configserverConfig;
        this.configServerDB = configServerDB;
        this.zone = zone;
        this.modelFactoryRegistry = modelFactoryRegistry;
        this.configDefinitionRepo = configDefinitionRepo;
        this.maxNodeSize = maxNodeSize;

        // Load all existing remote sessions before the directory cache starts delivering events,
        // so the cache listener only sees changes that happen after startup.
        loadSessions();
        this.directoryCache = curator.createDirectoryCache(sessionsPath.getAbsolute(), false, false, zkCacheExecutor);
        this.directoryCache.addListener(this::childEvent);
        this.directoryCache.start();
    }

    // Loads sessions in parallel on a dedicated, short-lived thread pool.
    private void loadSessions() {
        ExecutorService executor = Executors.newFixedThreadPool(Math.max(8, Runtime.getRuntime().availableProcessors()),
                                                                new DaemonThreadFactory("load-sessions-"));
        loadSessions(executor);
    }

    // Package-private to allow tests to supply their own executor. Shuts the executor down and
    // waits up to one minute for loading to finish; a timeout is only logged, not fatal.
    void loadSessions(ExecutorService executor) {
        loadRemoteSessions(executor);
        try {
            executor.shutdown();
            if ( ! executor.awaitTermination(1, TimeUnit.MINUTES))
                log.log(Level.INFO, "Executor did not terminate");
        } catch (InterruptedException e) {
            log.log(Level.WARNING, "Shutdown of executor for loading sessions failed: " + Exceptions.toMessageString(e));
        }
    }

    /** Caches the given local session, and ensures a corresponding remote session exists. */
    public void addLocalSession(LocalSession session) {
        long sessionId = session.getSessionId();
        localSessionCache.put(sessionId, session);
        if (remoteSessionCache.get(sessionId) == null)
            createRemoteSession(sessionId);
    }

    public LocalSession getLocalSession(long sessionId) {
        return localSessionCache.get(sessionId);
    }

    /** Returns a copy of local sessions */
    public Collection<LocalSession> getLocalSessions() {
        return List.copyOf(localSessionCache.values());
    }

    /** Builds local session objects for every numeric session directory found on disk. */
    public Set<LocalSession> getLocalSessionsFromFileSystem() {
        File[] sessions = tenantFileSystemDirs.sessionsPath().listFiles(sessionApplicationsFilter);
        if (sessions == null) return Set.of();

        Set<LocalSession> sessionIds = new HashSet<>();
        for (File session : sessions) {
            long sessionId = Long.parseLong(session.getName());
            SessionZooKeeperClient sessionZKClient = createSessionZooKeeperClient(sessionId);
            File sessionDir = getAndValidateExistingSessionAppDir(sessionId);
            ApplicationPackage applicationPackage = FilesApplicationPackage.fromFile(sessionDir);
            LocalSession localSession = new LocalSession(tenantName, sessionId, applicationPackage, sessionZKClient);
            sessionIds.add(localSession);
        }
        return sessionIds;
    }

    /**
     * Prepares the given session: validates the requested Vespa version, creates the application,
     * runs the session preparer, marks the session PREPARE, and (unless a dry run) waits for the
     * other config servers to confirm.
     *
     * @return the config change actions produced by preparation
     * @throws UnknownVespaVersionException if a non-bootstrap deployment requests an unknown version
     */
    public ConfigChangeActions prepareLocalSession(Session session, DeployLogger logger, PrepareParams params, Instant now) {
        params.vespaVersion().ifPresent(version -> {
            if ( ! params.isBootstrap() && ! modelFactoryRegistry.allVersions().contains(version))
                throw new UnknownVespaVersionException("Vespa version '" + version + "' not known by this configserver");
        });

        applicationRepo.createApplication(params.getApplicationId());
        logger.log(Level.FINE, "Created application " + params.getApplicationId());
        long sessionId = session.getSessionId();
        SessionZooKeeperClient sessionZooKeeperClient = createSessionZooKeeperClient(sessionId);
        // Dry runs skip the cross-server prepare barrier.
        Optional<CompletionWaiter> waiter = params.isDryRun()
                ? Optional.empty()
                : Optional.of(sessionZooKeeperClient.createPrepareWaiter());
        Optional<ApplicationSet> activeApplicationSet = getActiveApplicationSet(params.getApplicationId());
        ConfigChangeActions actions = sessionPreparer.prepare(applicationRepo.getHostValidator(), logger, params,
                                                              activeApplicationSet, now, getSessionAppDir(sessionId),
                                                              session.getApplicationPackage(), sessionZooKeeperClient)
                                                     .getConfigChangeActions();
        setPrepared(session);
        waiter.ifPresent(w -> w.awaitCompletion(params.getTimeoutBudget().timeLeft()));
        return actions;
    }

    /**
     * Creates a new deployment session from an already existing session.
     *
     * @param existingSession the session to use as base
     * @param internalRedeploy whether this session is for a system internal redeploy — not an application package change
     * @param timeoutBudget timeout for creating session and waiting for other servers.
     * @return a new session
     */
    public LocalSession createSessionFromExisting(Session existingSession, boolean internalRedeploy, TimeoutBudget timeoutBudget) {
        ApplicationId existingApplicationId = existingSession.getApplicationId();
        File existingApp = getSessionAppDir(existingSession.getSessionId());
        LocalSession session = createSessionFromApplication(existingApp, existingApplicationId, internalRedeploy, timeoutBudget);
        // Carry over all deployment metadata from the session we clone.
        session.setApplicationId(existingApplicationId);
        session.setApplicationPackageReference(existingSession.getApplicationPackageReference());
        session.setVespaVersion(existingSession.getVespaVersion());
        session.setDockerImageRepository(existingSession.getDockerImageRepository());
        session.setAthenzDomain(existingSession.getAthenzDomain());
        session.setTenantSecretStores(existingSession.getTenantSecretStores());
        session.setOperatorCertificates(existingSession.getOperatorCertificates());
        return session;
    }

    /**
     * Creates a new deployment session from an application package.
     *
     * @param applicationDirectory a File pointing to an application.
     * @param applicationId application id for this new session.
     * @param timeoutBudget Timeout for creating session and waiting for other servers.
     * @return a new session
     */
    public LocalSession createSessionFromApplicationPackage(File applicationDirectory, ApplicationId applicationId, TimeoutBudget timeoutBudget) {
        applicationRepo.createApplication(applicationId);
        return createSessionFromApplication(applicationDirectory, applicationId, false, timeoutBudget);
    }

    /**
     * Creates a local session based on a remote session and the distributed application package.
     * Does not wait for session being created on other servers.
     */
    private void createLocalSession(File applicationFile, ApplicationId applicationId, long sessionId) {
        try {
            ApplicationPackage applicationPackage = createApplicationPackage(applicationFile, applicationId, sessionId, false);
            createLocalSession(sessionId, applicationPackage);
        } catch (Exception e) {
            throw new RuntimeException("Error creating session " + sessionId, e);
        }
    }

    /** Removes the session from the local cache, closes its watcher and deletes its on-disk app dir. */
    public void deleteLocalSession(LocalSession session) {
        long sessionId = session.getSessionId();
        log.log(Level.FINE, () -> "Deleting local session " + sessionId);
        SessionStateWatcher watcher = sessionStateWatchers.remove(sessionId);
        if (watcher != null) watcher.close();
        localSessionCache.remove(sessionId);
        NestedTransaction transaction = new NestedTransaction();
        transaction.add(FileTransaction.from(FileOperations.delete(getSessionAppDir(sessionId).getAbsolutePath())));
        transaction.commit();
    }

    private void deleteAllSessions() {
        for (LocalSession session : getLocalSessions()) {
            deleteLocalSession(session);
        }
    }

    public RemoteSession getRemoteSession(long sessionId) {
        return remoteSessionCache.get(sessionId);
    }

    /** Returns a copy of remote sessions */
    public Collection<RemoteSession> getRemoteSessions() {
        return List.copyOf(remoteSessionCache.values());
    }

    public List<Long> getRemoteSessionsFromZooKeeper() {
        return getSessionList(curator.getChildren(sessionsPath));
    }

    /**
     * Creates (or reloads, if active) a remote session, caches it and attaches a state watcher.
     */
    public RemoteSession createRemoteSession(long sessionId) {
        SessionZooKeeperClient sessionZKClient = createSessionZooKeeperClient(sessionId);
        RemoteSession session = new RemoteSession(tenantName, sessionId, sessionZKClient);
        RemoteSession newSession = loadSessionIfActive(session).orElse(session);
        remoteSessionCache.put(sessionId, newSession);
        updateSessionStateWatcher(sessionId, newSession);
        return newSession;
    }

    /** Deletes expired, non-active remote sessions from ZooKeeper. Returns the number deleted. */
    public int deleteExpiredRemoteSessions(Clock clock, Duration expiryTime) {
        List<Long> remoteSessionsFromZooKeeper = getRemoteSessionsFromZooKeeper();
        log.log(Level.FINE, () -> "Remote sessions for tenant " + tenantName + ": " + remoteSessionsFromZooKeeper);

        int deleted = 0;
        for (long sessionId : remoteSessionsFromZooKeeper) {
            Session session = remoteSessionCache.get(sessionId);
            if (session == null) {
                log.log(Level.FINE, () -> "Remote session " + sessionId + " is null, creating a new one");
                session = new RemoteSession(tenantName, sessionId, createSessionZooKeeperClient(sessionId));
            }
            if (session.getStatus() == Session.Status.ACTIVATE) continue;
            if (sessionHasExpired(session.getCreateTime(), expiryTime, clock)) {
                log.log(Level.FINE, () -> "Remote session " + sessionId + " for " + tenantName + " has expired, deleting it");
                deleteRemoteSessionFromZooKeeper(session);
                deleted++;
            }
            // Deletes at most 2 sessions per invocation — presumably to bound work per run; confirm.
            if (deleted >= 2) break;
        }
        return deleted;
    }

    /** Replaces the cached remote session with a deactivated copy of it. */
    public void deactivateAndUpdateCache(RemoteSession remoteSession) {
        RemoteSession session = remoteSession.deactivated();
        remoteSessionCache.put(session.getSessionId(), session);
    }

    public void deleteRemoteSessionFromZooKeeper(Session session) {
        SessionZooKeeperClient sessionZooKeeperClient = createSessionZooKeeperClient(session.getSessionId());
        Transaction transaction = sessionZooKeeperClient.deleteTransaction();
        transaction.commit();
        transaction.close();
    }

    private boolean sessionHasExpired(Instant created, Duration expiryTime, Clock clock) {
        return (created.plus(expiryTime).isBefore(clock.instant()));
    }

    // Extracts session ids from the ZK child node names held by the directory cache.
    private List<Long> getSessionListFromDirectoryCache(List<ChildData> children) {
        return getSessionList(children.stream()
                                      .map(child -> Path.fromString(child.getPath()).getName())
                                      .collect(Collectors.toList()));
    }

    private List<Long> getSessionList(List<String> children) {
        return children.stream().map(Long::parseLong).collect(Collectors.toList());
    }

    // Submits one sessionAdded task per remote session and fails hard if any of them fails.
    private void loadRemoteSessions(ExecutorService executor) throws NumberFormatException {
        Map<Long, Future<?>> futures = new HashMap<>();
        for (long sessionId : getRemoteSessionsFromZooKeeper()) {
            futures.put(sessionId, executor.submit(() -> sessionAdded(sessionId)));
        }
        futures.forEach((sessionId, future) -> {
            try {
                future.get();
                log.log(Level.FINE, () -> "Remote session " + sessionId + " loaded");
            } catch (ExecutionException | InterruptedException e) {
                throw new RuntimeException("Could not load remote session " + sessionId, e);
            }
        });
    }

    /**
     * A session for which we don't have a watcher, i.e. hitherto unknown to us.
     *
     * @param sessionId session id for the new session
     */
    public void sessionAdded(long sessionId) {
        if (hasStatusDeleted(sessionId)) return;

        log.log(Level.FINE, () -> "Adding remote session " + sessionId);
        Session session = createRemoteSession(sessionId);
        if (session.getStatus() == Session.Status.NEW) {
            log.log(Level.FINE, () -> session.logPre() + "Confirming upload for session " + sessionId);
            confirmUpload(session);
        }
        createLocalSessionFromDistributedApplicationPackage(sessionId);
    }

    private boolean hasStatusDeleted(long sessionId) {
        SessionZooKeeperClient sessionZKClient = createSessionZooKeeperClient(sessionId);
        RemoteSession session = new RemoteSession(tenantName, sessionId, sessionZKClient);
        return session.getStatus() == Session.Status.DELETE;
    }

    /** Activates the session's application and notifies the cross-server activation waiter. */
    void activate(RemoteSession session) {
        long sessionId = session.getSessionId();
        CompletionWaiter waiter = createSessionZooKeeperClient(sessionId).getActiveWaiter();
        log.log(Level.FINE, () -> session.logPre() + "Activating " + sessionId);
        applicationRepo.activateApplication(ensureApplicationLoaded(session), sessionId);
        log.log(Level.FINE, () -> session.logPre() + "Notifying " + waiter);
        notifyCompletion(waiter);
        log.log(Level.INFO, session.logPre() + "Session activated: " + sessionId);
    }

    // If the session is the active session of some application, load and activate it now and
    // return the (activated) cached instance; otherwise return empty.
    private Optional<RemoteSession> loadSessionIfActive(RemoteSession session) {
        for (ApplicationId applicationId : applicationRepo.activeApplications()) {
            Optional<Long> activeSession = applicationRepo.activeSessionOf(applicationId);
            if (activeSession.isPresent() && activeSession.get() == session.getSessionId()) {
                log.log(Level.FINE, () -> "Found active application for session " + session.getSessionId() + " , loading it");
                applicationRepo.activateApplication(ensureApplicationLoaded(session), session.getSessionId());
                log.log(Level.INFO, session.logPre() + "Application activated successfully: " + applicationId +
                                    " (generation " + session.getSessionId() + ")");
                return Optional.ofNullable(remoteSessionCache.get(session.getSessionId()));
            }
        }
        return Optional.empty();
    }

    void prepareRemoteSession(RemoteSession session) {
        SessionZooKeeperClient sessionZooKeeperClient = createSessionZooKeeperClient(session.getSessionId());
        CompletionWaiter waiter = sessionZooKeeperClient.getPrepareWaiter();
        ensureApplicationLoaded(session);
        notifyCompletion(waiter);
    }

    /**
     * Returns the session's application set, building it (and caching the activated session)
     * if it is not already loaded.
     */
    public ApplicationSet ensureApplicationLoaded(RemoteSession session) {
        if (session.applicationSet().isPresent()) {
            return session.applicationSet().get();
        }
        // Only reuse the previous application set when this session is newer than the active one.
        Optional<Long> activeSessionId = getActiveSessionId(session.getApplicationId());
        Optional<ApplicationSet> previousApplicationSet = activeSessionId.filter(session::isNewerThan)
                                                                         .flatMap(this::getApplicationSet);
        ApplicationSet applicationSet = loadApplication(session, previousApplicationSet);
        RemoteSession activated = session.activated(applicationSet);
        long sessionId = activated.getSessionId();
        remoteSessionCache.put(sessionId, activated);
        updateSessionStateWatcher(sessionId, activated);
        return applicationSet;
    }

    void confirmUpload(Session session) {
        CompletionWaiter waiter = session.getSessionZooKeeperClient().getUploadWaiter();
        long sessionId = session.getSessionId();
        log.log(Level.FINE, () -> "Notifying upload waiter for session " + sessionId);
        notifyCompletion(waiter);
        log.log(Level.FINE, () -> "Done notifying upload for session " + sessionId);
    }

    // Notifies a completion waiter, tolerating the ZK node having been deleted or already created.
    void notifyCompletion(CompletionWaiter completionWaiter) {
        try {
            completionWaiter.notifyCompletion();
        } catch (RuntimeException e) {
            // NoNodeException and NodeExistsException (wrapped as the cause) are benign races.
            Set<Class<? extends KeeperException>> acceptedExceptions = Set.of(KeeperException.NoNodeException.class,
                                                                              KeeperException.NodeExistsException.class);
            Class<? extends Throwable> exceptionClass = e.getCause().getClass();
            if (acceptedExceptions.contains(exceptionClass))
                log.log(Level.FINE, () -> "Not able to notify completion for session (" + completionWaiter + ")," +
                                          " node " + (exceptionClass.equals(KeeperException.NoNodeException.class)
                        ? "has been deleted"
                        : "already exists"));
            else
                throw e;
        }
    }

    // Builds the models for all config versions of this session's application.
    private ApplicationSet loadApplication(Session session, Optional<ApplicationSet> previousApplicationSet) {
        log.log(Level.FINE, () -> "Loading application for " + session);
        SessionZooKeeperClient sessionZooKeeperClient = createSessionZooKeeperClient(session.getSessionId());
        ApplicationPackage applicationPackage = sessionZooKeeperClient.loadApplicationPackage();
        ActivatedModelsBuilder builder = new ActivatedModelsBuilder(session.getTenantName(),
                                                                    session.getSessionId(),
                                                                    sessionZooKeeperClient,
                                                                    previousApplicationSet,
                                                                    sessionPreparer.getExecutor(),
                                                                    curator,
                                                                    metrics,
                                                                    permanentApplicationPackage,
                                                                    flagSource,
                                                                    secretStore,
                                                                    hostProvisionerProvider,
                                                                    configserverConfig,
                                                                    zone,
                                                                    modelFactoryRegistry,
                                                                    configDefinitionRepo);
        SettableOptional<AllocatedHosts> allocatedHosts = new SettableOptional<>(applicationPackage.getAllocatedHosts());
        return ApplicationSet.fromList(builder.buildModels(session.getApplicationId(),
                                                           sessionZooKeeperClient.readDockerImageRepository(),
                                                           sessionZooKeeperClient.readVespaVersion(),
                                                           applicationPackage,
                                                           allocatedHosts,
                                                           clock.instant()));
    }

    // Recomputes per-status session count metrics; runs on the tenant's ZK watcher executor.
    private void nodeChanged() {
        zkWatcherExecutor.execute(() -> {
            Multiset<Session.Status> sessionMetrics = HashMultiset.create();
            getRemoteSessions().forEach(session -> sessionMetrics.add(session.getStatus()));
            metricUpdater.setNewSessions(sessionMetrics.count(Session.Status.NEW));
            metricUpdater.setPreparedSessions(sessionMetrics.count(Session.Status.PREPARE));
            metricUpdater.setActivatedSessions(sessionMetrics.count(Session.Status.ACTIVATE));
            metricUpdater.setDeactivatedSessions(sessionMetrics.count(Session.Status.DEACTIVATE));
        });
    }

    // Directory cache listener: reconciles the remote session cache on add/remove/reconnect.
    @SuppressWarnings("unused")
    private void childEvent(CuratorFramework ignored, PathChildrenCacheEvent event) {
        zkWatcherExecutor.execute(() -> {
            log.log(Level.FINE, () -> "Got child event: " + event);
            switch (event.getType()) {
                case CHILD_ADDED:
                case CHILD_REMOVED:
                case CONNECTION_RECONNECTED:
                    sessionsChanged();
                    break;
                default:
                    break;
            }
        });
    }

    private boolean hasExpired(LocalSession candidate) {
        return candidate.getCreateTime().plus(sessionLifetime).isBefore(clock.instant());
    }

    // A session may be deleted unless it is UNKNOWN/ACTIVATE, except when its directory is an
    // old leftover with no real session behind it.
    private boolean canBeDeleted(LocalSession candidate) {
        return ! List.of(Session.Status.UNKNOWN, Session.Status.ACTIVATE).contains(candidate.getStatus())
                || oldSessionDirWithNonExistingSession(candidate);
    }

    private boolean oldSessionDirWithNonExistingSession(LocalSession session) {
        File sessionDir = tenantFileSystemDirs.getUserApplicationDir(session.getSessionId());
        return sessionDir.exists()
                && session.getStatus() == Session.Status.UNKNOWN
                && created(sessionDir).plus(Duration.ofDays(30)).isBefore(clock.instant());
    }

    // Session directories modified within the last 30 seconds are considered "new".
    private Set<Long> findNewSessionsInFileSystem() {
        File[] sessions = tenantFileSystemDirs.sessionsPath().listFiles(sessionApplicationsFilter);
        Set<Long> newSessions = new HashSet<>();
        if (sessions != null) {
            for (File session : sessions) {
                try {
                    if (Files.getLastModifiedTime(session.toPath()).toInstant()
                             .isAfter(clock.instant().minus(Duration.ofSeconds(30))))
                        newSessions.add(Long.parseLong(session.getName()));
                } catch (IOException e) {
                    log.log(Level.INFO, "Unable to find last modified time for " + session.toPath());
                };
            }
        }
        return newSessions;
    }

    private Instant created(File file) {
        BasicFileAttributes fileAttributes;
        try {
            fileAttributes = readAttributes(file.toPath(), BasicFileAttributes.class);
            return fileAttributes.creationTime().toInstant();
        } catch (IOException e) {
            throw new UncheckedIOException(e);
        }
    }

    private void ensureSessionPathDoesNotExist(long sessionId) {
        Path sessionPath = getSessionPath(sessionId);
        if (curator.exists(sessionPath)) {
            throw new IllegalArgumentException("Path " + sessionPath.getAbsolute() + " already exists in ZooKeeper");
        }
    }

    // Builds an application package with deploy metadata (user, timestamps, session ids).
    private ApplicationPackage createApplication(File userDir,
                                                 File configApplicationDir,
                                                 ApplicationId applicationId,
                                                 long sessionId,
                                                 Optional<Long> currentlyActiveSessionId,
                                                 boolean internalRedeploy) {
        long deployTimestamp = System.currentTimeMillis();
        String user = System.getenv("USER");
        if (user == null) {
            user = "unknown";
        }
        DeployData deployData = new DeployData(user, userDir.getAbsolutePath(), applicationId, deployTimestamp,
                                               internalRedeploy, sessionId,
                                               currentlyActiveSessionId.orElse(nonExistingActiveSessionId));
        return FilesApplicationPackage.fromFileWithDeployData(configApplicationDir, deployData);
    }

    // Allocates a session id, copies the application package locally, creates the ZK session
    // node and waits (capped at 120s) for all servers to see the upload.
    private LocalSession createSessionFromApplication(File applicationFile,
                                                      ApplicationId applicationId,
                                                      boolean internalRedeploy,
                                                      TimeoutBudget timeoutBudget) {
        long sessionId = getNextSessionId();
        try {
            ensureSessionPathDoesNotExist(sessionId);
            ApplicationPackage app = createApplicationPackage(applicationFile, applicationId, sessionId, internalRedeploy);
            log.log(Level.FINE, () -> TenantRepository.logPre(tenantName) + "Creating session " + sessionId + " in ZooKeeper");
            SessionZooKeeperClient sessionZKClient = createSessionZooKeeperClient(sessionId);
            sessionZKClient.createNewSession(clock.instant());
            CompletionWaiter waiter = sessionZKClient.getUploadWaiter();
            LocalSession session = new LocalSession(tenantName, sessionId, app, sessionZKClient);
            waiter.awaitCompletion(Duration.ofSeconds(Math.min(120, timeoutBudget.timeLeft().getSeconds())));
            addLocalSession(session);
            return session;
        } catch (Exception e) {
            throw new RuntimeException("Error creating session " + sessionId, e);
        }
    }

    // Copies the package into the session's app dir and writes its metadata; synchronized so
    // concurrent deployments do not interleave the copy and metadata write.
    private ApplicationPackage createApplicationPackage(File applicationFile,
                                                        ApplicationId applicationId,
                                                        long sessionId,
                                                        boolean internalRedeploy) throws IOException {
        synchronized (monitor) {
            Optional<Long> activeSessionId = getActiveSessionId(applicationId);
            File userApplicationDir = getSessionAppDir(sessionId);
            copyApp(applicationFile, userApplicationDir);
            ApplicationPackage applicationPackage = createApplication(applicationFile,
                                                                      userApplicationDir,
                                                                      applicationId,
                                                                      sessionId,
                                                                      activeSessionId,
                                                                      internalRedeploy);
            applicationPackage.writeMetaData();
            return applicationPackage;
        }
    }

    public Optional<ApplicationSet> getActiveApplicationSet(ApplicationId appId) {
        return applicationRepo.activeSessionOf(appId).flatMap(this::getApplicationSet);
    }

    private Optional<ApplicationSet> getApplicationSet(long sessionId) {
        Optional<ApplicationSet> applicationSet = Optional.empty();
        try {
            applicationSet = Optional.ofNullable(getRemoteSession(sessionId)).map(this::ensureApplicationLoaded);
        } catch (IllegalArgumentException e) {
            // NOTE(review): deliberately ignored — presumably thrown when the session no longer
            // exists or cannot be loaded; treated as "no application set". Confirm the source.
        }
        return applicationSet;
    }

    // Copies via a temp dir + atomic move so a partially copied app dir is never visible.
    private void copyApp(File sourceDir, File destinationDir) throws IOException {
        if (destinationDir.exists()) {
            log.log(Level.INFO, "Destination dir " + destinationDir + " already exists, app has already been copied");
            return;
        }
        if (! sourceDir.isDirectory())
            throw new IllegalArgumentException(sourceDir.getAbsolutePath() + " is not a directory");

        java.nio.file.Path tempDestinationDir = null;
        try {
            tempDestinationDir = Files.createTempDirectory(destinationDir.getParentFile().toPath(), "app-package");
            log.log(Level.FINE, "Copying dir " + sourceDir.getAbsolutePath() + " to " + tempDestinationDir.toFile().getAbsolutePath());
            IOUtils.copyDirectory(sourceDir, tempDestinationDir.toFile());
            moveSearchDefinitionsToSchemasDir(tempDestinationDir);
            log.log(Level.FINE, "Moving " + tempDestinationDir + " to " + destinationDir.getAbsolutePath());
            Files.move(tempDestinationDir, destinationDir.toPath(), StandardCopyOption.ATOMIC_MOVE);
        } finally {
            // No-op after a successful move; cleans up leftovers on failure.
            if (tempDestinationDir != null)
                IOUtils.recursiveDeleteDir(tempDestinationDir.toFile());
        }
    }

    // Migrates the legacy searchdefinitions/ dir into schemas/, then removes it.
    private void moveSearchDefinitionsToSchemasDir(java.nio.file.Path applicationDir) throws IOException {
        File schemasDir = applicationDir.resolve(ApplicationPackage.SCHEMAS_DIR.getRelative()).toFile();
        File sdDir = applicationDir.resolve(ApplicationPackage.SEARCH_DEFINITIONS_DIR.getRelative()).toFile();
        if (sdDir.exists() && sdDir.isDirectory()) {
            File[] sdFiles = sdDir.listFiles();
            if (sdFiles != null) {
                Files.createDirectories(schemasDir.toPath());
                Arrays.asList(sdFiles).forEach(file -> Exceptions.uncheck(
                        () -> Files.move(file.toPath(),
                                         schemasDir.toPath().resolve(file.toPath().getFileName()),
                                         StandardCopyOption.REPLACE_EXISTING)));
            }
            Files.delete(sdDir.toPath());
        }
    }

    /**
     * Returns a new session instance for the given session id.
     */
    void createSessionFromId(long sessionId) {
        File sessionDir = getAndValidateExistingSessionAppDir(sessionId);
        ApplicationPackage applicationPackage = FilesApplicationPackage.fromFile(sessionDir);
        createLocalSession(sessionId, applicationPackage);
    }

    void createLocalSession(long sessionId, ApplicationPackage applicationPackage) {
        SessionZooKeeperClient sessionZKClient = createSessionZooKeeperClient(sessionId);
        LocalSession session = new LocalSession(tenantName, sessionId, applicationPackage, sessionZKClient);
        addLocalSession(session);
    }

    /**
     * Returns a new local session for the given session id if it does not already exist.
     * Will also add the session to the local session cache if necessary
     */
    public void createLocalSessionFromDistributedApplicationPackage(long sessionId) {
        if (applicationRepo.sessionExistsInFileSystem(sessionId)) {
            log.log(Level.FINE, () -> "Local session for session id " + sessionId + " already exists");
            createSessionFromId(sessionId);
            return;
        }

        SessionZooKeeperClient sessionZKClient = createSessionZooKeeperClient(sessionId);
        FileReference fileReference = sessionZKClient.readApplicationPackageReference();
        log.log(Level.FINE, () -> "File reference for session id " + sessionId + ": " + fileReference);
        if (fileReference != null) {
            File rootDir = new File(Defaults.getDefaults().underVespaHome(configserverConfig.fileReferencesDir()));
            File sessionDir;
            FileDirectory fileDirectory = new FileDirectory(rootDir);
            try {
                sessionDir = fileDirectory.getFile(fileReference);
            } catch (IllegalArgumentException e) {
                // The package may not have been distributed to this server yet; just give up.
                log.log(Level.FINE, () -> "File reference for session id " + sessionId + ": " + fileReference + " not found in " + fileDirectory);
                return;
            }
            ApplicationId applicationId = sessionZKClient.readApplicationId()
                    .orElseThrow(() -> new RuntimeException("Could not find application id for session " + sessionId));
            log.log(Level.FINE, () -> "Creating local session for tenant '" + tenantName + "' with session id " + sessionId);
            createLocalSession(sessionDir, applicationId, sessionId);
        }
    }

    private Optional<Long> getActiveSessionId(ApplicationId applicationId) {
        List<ApplicationId> applicationIds = applicationRepo.activeApplications();
        return applicationIds.contains(applicationId)
                ? Optional.of(applicationRepo.requireActiveSessionOf(applicationId))
                : Optional.empty();
    }

    private long getNextSessionId() {
        return sessionCounter.nextSessionId();
    }

    public Path getSessionPath(long sessionId) {
        return sessionsPath.append(String.valueOf(sessionId));
    }

    Path getSessionStatePath(long sessionId) {
        return getSessionPath(sessionId).append(ZKApplication.SESSIONSTATE_ZK_SUBPATH);
    }

    private SessionZooKeeperClient createSessionZooKeeperClient(long sessionId) {
        return new SessionZooKeeperClient(curator,
                                          tenantName,
                                          sessionId,
                                          configserverConfig.serverId(),
                                          fileDistributionFactory.createFileManager(getSessionAppDir(sessionId)),
                                          maxNodeSize);
    }

    private File getAndValidateExistingSessionAppDir(long sessionId) {
        File appDir = getSessionAppDir(sessionId);
        if (!appDir.exists() || !appDir.isDirectory()) {
            throw new IllegalArgumentException("Unable to find correct application directory for session " + sessionId);
        }
        return appDir;
    }

    private File getSessionAppDir(long sessionId) {
        return new TenantFileSystemDirs(configServerDB, tenantName).getUserApplicationDir(sessionId);
    }

    // Creates a state watcher for the session the first time it is seen; afterwards only the
    // remote session reference held by the watcher is refreshed.
    private void updateSessionStateWatcher(long sessionId, RemoteSession remoteSession) {
        SessionStateWatcher sessionStateWatcher = sessionStateWatchers.get(sessionId);
        if (sessionStateWatcher == null) {
            Curator.FileCache fileCache = curator.createFileCache(getSessionStatePath(sessionId).getAbsolute(), false);
            fileCache.addListener(this::nodeChanged);
            sessionStateWatchers.put(sessionId, new SessionStateWatcher(fileCache, remoteSession, metricUpdater, zkWatcherExecutor, this));
        } else {
            sessionStateWatcher.updateRemoteSession(remoteSession);
        }
    }

    @Override
    public String toString() {
        return getLocalSessions().toString();
    }

    public Clock clock() { return clock; }

    /** Deletes all local sessions and tenant directories and closes the directory cache. */
    public void close() {
        deleteAllSessions();
        tenantFileSystemDirs.delete();
        try {
            if (directoryCache != null) {
                directoryCache.close();
            }
        } catch (Exception e) {
            log.log(Level.WARNING, "Exception when closing path cache", e);
        } finally {
            // An empty "existing" list removes every remote session and watcher.
            checkForRemovedSessions(new ArrayList<>());
        }
    }

    private void sessionsChanged() throws NumberFormatException {
        List<Long> sessions = getSessionListFromDirectoryCache(directoryCache.getCurrentData());
        checkForRemovedSessions(sessions);
        checkForAddedSessions(sessions);
    }

    // Evicts cached remote sessions not present in ZooKeeper any more, closing their watchers.
    // NOTE(review): iterates a synchronizedMap view without holding its lock — confirm all
    // callers run serialized on zkWatcherExecutor, which would make this safe.
    private void checkForRemovedSessions(List<Long> existingSessions) {
        for (Iterator<RemoteSession> it = remoteSessionCache.values().iterator(); it.hasNext(); ) {
            long sessionId = it.next().sessionId;
            if (existingSessions.contains(sessionId)) continue;

            SessionStateWatcher watcher = sessionStateWatchers.remove(sessionId);
            if (watcher != null) watcher.close();
            it.remove();
            metricUpdater.incRemovedSessions();
        }
    }

    private void checkForAddedSessions(List<Long> sessions) {
        for (Long sessionId : sessions)
            if (remoteSessionCache.get(sessionId) == null)
                sessionAdded(sessionId);
    }

    /** Sets the session status to ACTIVATE and records it as the application's active session. */
    public Transaction createActivateTransaction(Session session) {
        Transaction transaction = createSetStatusTransaction(session, Session.Status.ACTIVATE);
        transaction.add(applicationRepo.createPutTransaction(session.getApplicationId(), session.getSessionId()).operations());
        return transaction;
    }

    public Transaction createSetStatusTransaction(Session session, Session.Status status) {
        return session.sessionZooKeeperClient.createWriteStatusTransaction(status);
    }

    void setPrepared(Session session) {
        session.setStatus(Session.Status.PREPARE);
    }

    /** A transaction of file operations, committed as a unit (no rollback on failure). */
    private static class FileTransaction extends AbstractTransaction {

        public static FileTransaction from(FileOperation operation) {
            FileTransaction transaction = new FileTransaction();
            transaction.add(operation);
            return transaction;
        }

        @Override
        public void prepare() { }

        @Override
        public void commit() {
            for (Operation operation : operations())
                ((FileOperation)operation).commit();
        }

    }

    /** Factory for file operations */
    private static class FileOperations {

        /** Creates an operation which recursively deletes the given path */
        public static DeleteOperation delete(String pathToDelete) {
            return new DeleteOperation(pathToDelete);
        }

    }

    private interface FileOperation extends Transaction.Operation {
        void commit();
    }

    /**
     * Recursively deletes this path and everything below.
     * Succeeds with no action if the path does not exist.
     */
    private static class DeleteOperation implements FileOperation {

        private final String pathToDelete;

        DeleteOperation(String pathToDelete) {
            this.pathToDelete = pathToDelete;
        }

        @Override
        public void commit() {
            IOUtils.recursiveDeleteDir(new File(pathToDelete));
        }

    }

}
Keys such as `foo'bar"baz:bax` — mixing both quote characters with an unquoted colon — cannot be expressed with this syntax, but that is an acceptable limitation.
/**
 * Parses a weighted-set literal and adds each (key, weight) pair to {@code out}.
 * Two syntaxes are accepted:
 * <ul>
 *   <li>array form: {@code [[key,weight],[key,weight],...]} with numeric (long) keys</li>
 *   <li>map form:   {@code {key:weight,key:weight,...}} with string keys, optionally
 *       quoted with single or double quotes</li>
 * </ul>
 *
 * @param string the literal to parse; must start with '[' or '{'
 * @param out    the weighted-set item receiving the parsed tokens
 * @throws IllegalArgumentException if the string does not start with '[' or '{',
 *         or if it ends before the closing ']' / '}' is found
 */
public static void addItemsFromString(String string, WeightedSetItem out) {
    var s = new ParsableString(string);
    // Dispatch on the first character: array form vs map form.
    switch (s.peek()) {
        case '[' : addArrayItems(s, out); break;
        case '{' : addMapItems(s, out); break;
        default : throw new IllegalArgumentException("Expected a string starting by '[' or '{', " +
                                                     "but was '" + s.peek() + "'");
    }
}

/**
 * Parses the array form {@code [[key,weight],...]} where keys are longs.
 * NOTE(review): relies on ParsableString semantics — presumably position(c)
 * finds the next occurrence of c and longTo/intTo consume up to it; confirm
 * against the ParsableString implementation.
 */
private static void addArrayItems(ParsableString s, WeightedSetItem out) {
    s.pass('[');                                  // outer array opener
    while (s.peek() != ']') {
        s.pass('[');                              // inner [key,weight] pair opener
        long key = s.longTo(s.position(','));
        s.pass(',');
        int value = s.intTo(s.position(']'));
        s.pass(']');
        out.addToken(key, value);
        s.passOptional(',');                      // trailing comma between pairs is optional
        // Guard against running off the end of a malformed literal.
        if (s.atEnd()) throw new IllegalArgumentException("Expected an array ending by ']'");
    }
    s.pass(']');
}

/**
 * Parses the map form {@code {key:weight,...}} where keys are strings.
 * Keys may be single-quoted, double-quoted, or bare; bare keys are trimmed and
 * therefore cannot contain a quote character or ':' (the key terminator).
 */
private static void addMapItems(ParsableString s, WeightedSetItem out) {
    s.pass('{');
    while (s.peek() != '}') {
        String key;
        if (s.passOptional('\'')) {               // single-quoted key
            key = s.stringTo(s.position('\''));
            s.pass('\'');
        } else if (s.passOptional('"')) {         // double-quoted key
            key = s.stringTo(s.position('"'));
            s.pass('"');
        } else {                                  // bare key, up to ':'
            key = s.stringTo(s.position(':')).trim();
        }
        s.pass(':');
        // Weight runs to the next ',' (more entries) or '}' (end of map).
        int value = s.intTo(s.position(',','}'));
        out.addToken(key, value);
        s.passOptional(',');
        // Guard against running off the end of a malformed literal.
        if (s.atEnd()) throw new IllegalArgumentException("Expected a map ending by '}'");
    }
    s.pass('}');
}
public static void addItemsFromString(String string, WeightedSetItem out) { var s = new ParsableString(string); switch (s.peek()) { case '[' : addArrayItems(s, out); break; case '{' : addMapItems(s, out); break; default : throw new IllegalArgumentException("Expected a string starting by '[' or '{', " + "but was '" + s.peek() + "'"); } } private static void addArrayItems(ParsableString s, WeightedSetItem out) { s.pass('['); while (s.peek() != ']') { s.pass('['); long key = s.longTo(s.position(',')); s.pass(','); int value = s.intTo(s.position(']')); s.pass(']'); out.addToken(key, value); s.passOptional(','); if (s.atEnd()) throw new IllegalArgumentException("Expected an array ending by ']'"); } s.pass(']'); } private static void addMapItems(ParsableString s, WeightedSetItem out) { s.pass('{'); while (s.peek() != '}') { String key; if (s.passOptional('\'')) { key = s.stringTo(s.position('\'')); s.pass('\''); } else if (s.passOptional('"')) { key = s.stringTo(s.position('"')); s.pass('"'); } else { key = s.stringTo(s.position(':')).trim(); } s.pass(':'); int value = s.intTo(s.position(',','}')); out.addToken(key, value); s.passOptional(','); if (s.atEnd()) throw new IllegalArgumentException("Expected a map ending by '}'"); } s.pass('}
class ParameterListParser { '); }
class ParameterListParser { '); }
Do we need to allow this? The debug port won't be reachable in any case.
// Verifies which JVM options are reported as invalid, via
// verifyLoggingOfJvmOptions(isHosted, optionName, override, expectedInvalidOptions...):
// varargs list the option fragments expected to be flagged; no varargs means
// no warning is expected.
public void requireThatJvmOptionsAreLogged() throws IOException, SAXException {
    // Unknown free-standing words mixed with a valid option are flagged (hosted).
    verifyLoggingOfJvmOptions(true, "options", "-Xms2G foo bar", "foo", "bar");
    // Shell-injection-looking input is flagged both hosted and non-hosted.
    verifyLoggingOfJvmOptions(true, "options", "$(touch /tmp/hello-from-gc-options)", "$(touch", "/tmp/hello-from-gc-options)");
    verifyLoggingOfJvmOptions(false, "options", "$(touch /tmp/hello-from-gc-options)", "$(touch", "/tmp/hello-from-gc-options)");
    // Common well-formed options pass without warnings.
    verifyLoggingOfJvmOptions(true, "options", "-Xms2G");
    verifyLoggingOfJvmOptions(true, "options", "-verbose:gc");
    // Options containing ':' , '=' and ',' are accepted in this (before) version,
    // including a debugger agent option.
    verifyLoggingOfJvmOptions(true, "options", "-Djava.library.path=/opt/vespa/lib64:/home/y/lib64 -Xrunjdwp:transport=dt_socket,server=y,suspend=n,address=5005");
    verifyLoggingOfJvmOptions(false, "options", "-Xms2G");
}
verifyLoggingOfJvmOptions(true, "options", "-Djava.library.path=/opt/vespa/lib64:/home/y/lib64 -Xrunjdwp:transport=dt_socket,server=y,suspend=n,address=5005");
// Verifies which JVM options are reported as invalid, via
// verifyLoggingOfJvmOptions(isHosted, optionName, override, expectedInvalidOptions...):
// varargs list the option fragments expected to be flagged; no varargs means
// no warning is expected.
public void requireThatJvmOptionsAreLogged() throws IOException, SAXException {
    // Unknown free-standing words mixed with a valid option are flagged (hosted).
    verifyLoggingOfJvmOptions(true, "options", "-Xms2G foo bar", "foo", "bar");
    // Shell-injection-looking input is flagged both hosted and non-hosted.
    verifyLoggingOfJvmOptions(true, "options", "$(touch /tmp/hello-from-gc-options)", "$(touch", "/tmp/hello-from-gc-options)");
    // In this (after) version the debugger agent option is itself expected to be
    // flagged as invalid in hosted.
    verifyLoggingOfJvmOptions(true, "options", "-Xrunjdwp:transport=dt_socket,server=y,suspend=n,address=5005", "-Xrunjdwp:transport=dt_socket,server=y,suspend=n,address=5005");
    verifyLoggingOfJvmOptions(false, "options", "$(touch /tmp/hello-from-gc-options)", "$(touch", "/tmp/hello-from-gc-options)");
    // Common well-formed options pass without warnings.
    verifyLoggingOfJvmOptions(true, "options", "-Xms2G");
    verifyLoggingOfJvmOptions(true, "options", "-verbose:gc");
    verifyLoggingOfJvmOptions(true, "options", "-Djava.library.path=/opt/vespa/lib64:/home/y/lib64");
    verifyLoggingOfJvmOptions(false, "options", "-Xms2G");
}
class JvmOptionsTest extends ContainerModelBuilderTestBase { @Test public void verify_jvm_tag_with_attributes() throws IOException, SAXException { String servicesXml = "<container version='1.0'>" + " <search/>" + " <nodes>" + " <jvm options='-XX:SoftRefLRUPolicyMSPerMB=2500' gc-options='-XX:+UseParNewGC' allocated-memory='45%'/>" + " <node hostalias='mockhost'/>" + " </nodes>" + "</container>"; ApplicationPackage applicationPackage = new MockApplicationPackage.Builder().withServices(servicesXml).build(); final TestLogger logger = new TestLogger(); VespaModel model = new VespaModel(new NullConfigModelRegistry(), new DeployState.Builder() .applicationPackage(applicationPackage) .deployLogger(logger) .build()); QrStartConfig.Builder qrStartBuilder = new QrStartConfig.Builder(); model.getConfig(qrStartBuilder, "container/container.0"); QrStartConfig qrStartConfig = new QrStartConfig(qrStartBuilder); assertEquals("-XX:+UseParNewGC", qrStartConfig.jvm().gcopts()); assertEquals(45, qrStartConfig.jvm().heapSizeAsPercentageOfPhysicalMemory()); assertEquals("-XX:SoftRefLRUPolicyMSPerMB=2500", model.getContainerClusters().values().iterator().next().getContainers().get(0).getJvmOptions()); } @Test public void detect_conflicting_jvmgcoptions_in_jvmargs() { assertFalse(ContainerModelBuilder.incompatibleGCOptions("")); assertFalse(ContainerModelBuilder.incompatibleGCOptions("UseG1GC")); assertTrue(ContainerModelBuilder.incompatibleGCOptions("-XX:+UseG1GC")); assertTrue(ContainerModelBuilder.incompatibleGCOptions("abc -XX:+UseParNewGC xyz")); assertTrue(ContainerModelBuilder.incompatibleGCOptions("-XX:CMSInitiatingOccupancyFraction=19")); } @Test public void honours_jvm_gc_options() { Element clusterElem = DomBuilderTest.parse( "<container version='1.0'>", " <search/>", " <nodes jvm-gc-options='-XX:+UseG1GC'>", " <node hostalias='mockhost'/>", " </nodes>", "</container>" ); createModel(root, clusterElem); QrStartConfig.Builder qrStartBuilder = new QrStartConfig.Builder(); 
root.getConfig(qrStartBuilder, "container/container.0"); QrStartConfig qrStartConfig = new QrStartConfig(qrStartBuilder); assertEquals("-XX:+UseG1GC", qrStartConfig.jvm().gcopts()); } private static void verifyIgnoreJvmGCOptions(boolean isHosted) throws IOException, SAXException { verifyIgnoreJvmGCOptionsIfJvmArgs("jvmargs", ContainerCluster.G1GC, isHosted); verifyIgnoreJvmGCOptionsIfJvmArgs( "jvm-options", "-XX:+UseG1GC", isHosted); } private static void verifyIgnoreJvmGCOptionsIfJvmArgs(String jvmOptionsName, String expectedGC, boolean isHosted) throws IOException, SAXException { String servicesXml = "<container version='1.0'>" + " <nodes jvm-gc-options='-XX:+UseG1GC' " + jvmOptionsName + "='-XX:+UseParNewGC'>" + " <node hostalias='mockhost'/>" + " </nodes>" + "</container>"; ApplicationPackage applicationPackage = new MockApplicationPackage.Builder().withServices(servicesXml).build(); final TestLogger logger = new TestLogger(); VespaModel model = new VespaModel(new NullConfigModelRegistry(), new DeployState.Builder() .applicationPackage(applicationPackage) .deployLogger(logger) .properties(new TestProperties().setHostedVespa(isHosted)) .build()); QrStartConfig.Builder qrStartBuilder = new QrStartConfig.Builder(); model.getConfig(qrStartBuilder, "container/container.0"); QrStartConfig qrStartConfig = new QrStartConfig(qrStartBuilder); assertEquals(expectedGC, qrStartConfig.jvm().gcopts()); } @Test public void ignores_jvmgcoptions_on_conflicting_jvmargs() throws IOException, SAXException { verifyIgnoreJvmGCOptions(false); verifyIgnoreJvmGCOptions(true); } private void verifyJvmGCOptions(boolean isHosted, String featureFlagDefault, String override, String expected) throws IOException, SAXException { String servicesXml = "<container version='1.0'>" + " <nodes " + ((override == null) ? 
">" : ("jvm-gc-options='" + override + "'>")) + " <node hostalias='mockhost'/>" + " </nodes>" + "</container>"; ApplicationPackage applicationPackage = new MockApplicationPackage.Builder().withServices(servicesXml).build(); final TestLogger logger = new TestLogger(); VespaModel model = new VespaModel(new NullConfigModelRegistry(), new DeployState.Builder() .applicationPackage(applicationPackage) .deployLogger(logger) .properties(new TestProperties().setJvmGCOptions(featureFlagDefault).setHostedVespa(isHosted)) .build()); QrStartConfig.Builder qrStartBuilder = new QrStartConfig.Builder(); model.getConfig(qrStartBuilder, "container/container.0"); QrStartConfig qrStartConfig = new QrStartConfig(qrStartBuilder); assertEquals(expected, qrStartConfig.jvm().gcopts()); } @Test public void requireThatJvmGCOptionsIsHonoured() throws IOException, SAXException { verifyJvmGCOptions(false, null, null, ContainerCluster.G1GC); verifyJvmGCOptions(true, null, null, ContainerCluster.PARALLEL_GC); verifyJvmGCOptions(true, "", null, ContainerCluster.PARALLEL_GC); verifyJvmGCOptions(false, "-XX:+UseG1GC", null, "-XX:+UseG1GC"); verifyJvmGCOptions(true, "-XX:+UseG1GC", null, "-XX:+UseG1GC"); verifyJvmGCOptions(false, null, "-XX:+UseG1GC", "-XX:+UseG1GC"); verifyJvmGCOptions(false, "-XX:+UseParallelGC", "-XX:+UseG1GC", "-XX:+UseG1GC"); verifyJvmGCOptions(false, null, "-XX:+UseParallelGC", "-XX:+UseParallelGC"); } @Test public void requireThatInvalidJvmGcOptionsAreLogged() throws IOException, SAXException { verifyLoggingOfJvmGcOptions(true, "-XX:+ParallelGCThreads=8 foo bar", "foo", "bar"); verifyLoggingOfJvmGcOptions(true, "-XX:+UseCMSInitiatingOccupancyOnly foo bar", "-XX:+UseCMSInitiatingOccupancyOnly", "foo", "bar"); verifyLoggingOfJvmGcOptions(true, "-XX:+UseConcMarkSweepGC", "-XX:+UseConcMarkSweepGC"); verifyLoggingOfJvmGcOptions(true, "$(touch /tmp/hello-from-gc-options)", "$(touch", "/tmp/hello-from-gc-options)"); verifyLoggingOfJvmGcOptions(false, "$(touch 
/tmp/hello-from-gc-options)", "$(touch", "/tmp/hello-from-gc-options)"); verifyLoggingOfJvmGcOptions(true, "-XX:+ParallelGCThreads=8"); verifyLoggingOfJvmGcOptions(true, "-XX:MaxTenuringThreshold"); verifyLoggingOfJvmGcOptions(false, "-XX:+UseConcMarkSweepGC"); } @Test public void requireThatInvalidJvmGcOptionsFailDeployment() throws IOException, SAXException { try { buildModelWithJvmOptions(new TestProperties().setHostedVespa(true).failDeploymentWithInvalidJvmOptions(true), new TestLogger(), "gc-options", "-XX:+ParallelGCThreads=8 foo bar"); fail(); } catch (IllegalArgumentException e) { assertTrue(e.getMessage().contains("Invalid JVM GC options in services.xml: bar,foo")); } } private void verifyLoggingOfJvmGcOptions(boolean isHosted, String override, String... invalidOptions) throws IOException, SAXException { verifyLoggingOfJvmOptions(isHosted, "gc-options", override, invalidOptions); } private void verifyLoggingOfJvmOptions(boolean isHosted, String optionName, String override, String... invalidOptions) throws IOException, SAXException { TestLogger logger = new TestLogger(); buildModelWithJvmOptions(isHosted, logger, optionName, override); List<String> strings = Arrays.asList(invalidOptions.clone()); if (strings.isEmpty()) { assertEquals(logger.msgs.size() > 0 ? logger.msgs.get(0).getSecond() : "", 0, logger.msgs.size()); return; } Collections.sort(strings); Pair<Level, String> firstOption = logger.msgs.get(0); assertEquals(Level.WARNING, firstOption.getFirst()); assertEquals("Invalid JVM " + (optionName.equals("gc-options") ? 
"GC " : "") + "options in services.xml: " + String.join(",", strings), firstOption.getSecond()); } private void buildModelWithJvmOptions(boolean isHosted, TestLogger logger, String optionName, String override) throws IOException, SAXException { buildModelWithJvmOptions(new TestProperties().setHostedVespa(isHosted), logger, optionName, override); } private void buildModelWithJvmOptions(TestProperties properties, TestLogger logger, String optionName, String override) throws IOException, SAXException { String servicesXml = "<container version='1.0'>" + " <nodes>" + " <jvm " + optionName + "='" + override + "'/>" + " <node hostalias='mockhost'/>" + " </nodes>" + "</container>"; ApplicationPackage app = new MockApplicationPackage.Builder().withServices(servicesXml).build(); new VespaModel(new NullConfigModelRegistry(), new DeployState.Builder() .applicationPackage(app) .deployLogger(logger) .properties(properties) .build()); } @Test @Test public void requireThatInvalidJvmOptionsFailDeployment() throws IOException, SAXException { try { buildModelWithJvmOptions(new TestProperties().setHostedVespa(true).failDeploymentWithInvalidJvmOptions(true), new TestLogger(), "options", "-Xms2G foo bar"); fail(); } catch (IllegalArgumentException e) { assertTrue(e.getMessage().contains("Invalid JVM options in services.xml: bar,foo")); } } }
class JvmOptionsTest extends ContainerModelBuilderTestBase { @Test public void verify_jvm_tag_with_attributes() throws IOException, SAXException { String servicesXml = "<container version='1.0'>" + " <search/>" + " <nodes>" + " <jvm options='-XX:SoftRefLRUPolicyMSPerMB=2500' gc-options='-XX:+UseParNewGC' allocated-memory='45%'/>" + " <node hostalias='mockhost'/>" + " </nodes>" + "</container>"; ApplicationPackage applicationPackage = new MockApplicationPackage.Builder().withServices(servicesXml).build(); final TestLogger logger = new TestLogger(); VespaModel model = new VespaModel(new NullConfigModelRegistry(), new DeployState.Builder() .applicationPackage(applicationPackage) .deployLogger(logger) .build()); QrStartConfig.Builder qrStartBuilder = new QrStartConfig.Builder(); model.getConfig(qrStartBuilder, "container/container.0"); QrStartConfig qrStartConfig = new QrStartConfig(qrStartBuilder); assertEquals("-XX:+UseParNewGC", qrStartConfig.jvm().gcopts()); assertEquals(45, qrStartConfig.jvm().heapSizeAsPercentageOfPhysicalMemory()); assertEquals("-XX:SoftRefLRUPolicyMSPerMB=2500", model.getContainerClusters().values().iterator().next().getContainers().get(0).getJvmOptions()); } @Test public void detect_conflicting_jvmgcoptions_in_jvmargs() { assertFalse(ContainerModelBuilder.incompatibleGCOptions("")); assertFalse(ContainerModelBuilder.incompatibleGCOptions("UseG1GC")); assertTrue(ContainerModelBuilder.incompatibleGCOptions("-XX:+UseG1GC")); assertTrue(ContainerModelBuilder.incompatibleGCOptions("abc -XX:+UseParNewGC xyz")); assertTrue(ContainerModelBuilder.incompatibleGCOptions("-XX:CMSInitiatingOccupancyFraction=19")); } @Test public void honours_jvm_gc_options() { Element clusterElem = DomBuilderTest.parse( "<container version='1.0'>", " <search/>", " <nodes jvm-gc-options='-XX:+UseG1GC'>", " <node hostalias='mockhost'/>", " </nodes>", "</container>" ); createModel(root, clusterElem); QrStartConfig.Builder qrStartBuilder = new QrStartConfig.Builder(); 
root.getConfig(qrStartBuilder, "container/container.0"); QrStartConfig qrStartConfig = new QrStartConfig(qrStartBuilder); assertEquals("-XX:+UseG1GC", qrStartConfig.jvm().gcopts()); } private static void verifyIgnoreJvmGCOptions(boolean isHosted) throws IOException, SAXException { verifyIgnoreJvmGCOptionsIfJvmArgs("jvmargs", ContainerCluster.G1GC, isHosted); verifyIgnoreJvmGCOptionsIfJvmArgs( "jvm-options", "-XX:+UseG1GC", isHosted); } private static void verifyIgnoreJvmGCOptionsIfJvmArgs(String jvmOptionsName, String expectedGC, boolean isHosted) throws IOException, SAXException { String servicesXml = "<container version='1.0'>" + " <nodes jvm-gc-options='-XX:+UseG1GC' " + jvmOptionsName + "='-XX:+UseParNewGC'>" + " <node hostalias='mockhost'/>" + " </nodes>" + "</container>"; ApplicationPackage applicationPackage = new MockApplicationPackage.Builder().withServices(servicesXml).build(); final TestLogger logger = new TestLogger(); VespaModel model = new VespaModel(new NullConfigModelRegistry(), new DeployState.Builder() .applicationPackage(applicationPackage) .deployLogger(logger) .properties(new TestProperties().setHostedVespa(isHosted)) .build()); QrStartConfig.Builder qrStartBuilder = new QrStartConfig.Builder(); model.getConfig(qrStartBuilder, "container/container.0"); QrStartConfig qrStartConfig = new QrStartConfig(qrStartBuilder); assertEquals(expectedGC, qrStartConfig.jvm().gcopts()); } @Test public void ignores_jvmgcoptions_on_conflicting_jvmargs() throws IOException, SAXException { verifyIgnoreJvmGCOptions(false); verifyIgnoreJvmGCOptions(true); } private void verifyJvmGCOptions(boolean isHosted, String featureFlagDefault, String override, String expected) throws IOException, SAXException { String servicesXml = "<container version='1.0'>" + " <nodes " + ((override == null) ? 
">" : ("jvm-gc-options='" + override + "'>")) + " <node hostalias='mockhost'/>" + " </nodes>" + "</container>"; ApplicationPackage applicationPackage = new MockApplicationPackage.Builder().withServices(servicesXml).build(); final TestLogger logger = new TestLogger(); VespaModel model = new VespaModel(new NullConfigModelRegistry(), new DeployState.Builder() .applicationPackage(applicationPackage) .deployLogger(logger) .properties(new TestProperties().setJvmGCOptions(featureFlagDefault).setHostedVespa(isHosted)) .build()); QrStartConfig.Builder qrStartBuilder = new QrStartConfig.Builder(); model.getConfig(qrStartBuilder, "container/container.0"); QrStartConfig qrStartConfig = new QrStartConfig(qrStartBuilder); assertEquals(expected, qrStartConfig.jvm().gcopts()); } @Test public void requireThatJvmGCOptionsIsHonoured() throws IOException, SAXException { verifyJvmGCOptions(false, null, null, ContainerCluster.G1GC); verifyJvmGCOptions(true, null, null, ContainerCluster.PARALLEL_GC); verifyJvmGCOptions(true, "", null, ContainerCluster.PARALLEL_GC); verifyJvmGCOptions(false, "-XX:+UseG1GC", null, "-XX:+UseG1GC"); verifyJvmGCOptions(true, "-XX:+UseG1GC", null, "-XX:+UseG1GC"); verifyJvmGCOptions(false, null, "-XX:+UseG1GC", "-XX:+UseG1GC"); verifyJvmGCOptions(false, "-XX:+UseParallelGC", "-XX:+UseG1GC", "-XX:+UseG1GC"); verifyJvmGCOptions(false, null, "-XX:+UseParallelGC", "-XX:+UseParallelGC"); } @Test public void requireThatInvalidJvmGcOptionsAreLogged() throws IOException, SAXException { verifyLoggingOfJvmGcOptions(true, "-XX:+ParallelGCThreads=8 foo bar", "foo", "bar"); verifyLoggingOfJvmGcOptions(true, "-XX:+UseCMSInitiatingOccupancyOnly foo bar", "-XX:+UseCMSInitiatingOccupancyOnly", "foo", "bar"); verifyLoggingOfJvmGcOptions(true, "-XX:+UseConcMarkSweepGC", "-XX:+UseConcMarkSweepGC"); verifyLoggingOfJvmGcOptions(true, "$(touch /tmp/hello-from-gc-options)", "$(touch", "/tmp/hello-from-gc-options)"); verifyLoggingOfJvmGcOptions(false, "$(touch 
/tmp/hello-from-gc-options)", "$(touch", "/tmp/hello-from-gc-options)"); verifyLoggingOfJvmGcOptions(true, "-XX:+ParallelGCThreads=8"); verifyLoggingOfJvmGcOptions(true, "-XX:MaxTenuringThreshold"); verifyLoggingOfJvmGcOptions(false, "-XX:+UseConcMarkSweepGC"); } @Test public void requireThatInvalidJvmGcOptionsFailDeployment() throws IOException, SAXException { try { buildModelWithJvmOptions(new TestProperties().setHostedVespa(true).failDeploymentWithInvalidJvmOptions(true), new TestLogger(), "gc-options", "-XX:+ParallelGCThreads=8 foo bar"); fail(); } catch (IllegalArgumentException e) { assertTrue(e.getMessage().contains("Invalid JVM GC options in services.xml: bar,foo")); } } private void verifyLoggingOfJvmGcOptions(boolean isHosted, String override, String... invalidOptions) throws IOException, SAXException { verifyLoggingOfJvmOptions(isHosted, "gc-options", override, invalidOptions); } private void verifyLoggingOfJvmOptions(boolean isHosted, String optionName, String override, String... invalidOptions) throws IOException, SAXException { TestLogger logger = new TestLogger(); buildModelWithJvmOptions(isHosted, logger, optionName, override); List<String> strings = Arrays.asList(invalidOptions.clone()); if (strings.isEmpty()) { assertEquals(logger.msgs.size() > 0 ? logger.msgs.get(0).getSecond() : "", 0, logger.msgs.size()); return; } assertTrue("Expected 1 or more log messages for invalid JM options, got none", logger.msgs.size() > 0); Pair<Level, String> firstOption = logger.msgs.get(0); assertEquals(Level.WARNING, firstOption.getFirst()); Collections.sort(strings); assertEquals("Invalid JVM " + (optionName.equals("gc-options") ? 
"GC " : "") + "options in services.xml: " + String.join(",", strings), firstOption.getSecond()); } private void buildModelWithJvmOptions(boolean isHosted, TestLogger logger, String optionName, String override) throws IOException, SAXException { buildModelWithJvmOptions(new TestProperties().setHostedVespa(isHosted), logger, optionName, override); } private void buildModelWithJvmOptions(TestProperties properties, TestLogger logger, String optionName, String override) throws IOException, SAXException { String servicesXml = "<container version='1.0'>" + " <nodes>" + " <jvm " + optionName + "='" + override + "'/>" + " <node hostalias='mockhost'/>" + " </nodes>" + "</container>"; ApplicationPackage app = new MockApplicationPackage.Builder().withServices(servicesXml).build(); new VespaModel(new NullConfigModelRegistry(), new DeployState.Builder() .applicationPackage(app) .deployLogger(logger) .properties(properties) .build()); } @Test @Test public void requireThatInvalidJvmOptionsFailDeployment() throws IOException, SAXException { try { buildModelWithJvmOptions(new TestProperties().setHostedVespa(true).failDeploymentWithInvalidJvmOptions(true), new TestLogger(), "options", "-Xms2G foo bar"); fail(); } catch (IllegalArgumentException e) { assertTrue(e.getMessage().contains("Invalid JVM options in services.xml: bar,foo")); } } }
Not this particular one; that was just an example of an option containing a comma (taken from a non-hosted app). Maybe we should disallow `-Xrunjdwp:transport` completely in hosted? Right now we warn or fail deployment for hosted (depending on the feature flag value) and warn for non-hosted for JVM options like these. A JVM option like the above is perfectly valid in non-hosted. If we only validate JVM options for hosted, this PR is not necessary.
// Verifies which JVM options are reported as invalid, via
// verifyLoggingOfJvmOptions(isHosted, optionName, override, expectedInvalidOptions...):
// varargs list the option fragments expected to be flagged; no varargs means
// no warning is expected.
public void requireThatJvmOptionsAreLogged() throws IOException, SAXException {
    // Unknown free-standing words mixed with a valid option are flagged (hosted).
    verifyLoggingOfJvmOptions(true, "options", "-Xms2G foo bar", "foo", "bar");
    // Shell-injection-looking input is flagged both hosted and non-hosted.
    verifyLoggingOfJvmOptions(true, "options", "$(touch /tmp/hello-from-gc-options)", "$(touch", "/tmp/hello-from-gc-options)");
    verifyLoggingOfJvmOptions(false, "options", "$(touch /tmp/hello-from-gc-options)", "$(touch", "/tmp/hello-from-gc-options)");
    // Common well-formed options pass without warnings.
    verifyLoggingOfJvmOptions(true, "options", "-Xms2G");
    verifyLoggingOfJvmOptions(true, "options", "-verbose:gc");
    // Options containing ':' , '=' and ',' are accepted in this (before) version,
    // including a debugger agent option.
    verifyLoggingOfJvmOptions(true, "options", "-Djava.library.path=/opt/vespa/lib64:/home/y/lib64 -Xrunjdwp:transport=dt_socket,server=y,suspend=n,address=5005");
    verifyLoggingOfJvmOptions(false, "options", "-Xms2G");
}
verifyLoggingOfJvmOptions(true, "options", "-Djava.library.path=/opt/vespa/lib64:/home/y/lib64 -Xrunjdwp:transport=dt_socket,server=y,suspend=n,address=5005");
// Verifies which JVM options are reported as invalid, via
// verifyLoggingOfJvmOptions(isHosted, optionName, override, expectedInvalidOptions...):
// varargs list the option fragments expected to be flagged; no varargs means
// no warning is expected.
public void requireThatJvmOptionsAreLogged() throws IOException, SAXException {
    // Unknown free-standing words mixed with a valid option are flagged (hosted).
    verifyLoggingOfJvmOptions(true, "options", "-Xms2G foo bar", "foo", "bar");
    // Shell-injection-looking input is flagged both hosted and non-hosted.
    verifyLoggingOfJvmOptions(true, "options", "$(touch /tmp/hello-from-gc-options)", "$(touch", "/tmp/hello-from-gc-options)");
    // In this (after) version the debugger agent option is itself expected to be
    // flagged as invalid in hosted.
    verifyLoggingOfJvmOptions(true, "options", "-Xrunjdwp:transport=dt_socket,server=y,suspend=n,address=5005", "-Xrunjdwp:transport=dt_socket,server=y,suspend=n,address=5005");
    verifyLoggingOfJvmOptions(false, "options", "$(touch /tmp/hello-from-gc-options)", "$(touch", "/tmp/hello-from-gc-options)");
    // Common well-formed options pass without warnings.
    verifyLoggingOfJvmOptions(true, "options", "-Xms2G");
    verifyLoggingOfJvmOptions(true, "options", "-verbose:gc");
    verifyLoggingOfJvmOptions(true, "options", "-Djava.library.path=/opt/vespa/lib64:/home/y/lib64");
    verifyLoggingOfJvmOptions(false, "options", "-Xms2G");
}
class JvmOptionsTest extends ContainerModelBuilderTestBase { @Test public void verify_jvm_tag_with_attributes() throws IOException, SAXException { String servicesXml = "<container version='1.0'>" + " <search/>" + " <nodes>" + " <jvm options='-XX:SoftRefLRUPolicyMSPerMB=2500' gc-options='-XX:+UseParNewGC' allocated-memory='45%'/>" + " <node hostalias='mockhost'/>" + " </nodes>" + "</container>"; ApplicationPackage applicationPackage = new MockApplicationPackage.Builder().withServices(servicesXml).build(); final TestLogger logger = new TestLogger(); VespaModel model = new VespaModel(new NullConfigModelRegistry(), new DeployState.Builder() .applicationPackage(applicationPackage) .deployLogger(logger) .build()); QrStartConfig.Builder qrStartBuilder = new QrStartConfig.Builder(); model.getConfig(qrStartBuilder, "container/container.0"); QrStartConfig qrStartConfig = new QrStartConfig(qrStartBuilder); assertEquals("-XX:+UseParNewGC", qrStartConfig.jvm().gcopts()); assertEquals(45, qrStartConfig.jvm().heapSizeAsPercentageOfPhysicalMemory()); assertEquals("-XX:SoftRefLRUPolicyMSPerMB=2500", model.getContainerClusters().values().iterator().next().getContainers().get(0).getJvmOptions()); } @Test public void detect_conflicting_jvmgcoptions_in_jvmargs() { assertFalse(ContainerModelBuilder.incompatibleGCOptions("")); assertFalse(ContainerModelBuilder.incompatibleGCOptions("UseG1GC")); assertTrue(ContainerModelBuilder.incompatibleGCOptions("-XX:+UseG1GC")); assertTrue(ContainerModelBuilder.incompatibleGCOptions("abc -XX:+UseParNewGC xyz")); assertTrue(ContainerModelBuilder.incompatibleGCOptions("-XX:CMSInitiatingOccupancyFraction=19")); } @Test public void honours_jvm_gc_options() { Element clusterElem = DomBuilderTest.parse( "<container version='1.0'>", " <search/>", " <nodes jvm-gc-options='-XX:+UseG1GC'>", " <node hostalias='mockhost'/>", " </nodes>", "</container>" ); createModel(root, clusterElem); QrStartConfig.Builder qrStartBuilder = new QrStartConfig.Builder(); 
root.getConfig(qrStartBuilder, "container/container.0"); QrStartConfig qrStartConfig = new QrStartConfig(qrStartBuilder); assertEquals("-XX:+UseG1GC", qrStartConfig.jvm().gcopts()); } private static void verifyIgnoreJvmGCOptions(boolean isHosted) throws IOException, SAXException { verifyIgnoreJvmGCOptionsIfJvmArgs("jvmargs", ContainerCluster.G1GC, isHosted); verifyIgnoreJvmGCOptionsIfJvmArgs( "jvm-options", "-XX:+UseG1GC", isHosted); } private static void verifyIgnoreJvmGCOptionsIfJvmArgs(String jvmOptionsName, String expectedGC, boolean isHosted) throws IOException, SAXException { String servicesXml = "<container version='1.0'>" + " <nodes jvm-gc-options='-XX:+UseG1GC' " + jvmOptionsName + "='-XX:+UseParNewGC'>" + " <node hostalias='mockhost'/>" + " </nodes>" + "</container>"; ApplicationPackage applicationPackage = new MockApplicationPackage.Builder().withServices(servicesXml).build(); final TestLogger logger = new TestLogger(); VespaModel model = new VespaModel(new NullConfigModelRegistry(), new DeployState.Builder() .applicationPackage(applicationPackage) .deployLogger(logger) .properties(new TestProperties().setHostedVespa(isHosted)) .build()); QrStartConfig.Builder qrStartBuilder = new QrStartConfig.Builder(); model.getConfig(qrStartBuilder, "container/container.0"); QrStartConfig qrStartConfig = new QrStartConfig(qrStartBuilder); assertEquals(expectedGC, qrStartConfig.jvm().gcopts()); } @Test public void ignores_jvmgcoptions_on_conflicting_jvmargs() throws IOException, SAXException { verifyIgnoreJvmGCOptions(false); verifyIgnoreJvmGCOptions(true); } private void verifyJvmGCOptions(boolean isHosted, String featureFlagDefault, String override, String expected) throws IOException, SAXException { String servicesXml = "<container version='1.0'>" + " <nodes " + ((override == null) ? 
">" : ("jvm-gc-options='" + override + "'>")) + " <node hostalias='mockhost'/>" + " </nodes>" + "</container>"; ApplicationPackage applicationPackage = new MockApplicationPackage.Builder().withServices(servicesXml).build(); final TestLogger logger = new TestLogger(); VespaModel model = new VespaModel(new NullConfigModelRegistry(), new DeployState.Builder() .applicationPackage(applicationPackage) .deployLogger(logger) .properties(new TestProperties().setJvmGCOptions(featureFlagDefault).setHostedVespa(isHosted)) .build()); QrStartConfig.Builder qrStartBuilder = new QrStartConfig.Builder(); model.getConfig(qrStartBuilder, "container/container.0"); QrStartConfig qrStartConfig = new QrStartConfig(qrStartBuilder); assertEquals(expected, qrStartConfig.jvm().gcopts()); } @Test public void requireThatJvmGCOptionsIsHonoured() throws IOException, SAXException { verifyJvmGCOptions(false, null, null, ContainerCluster.G1GC); verifyJvmGCOptions(true, null, null, ContainerCluster.PARALLEL_GC); verifyJvmGCOptions(true, "", null, ContainerCluster.PARALLEL_GC); verifyJvmGCOptions(false, "-XX:+UseG1GC", null, "-XX:+UseG1GC"); verifyJvmGCOptions(true, "-XX:+UseG1GC", null, "-XX:+UseG1GC"); verifyJvmGCOptions(false, null, "-XX:+UseG1GC", "-XX:+UseG1GC"); verifyJvmGCOptions(false, "-XX:+UseParallelGC", "-XX:+UseG1GC", "-XX:+UseG1GC"); verifyJvmGCOptions(false, null, "-XX:+UseParallelGC", "-XX:+UseParallelGC"); } @Test public void requireThatInvalidJvmGcOptionsAreLogged() throws IOException, SAXException { verifyLoggingOfJvmGcOptions(true, "-XX:+ParallelGCThreads=8 foo bar", "foo", "bar"); verifyLoggingOfJvmGcOptions(true, "-XX:+UseCMSInitiatingOccupancyOnly foo bar", "-XX:+UseCMSInitiatingOccupancyOnly", "foo", "bar"); verifyLoggingOfJvmGcOptions(true, "-XX:+UseConcMarkSweepGC", "-XX:+UseConcMarkSweepGC"); verifyLoggingOfJvmGcOptions(true, "$(touch /tmp/hello-from-gc-options)", "$(touch", "/tmp/hello-from-gc-options)"); verifyLoggingOfJvmGcOptions(false, "$(touch 
/tmp/hello-from-gc-options)", "$(touch", "/tmp/hello-from-gc-options)"); verifyLoggingOfJvmGcOptions(true, "-XX:+ParallelGCThreads=8"); verifyLoggingOfJvmGcOptions(true, "-XX:MaxTenuringThreshold"); verifyLoggingOfJvmGcOptions(false, "-XX:+UseConcMarkSweepGC"); } @Test public void requireThatInvalidJvmGcOptionsFailDeployment() throws IOException, SAXException { try { buildModelWithJvmOptions(new TestProperties().setHostedVespa(true).failDeploymentWithInvalidJvmOptions(true), new TestLogger(), "gc-options", "-XX:+ParallelGCThreads=8 foo bar"); fail(); } catch (IllegalArgumentException e) { assertTrue(e.getMessage().contains("Invalid JVM GC options in services.xml: bar,foo")); } } private void verifyLoggingOfJvmGcOptions(boolean isHosted, String override, String... invalidOptions) throws IOException, SAXException { verifyLoggingOfJvmOptions(isHosted, "gc-options", override, invalidOptions); } private void verifyLoggingOfJvmOptions(boolean isHosted, String optionName, String override, String... invalidOptions) throws IOException, SAXException { TestLogger logger = new TestLogger(); buildModelWithJvmOptions(isHosted, logger, optionName, override); List<String> strings = Arrays.asList(invalidOptions.clone()); if (strings.isEmpty()) { assertEquals(logger.msgs.size() > 0 ? logger.msgs.get(0).getSecond() : "", 0, logger.msgs.size()); return; } Collections.sort(strings); Pair<Level, String> firstOption = logger.msgs.get(0); assertEquals(Level.WARNING, firstOption.getFirst()); assertEquals("Invalid JVM " + (optionName.equals("gc-options") ? 
"GC " : "") + "options in services.xml: " + String.join(",", strings), firstOption.getSecond()); } private void buildModelWithJvmOptions(boolean isHosted, TestLogger logger, String optionName, String override) throws IOException, SAXException { buildModelWithJvmOptions(new TestProperties().setHostedVespa(isHosted), logger, optionName, override); } private void buildModelWithJvmOptions(TestProperties properties, TestLogger logger, String optionName, String override) throws IOException, SAXException { String servicesXml = "<container version='1.0'>" + " <nodes>" + " <jvm " + optionName + "='" + override + "'/>" + " <node hostalias='mockhost'/>" + " </nodes>" + "</container>"; ApplicationPackage app = new MockApplicationPackage.Builder().withServices(servicesXml).build(); new VespaModel(new NullConfigModelRegistry(), new DeployState.Builder() .applicationPackage(app) .deployLogger(logger) .properties(properties) .build()); } @Test @Test public void requireThatInvalidJvmOptionsFailDeployment() throws IOException, SAXException { try { buildModelWithJvmOptions(new TestProperties().setHostedVespa(true).failDeploymentWithInvalidJvmOptions(true), new TestLogger(), "options", "-Xms2G foo bar"); fail(); } catch (IllegalArgumentException e) { assertTrue(e.getMessage().contains("Invalid JVM options in services.xml: bar,foo")); } } }
class JvmOptionsTest extends ContainerModelBuilderTestBase { @Test public void verify_jvm_tag_with_attributes() throws IOException, SAXException { String servicesXml = "<container version='1.0'>" + " <search/>" + " <nodes>" + " <jvm options='-XX:SoftRefLRUPolicyMSPerMB=2500' gc-options='-XX:+UseParNewGC' allocated-memory='45%'/>" + " <node hostalias='mockhost'/>" + " </nodes>" + "</container>"; ApplicationPackage applicationPackage = new MockApplicationPackage.Builder().withServices(servicesXml).build(); final TestLogger logger = new TestLogger(); VespaModel model = new VespaModel(new NullConfigModelRegistry(), new DeployState.Builder() .applicationPackage(applicationPackage) .deployLogger(logger) .build()); QrStartConfig.Builder qrStartBuilder = new QrStartConfig.Builder(); model.getConfig(qrStartBuilder, "container/container.0"); QrStartConfig qrStartConfig = new QrStartConfig(qrStartBuilder); assertEquals("-XX:+UseParNewGC", qrStartConfig.jvm().gcopts()); assertEquals(45, qrStartConfig.jvm().heapSizeAsPercentageOfPhysicalMemory()); assertEquals("-XX:SoftRefLRUPolicyMSPerMB=2500", model.getContainerClusters().values().iterator().next().getContainers().get(0).getJvmOptions()); } @Test public void detect_conflicting_jvmgcoptions_in_jvmargs() { assertFalse(ContainerModelBuilder.incompatibleGCOptions("")); assertFalse(ContainerModelBuilder.incompatibleGCOptions("UseG1GC")); assertTrue(ContainerModelBuilder.incompatibleGCOptions("-XX:+UseG1GC")); assertTrue(ContainerModelBuilder.incompatibleGCOptions("abc -XX:+UseParNewGC xyz")); assertTrue(ContainerModelBuilder.incompatibleGCOptions("-XX:CMSInitiatingOccupancyFraction=19")); } @Test public void honours_jvm_gc_options() { Element clusterElem = DomBuilderTest.parse( "<container version='1.0'>", " <search/>", " <nodes jvm-gc-options='-XX:+UseG1GC'>", " <node hostalias='mockhost'/>", " </nodes>", "</container>" ); createModel(root, clusterElem); QrStartConfig.Builder qrStartBuilder = new QrStartConfig.Builder(); 
root.getConfig(qrStartBuilder, "container/container.0"); QrStartConfig qrStartConfig = new QrStartConfig(qrStartBuilder); assertEquals("-XX:+UseG1GC", qrStartConfig.jvm().gcopts()); } private static void verifyIgnoreJvmGCOptions(boolean isHosted) throws IOException, SAXException { verifyIgnoreJvmGCOptionsIfJvmArgs("jvmargs", ContainerCluster.G1GC, isHosted); verifyIgnoreJvmGCOptionsIfJvmArgs( "jvm-options", "-XX:+UseG1GC", isHosted); } private static void verifyIgnoreJvmGCOptionsIfJvmArgs(String jvmOptionsName, String expectedGC, boolean isHosted) throws IOException, SAXException { String servicesXml = "<container version='1.0'>" + " <nodes jvm-gc-options='-XX:+UseG1GC' " + jvmOptionsName + "='-XX:+UseParNewGC'>" + " <node hostalias='mockhost'/>" + " </nodes>" + "</container>"; ApplicationPackage applicationPackage = new MockApplicationPackage.Builder().withServices(servicesXml).build(); final TestLogger logger = new TestLogger(); VespaModel model = new VespaModel(new NullConfigModelRegistry(), new DeployState.Builder() .applicationPackage(applicationPackage) .deployLogger(logger) .properties(new TestProperties().setHostedVespa(isHosted)) .build()); QrStartConfig.Builder qrStartBuilder = new QrStartConfig.Builder(); model.getConfig(qrStartBuilder, "container/container.0"); QrStartConfig qrStartConfig = new QrStartConfig(qrStartBuilder); assertEquals(expectedGC, qrStartConfig.jvm().gcopts()); } @Test public void ignores_jvmgcoptions_on_conflicting_jvmargs() throws IOException, SAXException { verifyIgnoreJvmGCOptions(false); verifyIgnoreJvmGCOptions(true); } private void verifyJvmGCOptions(boolean isHosted, String featureFlagDefault, String override, String expected) throws IOException, SAXException { String servicesXml = "<container version='1.0'>" + " <nodes " + ((override == null) ? 
">" : ("jvm-gc-options='" + override + "'>")) + " <node hostalias='mockhost'/>" + " </nodes>" + "</container>"; ApplicationPackage applicationPackage = new MockApplicationPackage.Builder().withServices(servicesXml).build(); final TestLogger logger = new TestLogger(); VespaModel model = new VespaModel(new NullConfigModelRegistry(), new DeployState.Builder() .applicationPackage(applicationPackage) .deployLogger(logger) .properties(new TestProperties().setJvmGCOptions(featureFlagDefault).setHostedVespa(isHosted)) .build()); QrStartConfig.Builder qrStartBuilder = new QrStartConfig.Builder(); model.getConfig(qrStartBuilder, "container/container.0"); QrStartConfig qrStartConfig = new QrStartConfig(qrStartBuilder); assertEquals(expected, qrStartConfig.jvm().gcopts()); } @Test public void requireThatJvmGCOptionsIsHonoured() throws IOException, SAXException { verifyJvmGCOptions(false, null, null, ContainerCluster.G1GC); verifyJvmGCOptions(true, null, null, ContainerCluster.PARALLEL_GC); verifyJvmGCOptions(true, "", null, ContainerCluster.PARALLEL_GC); verifyJvmGCOptions(false, "-XX:+UseG1GC", null, "-XX:+UseG1GC"); verifyJvmGCOptions(true, "-XX:+UseG1GC", null, "-XX:+UseG1GC"); verifyJvmGCOptions(false, null, "-XX:+UseG1GC", "-XX:+UseG1GC"); verifyJvmGCOptions(false, "-XX:+UseParallelGC", "-XX:+UseG1GC", "-XX:+UseG1GC"); verifyJvmGCOptions(false, null, "-XX:+UseParallelGC", "-XX:+UseParallelGC"); } @Test public void requireThatInvalidJvmGcOptionsAreLogged() throws IOException, SAXException { verifyLoggingOfJvmGcOptions(true, "-XX:+ParallelGCThreads=8 foo bar", "foo", "bar"); verifyLoggingOfJvmGcOptions(true, "-XX:+UseCMSInitiatingOccupancyOnly foo bar", "-XX:+UseCMSInitiatingOccupancyOnly", "foo", "bar"); verifyLoggingOfJvmGcOptions(true, "-XX:+UseConcMarkSweepGC", "-XX:+UseConcMarkSweepGC"); verifyLoggingOfJvmGcOptions(true, "$(touch /tmp/hello-from-gc-options)", "$(touch", "/tmp/hello-from-gc-options)"); verifyLoggingOfJvmGcOptions(false, "$(touch 
/tmp/hello-from-gc-options)", "$(touch", "/tmp/hello-from-gc-options)"); verifyLoggingOfJvmGcOptions(true, "-XX:+ParallelGCThreads=8"); verifyLoggingOfJvmGcOptions(true, "-XX:MaxTenuringThreshold"); verifyLoggingOfJvmGcOptions(false, "-XX:+UseConcMarkSweepGC"); } @Test public void requireThatInvalidJvmGcOptionsFailDeployment() throws IOException, SAXException { try { buildModelWithJvmOptions(new TestProperties().setHostedVespa(true).failDeploymentWithInvalidJvmOptions(true), new TestLogger(), "gc-options", "-XX:+ParallelGCThreads=8 foo bar"); fail(); } catch (IllegalArgumentException e) { assertTrue(e.getMessage().contains("Invalid JVM GC options in services.xml: bar,foo")); } } private void verifyLoggingOfJvmGcOptions(boolean isHosted, String override, String... invalidOptions) throws IOException, SAXException { verifyLoggingOfJvmOptions(isHosted, "gc-options", override, invalidOptions); } private void verifyLoggingOfJvmOptions(boolean isHosted, String optionName, String override, String... invalidOptions) throws IOException, SAXException { TestLogger logger = new TestLogger(); buildModelWithJvmOptions(isHosted, logger, optionName, override); List<String> strings = Arrays.asList(invalidOptions.clone()); if (strings.isEmpty()) { assertEquals(logger.msgs.size() > 0 ? logger.msgs.get(0).getSecond() : "", 0, logger.msgs.size()); return; } assertTrue("Expected 1 or more log messages for invalid JM options, got none", logger.msgs.size() > 0); Pair<Level, String> firstOption = logger.msgs.get(0); assertEquals(Level.WARNING, firstOption.getFirst()); Collections.sort(strings); assertEquals("Invalid JVM " + (optionName.equals("gc-options") ? 
"GC " : "") + "options in services.xml: " + String.join(",", strings), firstOption.getSecond()); } private void buildModelWithJvmOptions(boolean isHosted, TestLogger logger, String optionName, String override) throws IOException, SAXException { buildModelWithJvmOptions(new TestProperties().setHostedVespa(isHosted), logger, optionName, override); } private void buildModelWithJvmOptions(TestProperties properties, TestLogger logger, String optionName, String override) throws IOException, SAXException { String servicesXml = "<container version='1.0'>" + " <nodes>" + " <jvm " + optionName + "='" + override + "'/>" + " <node hostalias='mockhost'/>" + " </nodes>" + "</container>"; ApplicationPackage app = new MockApplicationPackage.Builder().withServices(servicesXml).build(); new VespaModel(new NullConfigModelRegistry(), new DeployState.Builder() .applicationPackage(app) .deployLogger(logger) .properties(properties) .build()); } @Test @Test public void requireThatInvalidJvmOptionsFailDeployment() throws IOException, SAXException { try { buildModelWithJvmOptions(new TestProperties().setHostedVespa(true).failDeploymentWithInvalidJvmOptions(true), new TestLogger(), "options", "-Xms2G foo bar"); fail(); } catch (IllegalArgumentException e) { assertTrue(e.getMessage().contains("Invalid JVM options in services.xml: bar,foo")); } } }
> Maybe we should disallow -Xrunjdwp:transport completely in hosted? Yes, it will prevent confusion about why debugging doesn't work at least. > Right now we warn or fail deployment for hosted (depending on feature flag value) and warn for non-hosted for JVM options like these. :+1:
public void requireThatJvmOptionsAreLogged() throws IOException, SAXException { verifyLoggingOfJvmOptions(true, "options", "-Xms2G foo bar", "foo", "bar"); verifyLoggingOfJvmOptions(true, "options", "$(touch /tmp/hello-from-gc-options)", "$(touch", "/tmp/hello-from-gc-options)"); verifyLoggingOfJvmOptions(false, "options", "$(touch /tmp/hello-from-gc-options)", "$(touch", "/tmp/hello-from-gc-options)"); verifyLoggingOfJvmOptions(true, "options", "-Xms2G"); verifyLoggingOfJvmOptions(true, "options", "-verbose:gc"); verifyLoggingOfJvmOptions(true, "options", "-Djava.library.path=/opt/vespa/lib64:/home/y/lib64 -Xrunjdwp:transport=dt_socket,server=y,suspend=n,address=5005"); verifyLoggingOfJvmOptions(false, "options", "-Xms2G"); }
verifyLoggingOfJvmOptions(true, "options", "-Djava.library.path=/opt/vespa/lib64:/home/y/lib64 -Xrunjdwp:transport=dt_socket,server=y,suspend=n,address=5005");
public void requireThatJvmOptionsAreLogged() throws IOException, SAXException { verifyLoggingOfJvmOptions(true, "options", "-Xms2G foo bar", "foo", "bar"); verifyLoggingOfJvmOptions(true, "options", "$(touch /tmp/hello-from-gc-options)", "$(touch", "/tmp/hello-from-gc-options)"); verifyLoggingOfJvmOptions(true, "options", "-Xrunjdwp:transport=dt_socket,server=y,suspend=n,address=5005", "-Xrunjdwp:transport=dt_socket,server=y,suspend=n,address=5005"); verifyLoggingOfJvmOptions(false, "options", "$(touch /tmp/hello-from-gc-options)", "$(touch", "/tmp/hello-from-gc-options)"); verifyLoggingOfJvmOptions(true, "options", "-Xms2G"); verifyLoggingOfJvmOptions(true, "options", "-verbose:gc"); verifyLoggingOfJvmOptions(true, "options", "-Djava.library.path=/opt/vespa/lib64:/home/y/lib64"); verifyLoggingOfJvmOptions(false, "options", "-Xms2G"); }
class JvmOptionsTest extends ContainerModelBuilderTestBase { @Test public void verify_jvm_tag_with_attributes() throws IOException, SAXException { String servicesXml = "<container version='1.0'>" + " <search/>" + " <nodes>" + " <jvm options='-XX:SoftRefLRUPolicyMSPerMB=2500' gc-options='-XX:+UseParNewGC' allocated-memory='45%'/>" + " <node hostalias='mockhost'/>" + " </nodes>" + "</container>"; ApplicationPackage applicationPackage = new MockApplicationPackage.Builder().withServices(servicesXml).build(); final TestLogger logger = new TestLogger(); VespaModel model = new VespaModel(new NullConfigModelRegistry(), new DeployState.Builder() .applicationPackage(applicationPackage) .deployLogger(logger) .build()); QrStartConfig.Builder qrStartBuilder = new QrStartConfig.Builder(); model.getConfig(qrStartBuilder, "container/container.0"); QrStartConfig qrStartConfig = new QrStartConfig(qrStartBuilder); assertEquals("-XX:+UseParNewGC", qrStartConfig.jvm().gcopts()); assertEquals(45, qrStartConfig.jvm().heapSizeAsPercentageOfPhysicalMemory()); assertEquals("-XX:SoftRefLRUPolicyMSPerMB=2500", model.getContainerClusters().values().iterator().next().getContainers().get(0).getJvmOptions()); } @Test public void detect_conflicting_jvmgcoptions_in_jvmargs() { assertFalse(ContainerModelBuilder.incompatibleGCOptions("")); assertFalse(ContainerModelBuilder.incompatibleGCOptions("UseG1GC")); assertTrue(ContainerModelBuilder.incompatibleGCOptions("-XX:+UseG1GC")); assertTrue(ContainerModelBuilder.incompatibleGCOptions("abc -XX:+UseParNewGC xyz")); assertTrue(ContainerModelBuilder.incompatibleGCOptions("-XX:CMSInitiatingOccupancyFraction=19")); } @Test public void honours_jvm_gc_options() { Element clusterElem = DomBuilderTest.parse( "<container version='1.0'>", " <search/>", " <nodes jvm-gc-options='-XX:+UseG1GC'>", " <node hostalias='mockhost'/>", " </nodes>", "</container>" ); createModel(root, clusterElem); QrStartConfig.Builder qrStartBuilder = new QrStartConfig.Builder(); 
root.getConfig(qrStartBuilder, "container/container.0"); QrStartConfig qrStartConfig = new QrStartConfig(qrStartBuilder); assertEquals("-XX:+UseG1GC", qrStartConfig.jvm().gcopts()); } private static void verifyIgnoreJvmGCOptions(boolean isHosted) throws IOException, SAXException { verifyIgnoreJvmGCOptionsIfJvmArgs("jvmargs", ContainerCluster.G1GC, isHosted); verifyIgnoreJvmGCOptionsIfJvmArgs( "jvm-options", "-XX:+UseG1GC", isHosted); } private static void verifyIgnoreJvmGCOptionsIfJvmArgs(String jvmOptionsName, String expectedGC, boolean isHosted) throws IOException, SAXException { String servicesXml = "<container version='1.0'>" + " <nodes jvm-gc-options='-XX:+UseG1GC' " + jvmOptionsName + "='-XX:+UseParNewGC'>" + " <node hostalias='mockhost'/>" + " </nodes>" + "</container>"; ApplicationPackage applicationPackage = new MockApplicationPackage.Builder().withServices(servicesXml).build(); final TestLogger logger = new TestLogger(); VespaModel model = new VespaModel(new NullConfigModelRegistry(), new DeployState.Builder() .applicationPackage(applicationPackage) .deployLogger(logger) .properties(new TestProperties().setHostedVespa(isHosted)) .build()); QrStartConfig.Builder qrStartBuilder = new QrStartConfig.Builder(); model.getConfig(qrStartBuilder, "container/container.0"); QrStartConfig qrStartConfig = new QrStartConfig(qrStartBuilder); assertEquals(expectedGC, qrStartConfig.jvm().gcopts()); } @Test public void ignores_jvmgcoptions_on_conflicting_jvmargs() throws IOException, SAXException { verifyIgnoreJvmGCOptions(false); verifyIgnoreJvmGCOptions(true); } private void verifyJvmGCOptions(boolean isHosted, String featureFlagDefault, String override, String expected) throws IOException, SAXException { String servicesXml = "<container version='1.0'>" + " <nodes " + ((override == null) ? 
">" : ("jvm-gc-options='" + override + "'>")) + " <node hostalias='mockhost'/>" + " </nodes>" + "</container>"; ApplicationPackage applicationPackage = new MockApplicationPackage.Builder().withServices(servicesXml).build(); final TestLogger logger = new TestLogger(); VespaModel model = new VespaModel(new NullConfigModelRegistry(), new DeployState.Builder() .applicationPackage(applicationPackage) .deployLogger(logger) .properties(new TestProperties().setJvmGCOptions(featureFlagDefault).setHostedVespa(isHosted)) .build()); QrStartConfig.Builder qrStartBuilder = new QrStartConfig.Builder(); model.getConfig(qrStartBuilder, "container/container.0"); QrStartConfig qrStartConfig = new QrStartConfig(qrStartBuilder); assertEquals(expected, qrStartConfig.jvm().gcopts()); } @Test public void requireThatJvmGCOptionsIsHonoured() throws IOException, SAXException { verifyJvmGCOptions(false, null, null, ContainerCluster.G1GC); verifyJvmGCOptions(true, null, null, ContainerCluster.PARALLEL_GC); verifyJvmGCOptions(true, "", null, ContainerCluster.PARALLEL_GC); verifyJvmGCOptions(false, "-XX:+UseG1GC", null, "-XX:+UseG1GC"); verifyJvmGCOptions(true, "-XX:+UseG1GC", null, "-XX:+UseG1GC"); verifyJvmGCOptions(false, null, "-XX:+UseG1GC", "-XX:+UseG1GC"); verifyJvmGCOptions(false, "-XX:+UseParallelGC", "-XX:+UseG1GC", "-XX:+UseG1GC"); verifyJvmGCOptions(false, null, "-XX:+UseParallelGC", "-XX:+UseParallelGC"); } @Test public void requireThatInvalidJvmGcOptionsAreLogged() throws IOException, SAXException { verifyLoggingOfJvmGcOptions(true, "-XX:+ParallelGCThreads=8 foo bar", "foo", "bar"); verifyLoggingOfJvmGcOptions(true, "-XX:+UseCMSInitiatingOccupancyOnly foo bar", "-XX:+UseCMSInitiatingOccupancyOnly", "foo", "bar"); verifyLoggingOfJvmGcOptions(true, "-XX:+UseConcMarkSweepGC", "-XX:+UseConcMarkSweepGC"); verifyLoggingOfJvmGcOptions(true, "$(touch /tmp/hello-from-gc-options)", "$(touch", "/tmp/hello-from-gc-options)"); verifyLoggingOfJvmGcOptions(false, "$(touch 
/tmp/hello-from-gc-options)", "$(touch", "/tmp/hello-from-gc-options)"); verifyLoggingOfJvmGcOptions(true, "-XX:+ParallelGCThreads=8"); verifyLoggingOfJvmGcOptions(true, "-XX:MaxTenuringThreshold"); verifyLoggingOfJvmGcOptions(false, "-XX:+UseConcMarkSweepGC"); } @Test public void requireThatInvalidJvmGcOptionsFailDeployment() throws IOException, SAXException { try { buildModelWithJvmOptions(new TestProperties().setHostedVespa(true).failDeploymentWithInvalidJvmOptions(true), new TestLogger(), "gc-options", "-XX:+ParallelGCThreads=8 foo bar"); fail(); } catch (IllegalArgumentException e) { assertTrue(e.getMessage().contains("Invalid JVM GC options in services.xml: bar,foo")); } } private void verifyLoggingOfJvmGcOptions(boolean isHosted, String override, String... invalidOptions) throws IOException, SAXException { verifyLoggingOfJvmOptions(isHosted, "gc-options", override, invalidOptions); } private void verifyLoggingOfJvmOptions(boolean isHosted, String optionName, String override, String... invalidOptions) throws IOException, SAXException { TestLogger logger = new TestLogger(); buildModelWithJvmOptions(isHosted, logger, optionName, override); List<String> strings = Arrays.asList(invalidOptions.clone()); if (strings.isEmpty()) { assertEquals(logger.msgs.size() > 0 ? logger.msgs.get(0).getSecond() : "", 0, logger.msgs.size()); return; } Collections.sort(strings); Pair<Level, String> firstOption = logger.msgs.get(0); assertEquals(Level.WARNING, firstOption.getFirst()); assertEquals("Invalid JVM " + (optionName.equals("gc-options") ? 
"GC " : "") + "options in services.xml: " + String.join(",", strings), firstOption.getSecond()); } private void buildModelWithJvmOptions(boolean isHosted, TestLogger logger, String optionName, String override) throws IOException, SAXException { buildModelWithJvmOptions(new TestProperties().setHostedVespa(isHosted), logger, optionName, override); } private void buildModelWithJvmOptions(TestProperties properties, TestLogger logger, String optionName, String override) throws IOException, SAXException { String servicesXml = "<container version='1.0'>" + " <nodes>" + " <jvm " + optionName + "='" + override + "'/>" + " <node hostalias='mockhost'/>" + " </nodes>" + "</container>"; ApplicationPackage app = new MockApplicationPackage.Builder().withServices(servicesXml).build(); new VespaModel(new NullConfigModelRegistry(), new DeployState.Builder() .applicationPackage(app) .deployLogger(logger) .properties(properties) .build()); } @Test @Test public void requireThatInvalidJvmOptionsFailDeployment() throws IOException, SAXException { try { buildModelWithJvmOptions(new TestProperties().setHostedVespa(true).failDeploymentWithInvalidJvmOptions(true), new TestLogger(), "options", "-Xms2G foo bar"); fail(); } catch (IllegalArgumentException e) { assertTrue(e.getMessage().contains("Invalid JVM options in services.xml: bar,foo")); } } }
class JvmOptionsTest extends ContainerModelBuilderTestBase { @Test public void verify_jvm_tag_with_attributes() throws IOException, SAXException { String servicesXml = "<container version='1.0'>" + " <search/>" + " <nodes>" + " <jvm options='-XX:SoftRefLRUPolicyMSPerMB=2500' gc-options='-XX:+UseParNewGC' allocated-memory='45%'/>" + " <node hostalias='mockhost'/>" + " </nodes>" + "</container>"; ApplicationPackage applicationPackage = new MockApplicationPackage.Builder().withServices(servicesXml).build(); final TestLogger logger = new TestLogger(); VespaModel model = new VespaModel(new NullConfigModelRegistry(), new DeployState.Builder() .applicationPackage(applicationPackage) .deployLogger(logger) .build()); QrStartConfig.Builder qrStartBuilder = new QrStartConfig.Builder(); model.getConfig(qrStartBuilder, "container/container.0"); QrStartConfig qrStartConfig = new QrStartConfig(qrStartBuilder); assertEquals("-XX:+UseParNewGC", qrStartConfig.jvm().gcopts()); assertEquals(45, qrStartConfig.jvm().heapSizeAsPercentageOfPhysicalMemory()); assertEquals("-XX:SoftRefLRUPolicyMSPerMB=2500", model.getContainerClusters().values().iterator().next().getContainers().get(0).getJvmOptions()); } @Test public void detect_conflicting_jvmgcoptions_in_jvmargs() { assertFalse(ContainerModelBuilder.incompatibleGCOptions("")); assertFalse(ContainerModelBuilder.incompatibleGCOptions("UseG1GC")); assertTrue(ContainerModelBuilder.incompatibleGCOptions("-XX:+UseG1GC")); assertTrue(ContainerModelBuilder.incompatibleGCOptions("abc -XX:+UseParNewGC xyz")); assertTrue(ContainerModelBuilder.incompatibleGCOptions("-XX:CMSInitiatingOccupancyFraction=19")); } @Test public void honours_jvm_gc_options() { Element clusterElem = DomBuilderTest.parse( "<container version='1.0'>", " <search/>", " <nodes jvm-gc-options='-XX:+UseG1GC'>", " <node hostalias='mockhost'/>", " </nodes>", "</container>" ); createModel(root, clusterElem); QrStartConfig.Builder qrStartBuilder = new QrStartConfig.Builder(); 
root.getConfig(qrStartBuilder, "container/container.0"); QrStartConfig qrStartConfig = new QrStartConfig(qrStartBuilder); assertEquals("-XX:+UseG1GC", qrStartConfig.jvm().gcopts()); } private static void verifyIgnoreJvmGCOptions(boolean isHosted) throws IOException, SAXException { verifyIgnoreJvmGCOptionsIfJvmArgs("jvmargs", ContainerCluster.G1GC, isHosted); verifyIgnoreJvmGCOptionsIfJvmArgs( "jvm-options", "-XX:+UseG1GC", isHosted); } private static void verifyIgnoreJvmGCOptionsIfJvmArgs(String jvmOptionsName, String expectedGC, boolean isHosted) throws IOException, SAXException { String servicesXml = "<container version='1.0'>" + " <nodes jvm-gc-options='-XX:+UseG1GC' " + jvmOptionsName + "='-XX:+UseParNewGC'>" + " <node hostalias='mockhost'/>" + " </nodes>" + "</container>"; ApplicationPackage applicationPackage = new MockApplicationPackage.Builder().withServices(servicesXml).build(); final TestLogger logger = new TestLogger(); VespaModel model = new VespaModel(new NullConfigModelRegistry(), new DeployState.Builder() .applicationPackage(applicationPackage) .deployLogger(logger) .properties(new TestProperties().setHostedVespa(isHosted)) .build()); QrStartConfig.Builder qrStartBuilder = new QrStartConfig.Builder(); model.getConfig(qrStartBuilder, "container/container.0"); QrStartConfig qrStartConfig = new QrStartConfig(qrStartBuilder); assertEquals(expectedGC, qrStartConfig.jvm().gcopts()); } @Test public void ignores_jvmgcoptions_on_conflicting_jvmargs() throws IOException, SAXException { verifyIgnoreJvmGCOptions(false); verifyIgnoreJvmGCOptions(true); } private void verifyJvmGCOptions(boolean isHosted, String featureFlagDefault, String override, String expected) throws IOException, SAXException { String servicesXml = "<container version='1.0'>" + " <nodes " + ((override == null) ? 
">" : ("jvm-gc-options='" + override + "'>")) + " <node hostalias='mockhost'/>" + " </nodes>" + "</container>"; ApplicationPackage applicationPackage = new MockApplicationPackage.Builder().withServices(servicesXml).build(); final TestLogger logger = new TestLogger(); VespaModel model = new VespaModel(new NullConfigModelRegistry(), new DeployState.Builder() .applicationPackage(applicationPackage) .deployLogger(logger) .properties(new TestProperties().setJvmGCOptions(featureFlagDefault).setHostedVespa(isHosted)) .build()); QrStartConfig.Builder qrStartBuilder = new QrStartConfig.Builder(); model.getConfig(qrStartBuilder, "container/container.0"); QrStartConfig qrStartConfig = new QrStartConfig(qrStartBuilder); assertEquals(expected, qrStartConfig.jvm().gcopts()); } @Test public void requireThatJvmGCOptionsIsHonoured() throws IOException, SAXException { verifyJvmGCOptions(false, null, null, ContainerCluster.G1GC); verifyJvmGCOptions(true, null, null, ContainerCluster.PARALLEL_GC); verifyJvmGCOptions(true, "", null, ContainerCluster.PARALLEL_GC); verifyJvmGCOptions(false, "-XX:+UseG1GC", null, "-XX:+UseG1GC"); verifyJvmGCOptions(true, "-XX:+UseG1GC", null, "-XX:+UseG1GC"); verifyJvmGCOptions(false, null, "-XX:+UseG1GC", "-XX:+UseG1GC"); verifyJvmGCOptions(false, "-XX:+UseParallelGC", "-XX:+UseG1GC", "-XX:+UseG1GC"); verifyJvmGCOptions(false, null, "-XX:+UseParallelGC", "-XX:+UseParallelGC"); } @Test public void requireThatInvalidJvmGcOptionsAreLogged() throws IOException, SAXException { verifyLoggingOfJvmGcOptions(true, "-XX:+ParallelGCThreads=8 foo bar", "foo", "bar"); verifyLoggingOfJvmGcOptions(true, "-XX:+UseCMSInitiatingOccupancyOnly foo bar", "-XX:+UseCMSInitiatingOccupancyOnly", "foo", "bar"); verifyLoggingOfJvmGcOptions(true, "-XX:+UseConcMarkSweepGC", "-XX:+UseConcMarkSweepGC"); verifyLoggingOfJvmGcOptions(true, "$(touch /tmp/hello-from-gc-options)", "$(touch", "/tmp/hello-from-gc-options)"); verifyLoggingOfJvmGcOptions(false, "$(touch 
/tmp/hello-from-gc-options)", "$(touch", "/tmp/hello-from-gc-options)"); verifyLoggingOfJvmGcOptions(true, "-XX:+ParallelGCThreads=8"); verifyLoggingOfJvmGcOptions(true, "-XX:MaxTenuringThreshold"); verifyLoggingOfJvmGcOptions(false, "-XX:+UseConcMarkSweepGC"); } @Test public void requireThatInvalidJvmGcOptionsFailDeployment() throws IOException, SAXException { try { buildModelWithJvmOptions(new TestProperties().setHostedVespa(true).failDeploymentWithInvalidJvmOptions(true), new TestLogger(), "gc-options", "-XX:+ParallelGCThreads=8 foo bar"); fail(); } catch (IllegalArgumentException e) { assertTrue(e.getMessage().contains("Invalid JVM GC options in services.xml: bar,foo")); } } private void verifyLoggingOfJvmGcOptions(boolean isHosted, String override, String... invalidOptions) throws IOException, SAXException { verifyLoggingOfJvmOptions(isHosted, "gc-options", override, invalidOptions); } private void verifyLoggingOfJvmOptions(boolean isHosted, String optionName, String override, String... invalidOptions) throws IOException, SAXException { TestLogger logger = new TestLogger(); buildModelWithJvmOptions(isHosted, logger, optionName, override); List<String> strings = Arrays.asList(invalidOptions.clone()); if (strings.isEmpty()) { assertEquals(logger.msgs.size() > 0 ? logger.msgs.get(0).getSecond() : "", 0, logger.msgs.size()); return; } assertTrue("Expected 1 or more log messages for invalid JM options, got none", logger.msgs.size() > 0); Pair<Level, String> firstOption = logger.msgs.get(0); assertEquals(Level.WARNING, firstOption.getFirst()); Collections.sort(strings); assertEquals("Invalid JVM " + (optionName.equals("gc-options") ? 
"GC " : "") + "options in services.xml: " + String.join(",", strings), firstOption.getSecond()); } private void buildModelWithJvmOptions(boolean isHosted, TestLogger logger, String optionName, String override) throws IOException, SAXException { buildModelWithJvmOptions(new TestProperties().setHostedVespa(isHosted), logger, optionName, override); } private void buildModelWithJvmOptions(TestProperties properties, TestLogger logger, String optionName, String override) throws IOException, SAXException { String servicesXml = "<container version='1.0'>" + " <nodes>" + " <jvm " + optionName + "='" + override + "'/>" + " <node hostalias='mockhost'/>" + " </nodes>" + "</container>"; ApplicationPackage app = new MockApplicationPackage.Builder().withServices(servicesXml).build(); new VespaModel(new NullConfigModelRegistry(), new DeployState.Builder() .applicationPackage(app) .deployLogger(logger) .properties(properties) .build()); } @Test @Test public void requireThatInvalidJvmOptionsFailDeployment() throws IOException, SAXException { try { buildModelWithJvmOptions(new TestProperties().setHostedVespa(true).failDeploymentWithInvalidJvmOptions(true), new TestLogger(), "options", "-Xms2G foo bar"); fail(); } catch (IllegalArgumentException e) { assertTrue(e.getMessage().contains("Invalid JVM options in services.xml: bar,foo")); } } }
Nit: Variable should be camel-case.
public String zooKeeperSnapshotMethod() { String vespa_zookeeper_snapshot_method = System.getenv("VESPA_ZOOKEEPER_SNAPSHOT_METHOD"); return vespa_zookeeper_snapshot_method == null ? "" : vespa_zookeeper_snapshot_method; }
String vespa_zookeeper_snapshot_method = System.getenv("VESPA_ZOOKEEPER_SNAPSHOT_METHOD");
public String zooKeeperSnapshotMethod() { String vespaZookeeperSnapshotMethod = System.getenv("VESPA_ZOOKEEPER_SNAPSHOT_METHOD"); return vespaZookeeperSnapshotMethod == null ? "" : vespaZookeeperSnapshotMethod; }
class CloudConfigInstallVariables implements CloudConfigOptions { @Override public Optional<Integer> rpcPort() { return Optional.ofNullable(System.getenv("VESPA_CONFIGSERVER_RPC_PORT")) .or(() -> getRawInstallVariable("services.port_configserver_rpc")) .map(Integer::parseInt); } @Override public Optional<Boolean> multiTenant() { return Optional.ofNullable(System.getenv("VESPA_CONFIGSERVER_MULTITENANT")) .or(() -> getInstallVariable("multitenant")) .map(Boolean::parseBoolean); } @Override public ConfigServer[] allConfigServers() { return Optional.ofNullable(System.getenv("VESPA_CONFIGSERVERS")) .or(() -> getRawInstallVariable("services.addr_configserver")) .map(CloudConfigInstallVariables::toConfigServers) .orElseGet(() -> new ConfigServer[0]); } @Override public int[] configServerZookeeperIds() { return Optional.ofNullable(System.getenv("VESPA_CONFIGSERVER_ZOOKEEPER_IDS")) .map(CloudConfigInstallVariables::multiValueParameterStream) .orElseGet(Stream::empty) .mapToInt(Integer::valueOf) .toArray(); } @Override public Optional<Long> zookeeperBarrierTimeout() { return getInstallVariable("zookeeper_barrier_timeout", Long::parseLong); } @Override public Optional<Long> sessionLifeTimeSecs() { return getInstallVariable("session_lifetime", Long::parseLong); } @Override public String[] configModelPluginDirs() { return getRawInstallVariable("cloudconfig_server.config_model_plugin_dirs") .map(CloudConfigInstallVariables::toConfigModelsPluginDir) .orElseGet(() -> new String[0]); } @Override public Optional<Integer> zookeeperClientPort() { return getInstallVariable("zookeeper_clientPort", Integer::parseInt); } @Override public Optional<Integer> zookeeperQuorumPort() { return getInstallVariable("zookeeper_quorumPort", Integer::parseInt); } @Override public Optional<Integer> zookeeperElectionPort() { return getInstallVariable("zookeeper_electionPort", Integer::parseInt); } @Override public Optional<String> environment() { return 
Optional.ofNullable(System.getenv("VESPA_ENVIRONMENT")) .or(() -> getInstallVariable("environment")); } @Override public Optional<String> region() { return Optional.ofNullable(System.getenv("VESPA_REGION")) .or(() -> getInstallVariable("region")); } @Override public Optional<String> system() { return Optional.ofNullable(System.getenv("VESPA_SYSTEM")) .or(() -> getInstallVariable("system")); } @Override public Optional<Boolean> useVespaVersionInRequest() { return getInstallVariable("use_vespa_version_in_request", Boolean::parseBoolean); } @Override public Optional<Boolean> hostedVespa() { return Optional.ofNullable(System.getenv("VESPA_CONFIGSERVER_HOSTED")) .or(() -> getInstallVariable("hosted_vespa")) .map(Boolean::parseBoolean); } @Override public Optional<String> loadBalancerAddress() { return getInstallVariable("load_balancer_address"); } @Override public Optional<String> athenzDnsSuffix() { return getInstallVariable("athenz_dns_suffix"); } @Override public Optional<String> ztsUrl() { return getInstallVariable("zts_url"); } @Override static ConfigServer[] toConfigServers(String configserversString) { return multiValueParameterStream(configserversString) .map(CloudConfigInstallVariables::toConfigServer) .toArray(ConfigServer[]::new); } static ConfigServer toConfigServer(String configserverString) { try { String[] hostPortTuple = configserverString.split(":"); if (configserverString.contains(":")) { return new ConfigServer(hostPortTuple[0], Optional.of(Integer.parseInt(hostPortTuple[1]))); } else { return new ConfigServer(configserverString, Optional.empty()); } } catch (Exception e) { throw new IllegalArgumentException("Invalid config server " + configserverString, e); } } static String[] toConfigModelsPluginDir(String configModelsPluginDirString) { return multiValueParameterStream(configModelsPluginDirString).toArray(String[]::new); } private static Optional<String> getInstallVariable(String name) { return getInstallVariable(name, Function.identity()); } 
private static <T> Optional<T> getInstallVariable(String name, Function<String, T> converter) { return getRawInstallVariable("cloudconfig_server." + name).map(converter); } private static Optional<String> getRawInstallVariable(String name) { return Optional.ofNullable( Optional.ofNullable(System.getenv(name.replace(".", "__"))) .orElseGet(() -> System.getProperty(name))); } private static Stream<String> multiValueParameterStream(String param) { return Arrays.stream(param.split("[, ]")).filter(value -> !value.isEmpty()); } }
class CloudConfigInstallVariables implements CloudConfigOptions { @Override public Optional<Integer> rpcPort() { return Optional.ofNullable(System.getenv("VESPA_CONFIGSERVER_RPC_PORT")) .or(() -> getRawInstallVariable("services.port_configserver_rpc")) .map(Integer::parseInt); } @Override public Optional<Boolean> multiTenant() { return Optional.ofNullable(System.getenv("VESPA_CONFIGSERVER_MULTITENANT")) .or(() -> getInstallVariable("multitenant")) .map(Boolean::parseBoolean); } @Override public ConfigServer[] allConfigServers() { return Optional.ofNullable(System.getenv("VESPA_CONFIGSERVERS")) .or(() -> getRawInstallVariable("services.addr_configserver")) .map(CloudConfigInstallVariables::toConfigServers) .orElseGet(() -> new ConfigServer[0]); } @Override public int[] configServerZookeeperIds() { return Optional.ofNullable(System.getenv("VESPA_CONFIGSERVER_ZOOKEEPER_IDS")) .map(CloudConfigInstallVariables::multiValueParameterStream) .orElseGet(Stream::empty) .mapToInt(Integer::valueOf) .toArray(); } @Override public Optional<Long> zookeeperBarrierTimeout() { return getInstallVariable("zookeeper_barrier_timeout", Long::parseLong); } @Override public Optional<Long> sessionLifeTimeSecs() { return getInstallVariable("session_lifetime", Long::parseLong); } @Override public String[] configModelPluginDirs() { return getRawInstallVariable("cloudconfig_server.config_model_plugin_dirs") .map(CloudConfigInstallVariables::toConfigModelsPluginDir) .orElseGet(() -> new String[0]); } @Override public Optional<Integer> zookeeperClientPort() { return getInstallVariable("zookeeper_clientPort", Integer::parseInt); } @Override public Optional<Integer> zookeeperQuorumPort() { return getInstallVariable("zookeeper_quorumPort", Integer::parseInt); } @Override public Optional<Integer> zookeeperElectionPort() { return getInstallVariable("zookeeper_electionPort", Integer::parseInt); } @Override public Optional<String> environment() { return 
Optional.ofNullable(System.getenv("VESPA_ENVIRONMENT")) .or(() -> getInstallVariable("environment")); } @Override public Optional<String> region() { return Optional.ofNullable(System.getenv("VESPA_REGION")) .or(() -> getInstallVariable("region")); } @Override public Optional<String> system() { return Optional.ofNullable(System.getenv("VESPA_SYSTEM")) .or(() -> getInstallVariable("system")); } @Override public Optional<Boolean> useVespaVersionInRequest() { return getInstallVariable("use_vespa_version_in_request", Boolean::parseBoolean); } @Override public Optional<Boolean> hostedVespa() { return Optional.ofNullable(System.getenv("VESPA_CONFIGSERVER_HOSTED")) .or(() -> getInstallVariable("hosted_vespa")) .map(Boolean::parseBoolean); } @Override public Optional<String> loadBalancerAddress() { return getInstallVariable("load_balancer_address"); } @Override public Optional<String> athenzDnsSuffix() { return getInstallVariable("athenz_dns_suffix"); } @Override public Optional<String> ztsUrl() { return getInstallVariable("zts_url"); } @Override static ConfigServer[] toConfigServers(String configserversString) { return multiValueParameterStream(configserversString) .map(CloudConfigInstallVariables::toConfigServer) .toArray(ConfigServer[]::new); } static ConfigServer toConfigServer(String configserverString) { try { String[] hostPortTuple = configserverString.split(":"); if (configserverString.contains(":")) { return new ConfigServer(hostPortTuple[0], Optional.of(Integer.parseInt(hostPortTuple[1]))); } else { return new ConfigServer(configserverString, Optional.empty()); } } catch (Exception e) { throw new IllegalArgumentException("Invalid config server " + configserverString, e); } } static String[] toConfigModelsPluginDir(String configModelsPluginDirString) { return multiValueParameterStream(configModelsPluginDirString).toArray(String[]::new); } private static Optional<String> getInstallVariable(String name) { return getInstallVariable(name, Function.identity()); } 
private static <T> Optional<T> getInstallVariable(String name, Function<String, T> converter) { return getRawInstallVariable("cloudconfig_server." + name).map(converter); } private static Optional<String> getRawInstallVariable(String name) { return Optional.ofNullable( Optional.ofNullable(System.getenv(name.replace(".", "__"))) .orElseGet(() -> System.getProperty(name))); } private static Stream<String> multiValueParameterStream(String param) { return Arrays.stream(param.split("[, ]")).filter(value -> !value.isEmpty()); } }
Yup, thanks
/** Returns the configured ZooKeeper snapshot method, or the empty string if the variable is unset. */
public String zooKeeperSnapshotMethod() {
    String method = System.getenv("VESPA_ZOOKEEPER_SNAPSHOT_METHOD");
    if (method == null) return "";
    return method;
}
// Read the configured snapshot method; null when the environment variable is unset.
String vespa_zookeeper_snapshot_method = System.getenv("VESPA_ZOOKEEPER_SNAPSHOT_METHOD");
/** Returns the configured ZooKeeper snapshot method, or the empty string if the variable is unset. */
public String zooKeeperSnapshotMethod() {
    String vespaZookeeperSnapshotMethod = System.getenv("VESPA_ZOOKEEPER_SNAPSHOT_METHOD");
    return vespaZookeeperSnapshotMethod == null ? "" : vespaZookeeperSnapshotMethod;
}
class CloudConfigInstallVariables implements CloudConfigOptions { @Override public Optional<Integer> rpcPort() { return Optional.ofNullable(System.getenv("VESPA_CONFIGSERVER_RPC_PORT")) .or(() -> getRawInstallVariable("services.port_configserver_rpc")) .map(Integer::parseInt); } @Override public Optional<Boolean> multiTenant() { return Optional.ofNullable(System.getenv("VESPA_CONFIGSERVER_MULTITENANT")) .or(() -> getInstallVariable("multitenant")) .map(Boolean::parseBoolean); } @Override public ConfigServer[] allConfigServers() { return Optional.ofNullable(System.getenv("VESPA_CONFIGSERVERS")) .or(() -> getRawInstallVariable("services.addr_configserver")) .map(CloudConfigInstallVariables::toConfigServers) .orElseGet(() -> new ConfigServer[0]); } @Override public int[] configServerZookeeperIds() { return Optional.ofNullable(System.getenv("VESPA_CONFIGSERVER_ZOOKEEPER_IDS")) .map(CloudConfigInstallVariables::multiValueParameterStream) .orElseGet(Stream::empty) .mapToInt(Integer::valueOf) .toArray(); } @Override public Optional<Long> zookeeperBarrierTimeout() { return getInstallVariable("zookeeper_barrier_timeout", Long::parseLong); } @Override public Optional<Long> sessionLifeTimeSecs() { return getInstallVariable("session_lifetime", Long::parseLong); } @Override public String[] configModelPluginDirs() { return getRawInstallVariable("cloudconfig_server.config_model_plugin_dirs") .map(CloudConfigInstallVariables::toConfigModelsPluginDir) .orElseGet(() -> new String[0]); } @Override public Optional<Integer> zookeeperClientPort() { return getInstallVariable("zookeeper_clientPort", Integer::parseInt); } @Override public Optional<Integer> zookeeperQuorumPort() { return getInstallVariable("zookeeper_quorumPort", Integer::parseInt); } @Override public Optional<Integer> zookeeperElectionPort() { return getInstallVariable("zookeeper_electionPort", Integer::parseInt); } @Override public Optional<String> environment() { return 
Optional.ofNullable(System.getenv("VESPA_ENVIRONMENT")) .or(() -> getInstallVariable("environment")); } @Override public Optional<String> region() { return Optional.ofNullable(System.getenv("VESPA_REGION")) .or(() -> getInstallVariable("region")); } @Override public Optional<String> system() { return Optional.ofNullable(System.getenv("VESPA_SYSTEM")) .or(() -> getInstallVariable("system")); } @Override public Optional<Boolean> useVespaVersionInRequest() { return getInstallVariable("use_vespa_version_in_request", Boolean::parseBoolean); } @Override public Optional<Boolean> hostedVespa() { return Optional.ofNullable(System.getenv("VESPA_CONFIGSERVER_HOSTED")) .or(() -> getInstallVariable("hosted_vespa")) .map(Boolean::parseBoolean); } @Override public Optional<String> loadBalancerAddress() { return getInstallVariable("load_balancer_address"); } @Override public Optional<String> athenzDnsSuffix() { return getInstallVariable("athenz_dns_suffix"); } @Override public Optional<String> ztsUrl() { return getInstallVariable("zts_url"); } @Override static ConfigServer[] toConfigServers(String configserversString) { return multiValueParameterStream(configserversString) .map(CloudConfigInstallVariables::toConfigServer) .toArray(ConfigServer[]::new); } static ConfigServer toConfigServer(String configserverString) { try { String[] hostPortTuple = configserverString.split(":"); if (configserverString.contains(":")) { return new ConfigServer(hostPortTuple[0], Optional.of(Integer.parseInt(hostPortTuple[1]))); } else { return new ConfigServer(configserverString, Optional.empty()); } } catch (Exception e) { throw new IllegalArgumentException("Invalid config server " + configserverString, e); } } static String[] toConfigModelsPluginDir(String configModelsPluginDirString) { return multiValueParameterStream(configModelsPluginDirString).toArray(String[]::new); } private static Optional<String> getInstallVariable(String name) { return getInstallVariable(name, Function.identity()); } 
private static <T> Optional<T> getInstallVariable(String name, Function<String, T> converter) { return getRawInstallVariable("cloudconfig_server." + name).map(converter); } private static Optional<String> getRawInstallVariable(String name) { return Optional.ofNullable( Optional.ofNullable(System.getenv(name.replace(".", "__"))) .orElseGet(() -> System.getProperty(name))); } private static Stream<String> multiValueParameterStream(String param) { return Arrays.stream(param.split("[, ]")).filter(value -> !value.isEmpty()); } }
class CloudConfigInstallVariables implements CloudConfigOptions { @Override public Optional<Integer> rpcPort() { return Optional.ofNullable(System.getenv("VESPA_CONFIGSERVER_RPC_PORT")) .or(() -> getRawInstallVariable("services.port_configserver_rpc")) .map(Integer::parseInt); } @Override public Optional<Boolean> multiTenant() { return Optional.ofNullable(System.getenv("VESPA_CONFIGSERVER_MULTITENANT")) .or(() -> getInstallVariable("multitenant")) .map(Boolean::parseBoolean); } @Override public ConfigServer[] allConfigServers() { return Optional.ofNullable(System.getenv("VESPA_CONFIGSERVERS")) .or(() -> getRawInstallVariable("services.addr_configserver")) .map(CloudConfigInstallVariables::toConfigServers) .orElseGet(() -> new ConfigServer[0]); } @Override public int[] configServerZookeeperIds() { return Optional.ofNullable(System.getenv("VESPA_CONFIGSERVER_ZOOKEEPER_IDS")) .map(CloudConfigInstallVariables::multiValueParameterStream) .orElseGet(Stream::empty) .mapToInt(Integer::valueOf) .toArray(); } @Override public Optional<Long> zookeeperBarrierTimeout() { return getInstallVariable("zookeeper_barrier_timeout", Long::parseLong); } @Override public Optional<Long> sessionLifeTimeSecs() { return getInstallVariable("session_lifetime", Long::parseLong); } @Override public String[] configModelPluginDirs() { return getRawInstallVariable("cloudconfig_server.config_model_plugin_dirs") .map(CloudConfigInstallVariables::toConfigModelsPluginDir) .orElseGet(() -> new String[0]); } @Override public Optional<Integer> zookeeperClientPort() { return getInstallVariable("zookeeper_clientPort", Integer::parseInt); } @Override public Optional<Integer> zookeeperQuorumPort() { return getInstallVariable("zookeeper_quorumPort", Integer::parseInt); } @Override public Optional<Integer> zookeeperElectionPort() { return getInstallVariable("zookeeper_electionPort", Integer::parseInt); } @Override public Optional<String> environment() { return 
Optional.ofNullable(System.getenv("VESPA_ENVIRONMENT")) .or(() -> getInstallVariable("environment")); } @Override public Optional<String> region() { return Optional.ofNullable(System.getenv("VESPA_REGION")) .or(() -> getInstallVariable("region")); } @Override public Optional<String> system() { return Optional.ofNullable(System.getenv("VESPA_SYSTEM")) .or(() -> getInstallVariable("system")); } @Override public Optional<Boolean> useVespaVersionInRequest() { return getInstallVariable("use_vespa_version_in_request", Boolean::parseBoolean); } @Override public Optional<Boolean> hostedVespa() { return Optional.ofNullable(System.getenv("VESPA_CONFIGSERVER_HOSTED")) .or(() -> getInstallVariable("hosted_vespa")) .map(Boolean::parseBoolean); } @Override public Optional<String> loadBalancerAddress() { return getInstallVariable("load_balancer_address"); } @Override public Optional<String> athenzDnsSuffix() { return getInstallVariable("athenz_dns_suffix"); } @Override public Optional<String> ztsUrl() { return getInstallVariable("zts_url"); } @Override static ConfigServer[] toConfigServers(String configserversString) { return multiValueParameterStream(configserversString) .map(CloudConfigInstallVariables::toConfigServer) .toArray(ConfigServer[]::new); } static ConfigServer toConfigServer(String configserverString) { try { String[] hostPortTuple = configserverString.split(":"); if (configserverString.contains(":")) { return new ConfigServer(hostPortTuple[0], Optional.of(Integer.parseInt(hostPortTuple[1]))); } else { return new ConfigServer(configserverString, Optional.empty()); } } catch (Exception e) { throw new IllegalArgumentException("Invalid config server " + configserverString, e); } } static String[] toConfigModelsPluginDir(String configModelsPluginDirString) { return multiValueParameterStream(configModelsPluginDirString).toArray(String[]::new); } private static Optional<String> getInstallVariable(String name) { return getInstallVariable(name, Function.identity()); } 
private static <T> Optional<T> getInstallVariable(String name, Function<String, T> converter) { return getRawInstallVariable("cloudconfig_server." + name).map(converter); } private static Optional<String> getRawInstallVariable(String name) { return Optional.ofNullable( Optional.ofNullable(System.getenv(name.replace(".", "__"))) .orElseGet(() -> System.getProperty(name))); } private static Stream<String> multiValueParameterStream(String param) { return Arrays.stream(param.split("[, ]")).filter(value -> !value.isEmpty()); } }
This 20000 and 30000 has something to do with the module port range Harald mentioned? Extract to constants?
/**
 * Returns the three consecutive ports reserved for ZooKeeper server {@code id} (1-based),
 * lazily extending the shared 'ports' pool. The base port is derived from the "zk-version"
 * system property so runs against different ZooKeeper versions use disjoint ranges.
 * Presumably the three ports are client/quorum/election -- TODO confirm.
 */
static List<Integer> getPorts(int id) {
    // Fix: 'while' instead of 'if' — a single 'if' only grows the pool by one group of
    // three, so requesting ids out of order would make subList below throw
    // IndexOutOfBoundsException. Behavior for sequential ids is unchanged.
    while (ports.size() < id * 3) {
        int previousPort;
        if (ports.isEmpty()) {
            // Fold the dotted version string into a deterministic offset.
            String[] version = System.getProperty("zk-version").split("\\.");
            int versionPortOffset = 0;
            for (String part : version)
                versionPortOffset = 32 * (versionPortOffset + Integer.parseInt(part));
            // 20000/30000 keep the base port in (20000, 50000); arbitrary bounds (see review note).
            previousPort = 20000 + versionPortOffset % 30000;
        }
        else
            previousPort = ports.get(ports.size() - 1);
        for (int i = 0; i < 3; i++)
            ports.add(previousPort = nextPort(previousPort));
    }
    return ports.subList(id * 3 - 3, id * 3);
}
// Base port: 20000 plus a version-derived offset bounded by 30000 (arbitrary bounds; see review note).
previousPort = 20000 + versionPortOffset % 30000;
/**
 * Returns the three consecutive ports reserved for ZooKeeper server {@code id} (1-based),
 * lazily extending the shared 'ports' pool. The base port is derived from the "zk-version"
 * system property so runs against different ZooKeeper versions use disjoint ranges.
 * Presumably the three ports are client/quorum/election -- TODO confirm.
 */
static List<Integer> getPorts(int id) {
    // Fix: 'while' instead of 'if' — a single 'if' only grows the pool by one group of
    // three, so requesting ids out of order would make subList below throw
    // IndexOutOfBoundsException. Behavior for sequential ids is unchanged.
    while (ports.size() < id * 3) {
        int previousPort;
        if (ports.isEmpty()) {
            // Fold the dotted version string into a deterministic offset.
            String[] version = System.getProperty("zk-version").split("\\.");
            int versionPortOffset = 0;
            for (String part : version)
                versionPortOffset = 32 * (versionPortOffset + Integer.parseInt(part));
            // 20000/30000 keep the base port in (20000, 50000); arbitrary bounds (see review note).
            previousPort = 20000 + versionPortOffset % 30000;
        }
        else
            previousPort = ports.get(ports.size() - 1);
        for (int i = 0; i < 3; i++)
            ports.add(previousPort = nextPort(previousPort));
    }
    return ports.subList(id * 3 - 3, id * 3);
}
// Test helper that drives a reconfigurable ZooKeeper server on a background thread.
// The two-party Phaser is a rendezvous between the test thread and the server thread;
// the loop re-creates the server for each new 'config' and exits once 'config' is null,
// after which the reconfigurer is deconstructed.
class ZooKeeper {

    final ExecutorService executor = Executors.newSingleThreadExecutor();
    final Phaser phaser = new Phaser(2); // two parties: test thread + server thread
    final AtomicReference<Future<?>> future = new AtomicReference<>();
    ZookeeperServerConfig config; // written by the test thread between rendezvous points

    void run() {
        future.set(executor.submit(() -> {
            Reconfigurer reconfigurer = new Reconfigurer(new VespaZooKeeperAdminImpl());
            phaser.arriveAndAwaitAdvance(); // rendezvous with the test thread before reading 'config'
            while (config != null) {
                new ReconfigurableVespaZooKeeperServer(reconfigurer, config);
                phaser.arriveAndAwaitAdvance();
                phaser.arriveAndAwaitAdvance();
            }
            reconfigurer.deconstruct();
        }));
    }

    // Waits up to 30 seconds for the server thread to finish; run() must have been called first.
    void await() throws ExecutionException, InterruptedException, TimeoutException {
        future.get().get(30, SECONDS);
    }
}
// Test helper that drives a reconfigurable ZooKeeper server on a background thread.
// The two-party Phaser is a rendezvous between the test thread and the server thread;
// the loop re-creates the server for each new 'config' and exits once 'config' is null,
// after which the reconfigurer is deconstructed.
class ZooKeeper {

    final ExecutorService executor = Executors.newSingleThreadExecutor();
    final Phaser phaser = new Phaser(2); // two parties: test thread + server thread
    final AtomicReference<Future<?>> future = new AtomicReference<>();
    ZookeeperServerConfig config; // written by the test thread between rendezvous points

    void run() {
        future.set(executor.submit(() -> {
            Reconfigurer reconfigurer = new Reconfigurer(new VespaZooKeeperAdminImpl());
            phaser.arriveAndAwaitAdvance(); // rendezvous with the test thread before reading 'config'
            while (config != null) {
                new ReconfigurableVespaZooKeeperServer(reconfigurer, config);
                phaser.arriveAndAwaitAdvance();
                phaser.arriveAndAwaitAdvance();
            }
            reconfigurer.deconstruct();
        }));
    }

    // Waits up to 30 seconds for the server thread to finish; run() must have been called first.
    void await() throws ExecutionException, InterruptedException, TimeoutException {
        future.get().get(30, SECONDS);
    }
}
Hmm, they're just picked from thin air, really. Well, those other modules prefer things below 20k, but staying below 50k is just by chance.
/**
 * Returns the three consecutive ports reserved for ZooKeeper server {@code id} (1-based),
 * lazily extending the shared 'ports' pool. The base port is derived from the "zk-version"
 * system property so runs against different ZooKeeper versions use disjoint ranges.
 * Presumably the three ports are client/quorum/election -- TODO confirm.
 */
static List<Integer> getPorts(int id) {
    // Fix: 'while' instead of 'if' — a single 'if' only grows the pool by one group of
    // three, so requesting ids out of order would make subList below throw
    // IndexOutOfBoundsException. Behavior for sequential ids is unchanged.
    while (ports.size() < id * 3) {
        int previousPort;
        if (ports.isEmpty()) {
            // Fold the dotted version string into a deterministic offset.
            String[] version = System.getProperty("zk-version").split("\\.");
            int versionPortOffset = 0;
            for (String part : version)
                versionPortOffset = 32 * (versionPortOffset + Integer.parseInt(part));
            // 20000/30000 keep the base port in (20000, 50000); arbitrary bounds (see review note).
            previousPort = 20000 + versionPortOffset % 30000;
        }
        else
            previousPort = ports.get(ports.size() - 1);
        for (int i = 0; i < 3; i++)
            ports.add(previousPort = nextPort(previousPort));
    }
    return ports.subList(id * 3 - 3, id * 3);
}
// Base port: 20000 plus a version-derived offset bounded by 30000 (arbitrary bounds; see review note).
previousPort = 20000 + versionPortOffset % 30000;
/**
 * Returns the three consecutive ports reserved for ZooKeeper server {@code id} (1-based),
 * lazily extending the shared 'ports' pool. The base port is derived from the "zk-version"
 * system property so runs against different ZooKeeper versions use disjoint ranges.
 * Presumably the three ports are client/quorum/election -- TODO confirm.
 */
static List<Integer> getPorts(int id) {
    // Fix: 'while' instead of 'if' — a single 'if' only grows the pool by one group of
    // three, so requesting ids out of order would make subList below throw
    // IndexOutOfBoundsException. Behavior for sequential ids is unchanged.
    while (ports.size() < id * 3) {
        int previousPort;
        if (ports.isEmpty()) {
            // Fold the dotted version string into a deterministic offset.
            String[] version = System.getProperty("zk-version").split("\\.");
            int versionPortOffset = 0;
            for (String part : version)
                versionPortOffset = 32 * (versionPortOffset + Integer.parseInt(part));
            // 20000/30000 keep the base port in (20000, 50000); arbitrary bounds (see review note).
            previousPort = 20000 + versionPortOffset % 30000;
        }
        else
            previousPort = ports.get(ports.size() - 1);
        for (int i = 0; i < 3; i++)
            ports.add(previousPort = nextPort(previousPort));
    }
    return ports.subList(id * 3 - 3, id * 3);
}
// Test helper that drives a reconfigurable ZooKeeper server on a background thread.
// The two-party Phaser is a rendezvous between the test thread and the server thread;
// the loop re-creates the server for each new 'config' and exits once 'config' is null,
// after which the reconfigurer is deconstructed.
class ZooKeeper {

    final ExecutorService executor = Executors.newSingleThreadExecutor();
    final Phaser phaser = new Phaser(2); // two parties: test thread + server thread
    final AtomicReference<Future<?>> future = new AtomicReference<>();
    ZookeeperServerConfig config; // written by the test thread between rendezvous points

    void run() {
        future.set(executor.submit(() -> {
            Reconfigurer reconfigurer = new Reconfigurer(new VespaZooKeeperAdminImpl());
            phaser.arriveAndAwaitAdvance(); // rendezvous with the test thread before reading 'config'
            while (config != null) {
                new ReconfigurableVespaZooKeeperServer(reconfigurer, config);
                phaser.arriveAndAwaitAdvance();
                phaser.arriveAndAwaitAdvance();
            }
            reconfigurer.deconstruct();
        }));
    }

    // Waits up to 30 seconds for the server thread to finish; run() must have been called first.
    void await() throws ExecutionException, InterruptedException, TimeoutException {
        future.get().get(30, SECONDS);
    }
}
// Test helper that drives a reconfigurable ZooKeeper server on a background thread.
// The two-party Phaser is a rendezvous between the test thread and the server thread;
// the loop re-creates the server for each new 'config' and exits once 'config' is null,
// after which the reconfigurer is deconstructed.
class ZooKeeper {

    final ExecutorService executor = Executors.newSingleThreadExecutor();
    final Phaser phaser = new Phaser(2); // two parties: test thread + server thread
    final AtomicReference<Future<?>> future = new AtomicReference<>();
    ZookeeperServerConfig config; // written by the test thread between rendezvous points

    void run() {
        future.set(executor.submit(() -> {
            Reconfigurer reconfigurer = new Reconfigurer(new VespaZooKeeperAdminImpl());
            phaser.arriveAndAwaitAdvance(); // rendezvous with the test thread before reading 'config'
            while (config != null) {
                new ReconfigurableVespaZooKeeperServer(reconfigurer, config);
                phaser.arriveAndAwaitAdvance();
                phaser.arriveAndAwaitAdvance();
            }
            reconfigurer.deconstruct();
        }));
    }

    // Waits up to 30 seconds for the server thread to finish; run() must have been called first.
    void await() throws ExecutionException, InterruptedException, TimeoutException {
        future.get().get(30, SECONDS);
    }
}
Suggest rephrasing the warning to e.g.: "The pom.xml of bundle 'x.jar' includes a dependency to the artifact y."
/**
 * Warns about deprecated Maven artifacts declared as dependencies in a bundle's pom.xml.
 * A pom.xml that cannot be parsed is logged at INFO and otherwise ignored.
 *
 * @param deployLogger  logger for application-package warnings
 * @param jarFilename   name of the bundle jar the pom.xml came from (used in messages)
 * @param pomXmlContent raw pom.xml content
 */
private void validatePomXml(DeployLogger deployLogger, String jarFilename, String pomXmlContent) {
    try {
        Document pom = DocumentBuilderFactory.newDefaultInstance().newDocumentBuilder()
                .parse(new InputSource(new StringReader(pomXmlContent)));
        NodeList dependencies = (NodeList) XPathFactory.newDefaultInstance().newXPath()
                .compile("/project/dependencies/dependency")
                .evaluate(pom, XPathConstants.NODESET);
        for (int i = 0; i < dependencies.getLength(); i++) {
            Element dependency = (Element) dependencies.item(i);
            String groupId = dependency.getElementsByTagName("groupId").item(0).getTextContent();
            String artifactId = dependency.getElementsByTagName("artifactId").item(0).getTextContent();
            for (DeprecatedMavenArtifact deprecatedArtifact : DeprecatedMavenArtifact.values()) {
                if (groupId.equals(deprecatedArtifact.groupId) && artifactId.equals(deprecatedArtifact.artifactId)) {
                    // Reworded per review: name the bundle and the offending artifact directly.
                    deployLogger.logApplicationPackage(Level.WARNING, String.format(
                            "The pom.xml of bundle '%s' includes a dependency to the artifact '%s:%s'. \n%s",
                            jarFilename, groupId, artifactId, deprecatedArtifact.description));
                }
            }
        }
    } catch (ParserConfigurationException e) {
        // Parser misconfiguration is a programming error, not an application-package problem.
        throw new RuntimeException(e);
    } catch (Exception e) {
        deployLogger.log(Level.INFO, String.format("Unable to parse pom.xml from %s", jarFilename));
    }
}
// NOTE(review): truncated snippet — the first segment of the warning's format string only.
String.format("For pom.xml in '%s': \n" +
/**
 * Warns about deprecated Maven artifacts declared as dependencies in a bundle's pom.xml.
 * A pom.xml that cannot be parsed is logged at INFO and otherwise ignored.
 *
 * @param deployLogger  logger for application-package warnings
 * @param jarFilename   name of the bundle jar the pom.xml came from (used in messages)
 * @param pomXmlContent raw pom.xml content
 */
private void validatePomXml(DeployLogger deployLogger, String jarFilename, String pomXmlContent) {
    try {
        Document pom = DocumentBuilderFactory.newDefaultInstance().newDocumentBuilder()
                .parse(new InputSource(new StringReader(pomXmlContent)));
        // Select every <dependency> under <project><dependencies>.
        NodeList dependencies = (NodeList) XPathFactory.newDefaultInstance().newXPath()
                .compile("/project/dependencies/dependency")
                .evaluate(pom, XPathConstants.NODESET);
        for (int i = 0; i < dependencies.getLength(); i++) {
            Element dependency = (Element) dependencies.item(i);
            String groupId = dependency.getElementsByTagName("groupId").item(0).getTextContent();
            String artifactId = dependency.getElementsByTagName("artifactId").item(0).getTextContent();
            for (DeprecatedMavenArtifact deprecatedArtifact : DeprecatedMavenArtifact.values()) {
                if (groupId.equals(deprecatedArtifact.groupId) && artifactId.equals(deprecatedArtifact.artifactId)) {
                    deployLogger.logApplicationPackage(Level.WARNING, String.format(
                            "The pom.xml of bundle '%s' includes a dependency to the artifact '%s:%s'. \n%s",
                            jarFilename, groupId, artifactId, deprecatedArtifact.description));
                }
            }
        }
    } catch (ParserConfigurationException e) {
        // Parser misconfiguration is a programming error, not an application-package problem.
        throw new RuntimeException(e);
    } catch (Exception e) {
        deployLogger.log(Level.INFO, String.format("Unable to parse pom.xml from %s", jarFilename));
    }
}
// Validates OSGi bundle jars in an application package: required manifest headers,
// snapshot-version warnings, deprecated imported packages, and deprecated Maven
// dependencies declared in the embedded pom.xml.
class BundleValidator extends Validator {

    public BundleValidator() {}

    @Override
    public void validate(VespaModel model, DeployState deployState) {
        ApplicationPackage app = deployState.getApplicationPackage();
        for (ComponentInfo info : app.getComponentsInfo(deployState.getVespaVersion())) {
            Path path = Path.fromString(info.getPathRelativeToAppDir());
            try {
                DeployLogger deployLogger = deployState.getDeployLogger();
                deployLogger.log(Level.FINE, String.format("Validating bundle at '%s'", path));
                JarFile jarFile = new JarFile(app.getFileReference(path));
                validateJarFile(deployLogger, jarFile);
            } catch (IOException e) {
                throw new IllegalArgumentException(
                        "Failed to validate JAR file '" + path.last() + "'", e);
            }
        }
    }

    // Validates one jar: manifest is mandatory; pom.xml validation is best-effort.
    void validateJarFile(DeployLogger deployLogger, JarFile jarFile) throws IOException {
        Manifest manifest = jarFile.getManifest();
        String filename = Paths.get(jarFile.getName()).getFileName().toString();
        if (manifest == null) {
            throw new IllegalArgumentException("Non-existing or invalid manifest in " + filename);
        }
        validateManifest(deployLogger, filename, manifest);
        getPomXmlContent(deployLogger, jarFile)
                .ifPresent(pomXml -> validatePomXml(deployLogger, filename, pomXml));
    }

    // Checks required OSGi headers, warns on SNAPSHOT versions, and validates imports.
    private void validateManifest(DeployLogger deployLogger, String filename, Manifest mf) {
        Attributes attributes = mf.getMainAttributes();
        HashSet<String> mfAttributes = new HashSet<>();
        for (Map.Entry<Object,Object> entry : attributes.entrySet()) {
            mfAttributes.add(entry.getKey().toString());
        }
        List<String> requiredOSGIHeaders = Arrays.asList(
                "Bundle-ManifestVersion", "Bundle-Name", "Bundle-SymbolicName", "Bundle-Version");
        for (String header : requiredOSGIHeaders) {
            if (!mfAttributes.contains(header)) {
                throw new IllegalArgumentException("Required OSGI header '" + header +
                        "' was not found in manifest in '" + filename + "'");
            }
        }
        if (attributes.getValue("Bundle-Version").endsWith(".SNAPSHOT")) {
            deployLogger.logApplicationPackage(Level.WARNING, "Deploying snapshot bundle " + filename +
                    ".\nTo use this bundle, you must include the qualifier 'SNAPSHOT' in the version specification in services.xml.");
        }
        if (attributes.getValue("Import-Package") != null) {
            validateImportedPackages(deployLogger, filename, mf);
        }
    }

    // Warns once per deprecated provided bundle, listing all matching imported packages.
    private static void validateImportedPackages(DeployLogger deployLogger, String filename, Manifest manifest) {
        Domain osgiHeaders = Domain.domain(manifest);
        Parameters importPackage = osgiHeaders.getImportPackage();
        Map<DeprecatedProvidedBundle, List<String>> deprecatedPackagesInUse = new HashMap<>();
        importPackage.forEach((packageName, attrs) -> {
            VersionRange versionRange = attrs.getVersion() != null
                    ? VersionRange.parseOSGiVersionRange(attrs.getVersion()) : null;
            for (DeprecatedProvidedBundle deprecatedBundle : DeprecatedProvidedBundle.values()) {
                for (Predicate<String> matcher : deprecatedBundle.javaPackageMatchers) {
                    if (matcher.test(packageName)
                            && (versionRange == null || deprecatedBundle.versionDiscriminator.test(versionRange))) {
                        deprecatedPackagesInUse.computeIfAbsent(deprecatedBundle, __ -> new ArrayList<>())
                                .add(packageName);
                    }
                }
            }
        });
        deprecatedPackagesInUse.forEach((artifact, packagesInUse) -> {
            deployLogger.logApplicationPackage(Level.WARNING,
                    String.format("For JAR file '%s': \n" +
                            "Manifest imports the following Java packages from '%s': %s. \n" +
                            "%s", filename, artifact.name, packagesInUse, artifact.description));
        });
    }

    private static final Pattern POM_FILE_LOCATION = Pattern.compile("META-INF/maven/.+?/.+?/pom.xml");

    // Returns the first embedded pom.xml's content, or empty; read failures log at INFO.
    private Optional<String> getPomXmlContent(DeployLogger deployLogger, JarFile jarFile) {
        return jarFile.stream()
                .filter(f -> POM_FILE_LOCATION.matcher(f.getName()).matches())
                .findFirst()
                .map(f -> {
                    try {
                        return new String(jarFile.getInputStream(f).readAllBytes());
                    } catch (IOException e) {
                        deployLogger.log(Level.INFO,
                                String.format("Unable to read '%s' from '%s'", f.getName(), jarFile.getName()));
                        return null;
                    }
                });
    }

    private enum DeprecatedMavenArtifact {
        VESPA_HTTP_CLIENT_EXTENSION("com.yahoo.vespa", "vespa-http-client-extensions",
                "The 'vespa-http-client-extensions' artifact will be removed in Vespa 8. " +
                "Programmatic use can be safely removed from system/staging tests. " +
                "See internal Vespa 8 release notes for details.");

        final String groupId;
        final String artifactId;
        final String description;

        DeprecatedMavenArtifact(String groupId, String artifactId, String description) {
            this.groupId = groupId;
            this.artifactId = artifactId;
            this.description = description;
        }
    }

    private enum DeprecatedProvidedBundle {
        /* NOTE(review): this constant appears garbled by text extraction — the description's
           URL ("See https:...") and, presumably, intervening text are missing; restore the
           original from version control before relying on this literal. */
        ORG_JSON("org.json:json", "The org.json library will no longer provided by jdisc runtime on Vespa 8. " +
                "See https: Set.of("org\\.json"));

        final String name;
        final Collection<Predicate<String>> javaPackageMatchers;
        final Predicate<VersionRange> versionDiscriminator;
        final String description;

        DeprecatedProvidedBundle(String name, String description, Collection<String> javaPackagePatterns) {
            this(name, description, __ -> true, javaPackagePatterns);
        }

        DeprecatedProvidedBundle(String name, String description,
                                 Predicate<VersionRange> versionDiscriminator,
                                 Collection<String> javaPackagePatterns) {
            this.name = name;
            this.javaPackageMatchers = javaPackagePatterns.stream()
                    .map(s -> Pattern.compile(s).asMatchPredicate())
                    .collect(Collectors.toList());
            this.versionDiscriminator = versionDiscriminator;
            this.description = description;
        }
    }
}
// Validates OSGi bundle jars in an application package: required manifest headers,
// snapshot-version warnings, deprecated imported packages, and deprecated Maven
// dependencies declared in the embedded pom.xml.
class BundleValidator extends Validator {

    public BundleValidator() {}

    @Override
    public void validate(VespaModel model, DeployState deployState) {
        ApplicationPackage app = deployState.getApplicationPackage();
        for (ComponentInfo info : app.getComponentsInfo(deployState.getVespaVersion())) {
            Path path = Path.fromString(info.getPathRelativeToAppDir());
            try {
                DeployLogger deployLogger = deployState.getDeployLogger();
                deployLogger.log(Level.FINE, String.format("Validating bundle at '%s'", path));
                JarFile jarFile = new JarFile(app.getFileReference(path));
                validateJarFile(deployLogger, jarFile);
            } catch (IOException e) {
                throw new IllegalArgumentException(
                        "Failed to validate JAR file '" + path.last() + "'", e);
            }
        }
    }

    // Validates one jar: manifest is mandatory; pom.xml validation is best-effort.
    void validateJarFile(DeployLogger deployLogger, JarFile jarFile) throws IOException {
        Manifest manifest = jarFile.getManifest();
        String filename = Paths.get(jarFile.getName()).getFileName().toString();
        if (manifest == null) {
            throw new IllegalArgumentException("Non-existing or invalid manifest in " + filename);
        }
        validateManifest(deployLogger, filename, manifest);
        getPomXmlContent(deployLogger, jarFile)
                .ifPresent(pomXml -> validatePomXml(deployLogger, filename, pomXml));
    }

    // Checks required OSGi headers, warns on SNAPSHOT versions, and validates imports.
    private void validateManifest(DeployLogger deployLogger, String filename, Manifest mf) {
        Attributes attributes = mf.getMainAttributes();
        HashSet<String> mfAttributes = new HashSet<>();
        for (Map.Entry<Object,Object> entry : attributes.entrySet()) {
            mfAttributes.add(entry.getKey().toString());
        }
        List<String> requiredOSGIHeaders = Arrays.asList(
                "Bundle-ManifestVersion", "Bundle-Name", "Bundle-SymbolicName", "Bundle-Version");
        for (String header : requiredOSGIHeaders) {
            if (!mfAttributes.contains(header)) {
                throw new IllegalArgumentException("Required OSGI header '" + header +
                        "' was not found in manifest in '" + filename + "'");
            }
        }
        if (attributes.getValue("Bundle-Version").endsWith(".SNAPSHOT")) {
            deployLogger.logApplicationPackage(Level.WARNING, "Deploying snapshot bundle " + filename +
                    ".\nTo use this bundle, you must include the qualifier 'SNAPSHOT' in the version specification in services.xml.");
        }
        if (attributes.getValue("Import-Package") != null) {
            validateImportedPackages(deployLogger, filename, mf);
        }
    }

    // Warns once per deprecated provided bundle, listing all matching imported packages.
    private static void validateImportedPackages(DeployLogger deployLogger, String filename, Manifest manifest) {
        Domain osgiHeaders = Domain.domain(manifest);
        Parameters importPackage = osgiHeaders.getImportPackage();
        Map<DeprecatedProvidedBundle, List<String>> deprecatedPackagesInUse = new HashMap<>();
        importPackage.forEach((packageName, attrs) -> {
            VersionRange versionRange = attrs.getVersion() != null
                    ? VersionRange.parseOSGiVersionRange(attrs.getVersion()) : null;
            for (DeprecatedProvidedBundle deprecatedBundle : DeprecatedProvidedBundle.values()) {
                for (Predicate<String> matcher : deprecatedBundle.javaPackageMatchers) {
                    if (matcher.test(packageName)
                            && (versionRange == null || deprecatedBundle.versionDiscriminator.test(versionRange))) {
                        deprecatedPackagesInUse.computeIfAbsent(deprecatedBundle, __ -> new ArrayList<>())
                                .add(packageName);
                    }
                }
            }
        });
        deprecatedPackagesInUse.forEach((artifact, packagesInUse) -> {
            deployLogger.logApplicationPackage(Level.WARNING,
                    String.format("For JAR file '%s': \n" +
                            "Manifest imports the following Java packages from '%s': %s. \n" +
                            "%s", filename, artifact.name, packagesInUse, artifact.description));
        });
    }

    private static final Pattern POM_FILE_LOCATION = Pattern.compile("META-INF/maven/.+?/.+?/pom.xml");

    // Returns the first embedded pom.xml's content, or empty; read failures log at INFO.
    private Optional<String> getPomXmlContent(DeployLogger deployLogger, JarFile jarFile) {
        return jarFile.stream()
                .filter(f -> POM_FILE_LOCATION.matcher(f.getName()).matches())
                .findFirst()
                .map(f -> {
                    try {
                        return new String(jarFile.getInputStream(f).readAllBytes());
                    } catch (IOException e) {
                        deployLogger.log(Level.INFO,
                                String.format("Unable to read '%s' from '%s'", f.getName(), jarFile.getName()));
                        return null;
                    }
                });
    }

    private enum DeprecatedMavenArtifact {
        VESPA_HTTP_CLIENT_EXTENSION("com.yahoo.vespa", "vespa-http-client-extensions",
                "This artifact will be removed in Vespa 8. " +
                "Programmatic use can be safely removed from system/staging tests. " +
                "See internal Vespa 8 release notes for details.");

        final String groupId;
        final String artifactId;
        final String description;

        DeprecatedMavenArtifact(String groupId, String artifactId, String description) {
            this.groupId = groupId;
            this.artifactId = artifactId;
            this.description = description;
        }
    }

    private enum DeprecatedProvidedBundle {
        /* NOTE(review): this constant appears garbled by text extraction — the description's
           URL ("See https:...") and, presumably, intervening text are missing; restore the
           original from version control before relying on this literal. */
        ORG_JSON("org.json:json", "The org.json library will no longer provided by jdisc runtime on Vespa 8. " +
                "See https: Set.of("org\\.json"));

        final String name;
        final Collection<Predicate<String>> javaPackageMatchers;
        final Predicate<VersionRange> versionDiscriminator;
        final String description;

        DeprecatedProvidedBundle(String name, String description, Collection<String> javaPackagePatterns) {
            this(name, description, __ -> true, javaPackagePatterns);
        }

        DeprecatedProvidedBundle(String name, String description,
                                 Predicate<VersionRange> versionDiscriminator,
                                 Collection<String> javaPackagePatterns) {
            this.name = name;
            this.javaPackageMatchers = javaPackagePatterns.stream()
                    .map(s -> Pattern.compile(s).asMatchPredicate())
                    .collect(Collectors.toList());
            this.versionDiscriminator = versionDiscriminator;
            this.description = description;
        }
    }
}
Fixed
private void validatePomXml(DeployLogger deployLogger, String jarFilename, String pomXmlContent) { try { Document pom = DocumentBuilderFactory.newDefaultInstance().newDocumentBuilder() .parse(new InputSource(new StringReader(pomXmlContent))); NodeList dependencies = (NodeList) XPathFactory.newDefaultInstance().newXPath() .compile("/project/dependencies/dependency") .evaluate(pom, XPathConstants.NODESET); for (int i = 0; i < dependencies.getLength(); i++) { Element dependency = (Element) dependencies.item(i); String groupId = dependency.getElementsByTagName("groupId").item(0).getTextContent(); String artifactId = dependency.getElementsByTagName("artifactId").item(0).getTextContent(); for (DeprecatedMavenArtifact deprecatedArtifact : DeprecatedMavenArtifact.values()) { if (groupId.equals(deprecatedArtifact.groupId) && artifactId.equals(deprecatedArtifact.artifactId)) { deployLogger.logApplicationPackage(Level.WARNING, String.format("For pom.xml in '%s': \n" + "The dependency %s:%s is listed below dependencies. \n" + "%s", jarFilename, groupId, artifactId, deprecatedArtifact.description)); } } } } catch (ParserConfigurationException e) { throw new RuntimeException(e); } catch (Exception e) { deployLogger.log(Level.INFO, String.format("Unable to parse pom.xml from %s", jarFilename)); } }
String.format("For pom.xml in '%s': \n" +
private void validatePomXml(DeployLogger deployLogger, String jarFilename, String pomXmlContent) { try { Document pom = DocumentBuilderFactory.newDefaultInstance().newDocumentBuilder() .parse(new InputSource(new StringReader(pomXmlContent))); NodeList dependencies = (NodeList) XPathFactory.newDefaultInstance().newXPath() .compile("/project/dependencies/dependency") .evaluate(pom, XPathConstants.NODESET); for (int i = 0; i < dependencies.getLength(); i++) { Element dependency = (Element) dependencies.item(i); String groupId = dependency.getElementsByTagName("groupId").item(0).getTextContent(); String artifactId = dependency.getElementsByTagName("artifactId").item(0).getTextContent(); for (DeprecatedMavenArtifact deprecatedArtifact : DeprecatedMavenArtifact.values()) { if (groupId.equals(deprecatedArtifact.groupId) && artifactId.equals(deprecatedArtifact.artifactId)) { deployLogger.logApplicationPackage(Level.WARNING, String.format( "The pom.xml of bundle '%s' includes a dependency to the artifact '%s:%s'. \n%s", jarFilename, groupId, artifactId, deprecatedArtifact.description)); } } } } catch (ParserConfigurationException e) { throw new RuntimeException(e); } catch (Exception e) { deployLogger.log(Level.INFO, String.format("Unable to parse pom.xml from %s", jarFilename)); } }
class BundleValidator extends Validator { public BundleValidator() {} @Override public void validate(VespaModel model, DeployState deployState) { ApplicationPackage app = deployState.getApplicationPackage(); for (ComponentInfo info : app.getComponentsInfo(deployState.getVespaVersion())) { Path path = Path.fromString(info.getPathRelativeToAppDir()); try { DeployLogger deployLogger = deployState.getDeployLogger(); deployLogger.log(Level.FINE, String.format("Validating bundle at '%s'", path)); JarFile jarFile = new JarFile(app.getFileReference(path)); validateJarFile(deployLogger, jarFile); } catch (IOException e) { throw new IllegalArgumentException( "Failed to validate JAR file '" + path.last() + "'", e); } } } void validateJarFile(DeployLogger deployLogger, JarFile jarFile) throws IOException { Manifest manifest = jarFile.getManifest(); String filename = Paths.get(jarFile.getName()).getFileName().toString(); if (manifest == null) { throw new IllegalArgumentException("Non-existing or invalid manifest in " + filename); } validateManifest(deployLogger, filename, manifest); getPomXmlContent(deployLogger, jarFile) .ifPresent(pomXml -> validatePomXml(deployLogger, filename, pomXml)); } private void validateManifest(DeployLogger deployLogger, String filename, Manifest mf) { Attributes attributes = mf.getMainAttributes(); HashSet<String> mfAttributes = new HashSet<>(); for (Map.Entry<Object,Object> entry : attributes.entrySet()) { mfAttributes.add(entry.getKey().toString()); } List<String> requiredOSGIHeaders = Arrays.asList( "Bundle-ManifestVersion", "Bundle-Name", "Bundle-SymbolicName", "Bundle-Version"); for (String header : requiredOSGIHeaders) { if (!mfAttributes.contains(header)) { throw new IllegalArgumentException("Required OSGI header '" + header + "' was not found in manifest in '" + filename + "'"); } } if (attributes.getValue("Bundle-Version").endsWith(".SNAPSHOT")) { deployLogger.logApplicationPackage(Level.WARNING, "Deploying snapshot bundle " + filename + 
".\nTo use this bundle, you must include the qualifier 'SNAPSHOT' in the version specification in services.xml."); } if (attributes.getValue("Import-Package") != null) { validateImportedPackages(deployLogger, filename, mf); } } private static void validateImportedPackages(DeployLogger deployLogger, String filename, Manifest manifest) { Domain osgiHeaders = Domain.domain(manifest); Parameters importPackage = osgiHeaders.getImportPackage(); Map<DeprecatedProvidedBundle, List<String>> deprecatedPackagesInUse = new HashMap<>(); importPackage.forEach((packageName, attrs) -> { VersionRange versionRange = attrs.getVersion() != null ? VersionRange.parseOSGiVersionRange(attrs.getVersion()) : null; for (DeprecatedProvidedBundle deprecatedBundle : DeprecatedProvidedBundle.values()) { for (Predicate<String> matcher : deprecatedBundle.javaPackageMatchers) { if (matcher.test(packageName) && (versionRange == null || deprecatedBundle.versionDiscriminator.test(versionRange))) { deprecatedPackagesInUse.computeIfAbsent(deprecatedBundle, __ -> new ArrayList<>()) .add(packageName); } } } }); deprecatedPackagesInUse.forEach((artifact, packagesInUse) -> { deployLogger.logApplicationPackage(Level.WARNING, String.format("For JAR file '%s': \n" + "Manifest imports the following Java packages from '%s': %s. 
\n" + "%s", filename, artifact.name, packagesInUse, artifact.description)); }); } private static final Pattern POM_FILE_LOCATION = Pattern.compile("META-INF/maven/.+?/.+?/pom.xml"); private Optional<String> getPomXmlContent(DeployLogger deployLogger, JarFile jarFile) { return jarFile.stream() .filter(f -> POM_FILE_LOCATION.matcher(f.getName()).matches()) .findFirst() .map(f -> { try { return new String(jarFile.getInputStream(f).readAllBytes()); } catch (IOException e) { deployLogger.log(Level.INFO, String.format("Unable to read '%s' from '%s'", f.getName(), jarFile.getName())); return null; } }); } private enum DeprecatedMavenArtifact { VESPA_HTTP_CLIENT_EXTENSION("com.yahoo.vespa", "vespa-http-client-extensions", "The 'vespa-http-client-extensions' artifact will be removed in Vespa 8. " + "Programmatic use can be safely removed from system/staging tests. " + "See internal Vespa 8 release notes for details."); final String groupId; final String artifactId; final String description; DeprecatedMavenArtifact(String groupId, String artifactId, String description) { this.groupId = groupId; this.artifactId = artifactId; this.description = description; } } private enum DeprecatedProvidedBundle { ORG_JSON("org.json:json", "The org.json library will no longer provided by jdisc runtime on Vespa 8. 
" + "See https: Set.of("org\\.json")); final String name; final Collection<Predicate<String>> javaPackageMatchers; final Predicate<VersionRange> versionDiscriminator; final String description; DeprecatedProvidedBundle(String name, String description, Collection<String> javaPackagePatterns) { this(name, description, __ -> true, javaPackagePatterns); } DeprecatedProvidedBundle(String name, String description, Predicate<VersionRange> versionDiscriminator, Collection<String> javaPackagePatterns) { this.name = name; this.javaPackageMatchers = javaPackagePatterns.stream() .map(s -> Pattern.compile(s).asMatchPredicate()) .collect(Collectors.toList()); this.versionDiscriminator = versionDiscriminator; this.description = description; } } }
class BundleValidator extends Validator { public BundleValidator() {} @Override public void validate(VespaModel model, DeployState deployState) { ApplicationPackage app = deployState.getApplicationPackage(); for (ComponentInfo info : app.getComponentsInfo(deployState.getVespaVersion())) { Path path = Path.fromString(info.getPathRelativeToAppDir()); try { DeployLogger deployLogger = deployState.getDeployLogger(); deployLogger.log(Level.FINE, String.format("Validating bundle at '%s'", path)); JarFile jarFile = new JarFile(app.getFileReference(path)); validateJarFile(deployLogger, jarFile); } catch (IOException e) { throw new IllegalArgumentException( "Failed to validate JAR file '" + path.last() + "'", e); } } } void validateJarFile(DeployLogger deployLogger, JarFile jarFile) throws IOException { Manifest manifest = jarFile.getManifest(); String filename = Paths.get(jarFile.getName()).getFileName().toString(); if (manifest == null) { throw new IllegalArgumentException("Non-existing or invalid manifest in " + filename); } validateManifest(deployLogger, filename, manifest); getPomXmlContent(deployLogger, jarFile) .ifPresent(pomXml -> validatePomXml(deployLogger, filename, pomXml)); } private void validateManifest(DeployLogger deployLogger, String filename, Manifest mf) { Attributes attributes = mf.getMainAttributes(); HashSet<String> mfAttributes = new HashSet<>(); for (Map.Entry<Object,Object> entry : attributes.entrySet()) { mfAttributes.add(entry.getKey().toString()); } List<String> requiredOSGIHeaders = Arrays.asList( "Bundle-ManifestVersion", "Bundle-Name", "Bundle-SymbolicName", "Bundle-Version"); for (String header : requiredOSGIHeaders) { if (!mfAttributes.contains(header)) { throw new IllegalArgumentException("Required OSGI header '" + header + "' was not found in manifest in '" + filename + "'"); } } if (attributes.getValue("Bundle-Version").endsWith(".SNAPSHOT")) { deployLogger.logApplicationPackage(Level.WARNING, "Deploying snapshot bundle " + filename + 
".\nTo use this bundle, you must include the qualifier 'SNAPSHOT' in the version specification in services.xml."); } if (attributes.getValue("Import-Package") != null) { validateImportedPackages(deployLogger, filename, mf); } } private static void validateImportedPackages(DeployLogger deployLogger, String filename, Manifest manifest) { Domain osgiHeaders = Domain.domain(manifest); Parameters importPackage = osgiHeaders.getImportPackage(); Map<DeprecatedProvidedBundle, List<String>> deprecatedPackagesInUse = new HashMap<>(); importPackage.forEach((packageName, attrs) -> { VersionRange versionRange = attrs.getVersion() != null ? VersionRange.parseOSGiVersionRange(attrs.getVersion()) : null; for (DeprecatedProvidedBundle deprecatedBundle : DeprecatedProvidedBundle.values()) { for (Predicate<String> matcher : deprecatedBundle.javaPackageMatchers) { if (matcher.test(packageName) && (versionRange == null || deprecatedBundle.versionDiscriminator.test(versionRange))) { deprecatedPackagesInUse.computeIfAbsent(deprecatedBundle, __ -> new ArrayList<>()) .add(packageName); } } } }); deprecatedPackagesInUse.forEach((artifact, packagesInUse) -> { deployLogger.logApplicationPackage(Level.WARNING, String.format("For JAR file '%s': \n" + "Manifest imports the following Java packages from '%s': %s. 
\n" + "%s", filename, artifact.name, packagesInUse, artifact.description)); }); } private static final Pattern POM_FILE_LOCATION = Pattern.compile("META-INF/maven/.+?/.+?/pom.xml"); private Optional<String> getPomXmlContent(DeployLogger deployLogger, JarFile jarFile) { return jarFile.stream() .filter(f -> POM_FILE_LOCATION.matcher(f.getName()).matches()) .findFirst() .map(f -> { try { return new String(jarFile.getInputStream(f).readAllBytes()); } catch (IOException e) { deployLogger.log(Level.INFO, String.format("Unable to read '%s' from '%s'", f.getName(), jarFile.getName())); return null; } }); } private enum DeprecatedMavenArtifact { VESPA_HTTP_CLIENT_EXTENSION("com.yahoo.vespa", "vespa-http-client-extensions", "This artifact will be removed in Vespa 8. " + "Programmatic use can be safely removed from system/staging tests. " + "See internal Vespa 8 release notes for details."); final String groupId; final String artifactId; final String description; DeprecatedMavenArtifact(String groupId, String artifactId, String description) { this.groupId = groupId; this.artifactId = artifactId; this.description = description; } } private enum DeprecatedProvidedBundle { ORG_JSON("org.json:json", "The org.json library will no longer provided by jdisc runtime on Vespa 8. 
" + "See https: Set.of("org\\.json")); final String name; final Collection<Predicate<String>> javaPackageMatchers; final Predicate<VersionRange> versionDiscriminator; final String description; DeprecatedProvidedBundle(String name, String description, Collection<String> javaPackagePatterns) { this(name, description, __ -> true, javaPackagePatterns); } DeprecatedProvidedBundle(String name, String description, Predicate<VersionRange> versionDiscriminator, Collection<String> javaPackagePatterns) { this.name = name; this.javaPackageMatchers = javaPackagePatterns.stream() .map(s -> Pattern.compile(s).asMatchPredicate()) .collect(Collectors.toList()); this.versionDiscriminator = versionDiscriminator; this.description = description; } } }
Who does the waiting here? I guess it could take a long time.
private void restartServices(ApplicationId applicationId) { RestartActions restartActions = configChangeActions.getRestartActions().useForInternalRestart(internalRedeploy); if ( ! restartActions.isEmpty()) { waitForConfigToConverge(applicationId); Set<String> hostnames = restartActions.getEntries().stream() .flatMap(entry -> entry.getServices().stream()) .map(ServiceInfo::getHostName) .collect(Collectors.toUnmodifiableSet()); provisioner.get().restart(applicationId, HostFilter.from(hostnames, Set.of(), Set.of(), Set.of())); deployLogger.log(Level.INFO, String.format("Scheduled service restart of %d nodes: %s", hostnames.size(), hostnames.stream().sorted().collect(Collectors.joining(", ")))); log.info(String.format("%sScheduled service restart of %d nodes: %s", session.logPre(), hostnames.size(), restartActions.format())); this.configChangeActions = new ConfigChangeActions( new RestartActions(), configChangeActions.getRefeedActions(), configChangeActions.getReindexActions()); } }
waitForConfigToConverge(applicationId);
private void restartServices(ApplicationId applicationId) { RestartActions restartActions = configChangeActions.getRestartActions().useForInternalRestart(internalRedeploy); if ( ! restartActions.isEmpty()) { waitForConfigToConverge(applicationId); Set<String> hostnames = restartActions.getEntries().stream() .flatMap(entry -> entry.getServices().stream()) .map(ServiceInfo::getHostName) .collect(Collectors.toUnmodifiableSet()); provisioner.get().restart(applicationId, HostFilter.from(hostnames, Set.of(), Set.of(), Set.of())); deployLogger.log(Level.INFO, String.format("Scheduled service restart of %d nodes: %s", hostnames.size(), hostnames.stream().sorted().collect(Collectors.joining(", ")))); log.info(String.format("%sScheduled service restart of %d nodes: %s", session.logPre(), hostnames.size(), restartActions.format())); this.configChangeActions = new ConfigChangeActions( new RestartActions(), configChangeActions.getRefeedActions(), configChangeActions.getReindexActions()); } }
class Deployment implements com.yahoo.config.provision.Deployment { private static final Logger log = Logger.getLogger(Deployment.class.getName()); private static final Duration durationBetweenResourceReadyChecks = Duration.ofSeconds(60); /** The session containing the application instance to activate */ private final Session session; private final ApplicationRepository applicationRepository; private final Supplier<PrepareParams> params; private final Optional<Provisioner> provisioner; private final Tenant tenant; private final DeployLogger deployLogger; private final Clock clock; private final boolean internalRedeploy; private boolean prepared; private ConfigChangeActions configChangeActions; private Deployment(Session session, ApplicationRepository applicationRepository, Supplier<PrepareParams> params, Optional<Provisioner> provisioner, Tenant tenant, DeployLogger deployLogger, Clock clock, boolean internalRedeploy, boolean prepared) { this.session = session; this.applicationRepository = applicationRepository; this.params = params; this.provisioner = provisioner; this.tenant = tenant; this.deployLogger = deployLogger; this.clock = clock; this.internalRedeploy = internalRedeploy; this.prepared = prepared; } public static Deployment unprepared(Session session, ApplicationRepository applicationRepository, Optional<Provisioner> provisioner, Tenant tenant, PrepareParams params, DeployLogger logger, Clock clock) { return new Deployment(session, applicationRepository, () -> params, provisioner, tenant, logger, clock, false, false); } public static Deployment unprepared(Session session, ApplicationRepository applicationRepository, Optional<Provisioner> provisioner, Tenant tenant, DeployLogger logger, Duration timeout, Clock clock, boolean validate, boolean isBootstrap) { Supplier<PrepareParams> params = createPrepareParams(clock, timeout, session, isBootstrap, !validate, false, true); return new Deployment(session, applicationRepository, params, provisioner, tenant, 
logger, clock, true, false); } public static Deployment prepared(Session session, ApplicationRepository applicationRepository, Optional<Provisioner> provisioner, Tenant tenant, DeployLogger logger, Duration timeout, Clock clock, boolean isBootstrap, boolean force) { Supplier<PrepareParams> params = createPrepareParams(clock, timeout, session, isBootstrap, false, force, false); return new Deployment(session, applicationRepository, params, provisioner, tenant, logger, clock, false, true); } /** Prepares this. This does nothing if this is already prepared */ @Override public void prepare() { if (prepared) return; PrepareParams params = this.params.get(); ApplicationId applicationId = params.getApplicationId(); try (ActionTimer timer = applicationRepository.timerFor(applicationId, "deployment.prepareMillis")) { this.configChangeActions = tenant.getSessionRepository().prepareLocalSession(session, deployLogger, params, clock.instant()); this.prepared = true; } waitForResourcesOrTimeout(params, session, provisioner); } /** Activates this. If it is not already prepared, this will call prepare first. 
*/ @Override public long activate() { prepare(); validateSessionStatus(session); PrepareParams params = this.params.get(); ApplicationId applicationId = session.getApplicationId(); try (ActionTimer timer = applicationRepository.timerFor(applicationId, "deployment.activateMillis")) { TimeoutBudget timeoutBudget = params.getTimeoutBudget(); timeoutBudget.assertNotTimedOut(() -> "Timeout exceeded when trying to activate '" + applicationId + "'"); Activation activation = applicationRepository.activate(session, applicationId, tenant, params.force()); activation.awaitCompletion(timeoutBudget.timeLeft()); logActivatedMessage(applicationId, activation); if (provisioner.isPresent() && configChangeActions != null) restartServices(applicationId); storeReindexing(applicationId, session.getMetaData().getGeneration()); return session.getMetaData().getGeneration(); } } private void logActivatedMessage(ApplicationId applicationId, Activation activation) { Set<FileReference> fileReferences = applicationRepository.getFileReferences(applicationId); String fileReferencesText = fileReferences.size() > 10 ? " " + fileReferences.size() + " file references" : "File references: " + fileReferences; log.log(Level.INFO, session.logPre() + "Session " + session.getSessionId() + " activated successfully using " + provisioner.map(provisioner -> provisioner.getClass().getSimpleName()).orElse("no host provisioner") + ". Config generation " + session.getMetaData().getGeneration() + activation.sourceSessionId().stream().mapToObj(id -> ". Based on session " + id).findFirst().orElse("") + ". " + fileReferencesText); } private void waitForConfigToConverge(ApplicationId applicationId) { BooleanFlag verify = Flags.CHECK_CONFIG_CONVERGENCE_BEFORE_RESTARTING.bindTo(applicationRepository.flagSource()); if ( ! 
verify.value()) return; Instant end = clock.instant().plus(Duration.ofMinutes(10)); Duration timeout = Duration.ofSeconds(10); do { Application app = applicationRepository.getActiveApplication(applicationId); log.info("Wait for services in " + applicationId + " to converge on new generation before restarting"); ConfigConvergenceChecker convergenceChecker = applicationRepository.configConvergenceChecker(); ServiceListResponse response = convergenceChecker.getConfigGenerationsForAllServices(app, timeout); if (response.converged) { log.info("services converged on new generation " + response.currentGeneration); return; } else { log.info("services not converged on new generation, wanted generation: " + response.wantedGeneration + ", current generation: " + response.currentGeneration + ", will retry"); try { Thread.sleep(10_000); } catch (InterruptedException e) { /* ignore */ } } } while (clock.instant().isBefore(end)); throw new RuntimeException("Config has not converged"); } private void storeReindexing(ApplicationId applicationId, long requiredSession) { applicationRepository.modifyReindexing(applicationId, reindexing -> { if (configChangeActions != null) for (ReindexActions.Entry entry : configChangeActions.getReindexActions().getEntries()) reindexing = reindexing.withPending(entry.getClusterName(), entry.getDocumentType(), requiredSession); return reindexing; }); } /** * Request a restart of services of this application on hosts matching the filter. * This is sometimes needed after activation, but can also be requested without * doing prepare and activate in the same session. 
*/ @Override public void restart(HostFilter filter) { provisioner.get().restart(session.getApplicationId(), filter); } /** Exposes the session of this for testing only */ public Session session() { return session; } /** * @return config change actions that need to be performed as result of prepare * @throws IllegalArgumentException if called without being prepared by this */ public ConfigChangeActions configChangeActions() { if (configChangeActions != null) return configChangeActions; throw new IllegalArgumentException("No config change actions: " + (prepared ? "was already prepared" : "not yet prepared")); } private void validateSessionStatus(Session session) { long sessionId = session.getSessionId(); if (Session.Status.NEW.equals(session.getStatus())) { throw new IllegalArgumentException(session.logPre() + "Session " + sessionId + " is not prepared"); } else if (Session.Status.ACTIVATE.equals(session.getStatus())) { throw new IllegalArgumentException(session.logPre() + "Session " + sessionId + " is already active"); } } /** * @param clock system clock * @param timeout total timeout duration of prepare + activate * @param session the local session for this deployment * @param isBootstrap true if this deployment is done to bootstrap the config server * @param ignoreValidationErrors whether this model should be validated * @param force whether activation of this model should be forced */ private static Supplier<PrepareParams> createPrepareParams( Clock clock, Duration timeout, Session session, boolean isBootstrap, boolean ignoreValidationErrors, boolean force, boolean waitForResourcesInPrepare) { return Suppliers.memoize(() -> { TimeoutBudget timeoutBudget = new TimeoutBudget(clock, timeout); PrepareParams.Builder params = new PrepareParams.Builder() .applicationId(session.getApplicationId()) .vespaVersion(session.getVespaVersion().toString()) .timeoutBudget(timeoutBudget) .ignoreValidationErrors(ignoreValidationErrors) .isBootstrap(isBootstrap) .force(force) 
.waitForResourcesInPrepare(waitForResourcesInPrepare) .tenantSecretStores(session.getTenantSecretStores()); session.getDockerImageRepository().ifPresent(params::dockerImageRepository); session.getAthenzDomain().ifPresent(params::athenzDomain); return params.build(); }); } private static void waitForResourcesOrTimeout(PrepareParams params, Session session, Optional<Provisioner> provisioner) { if (!params.waitForResourcesInPrepare() || provisioner.isEmpty()) return; Set<HostSpec> preparedHosts = session.getAllocatedHosts().getHosts(); ActivationContext context = new ActivationContext(session.getSessionId()); ProvisionLock lock = new ProvisionLock(session.getApplicationId(), () -> {}); AtomicReference<TransientException> lastException = new AtomicReference<>(); while (true) { params.getTimeoutBudget().assertNotTimedOut( () -> "Timeout exceeded while waiting for application resources of '" + session.getApplicationId() + "'" + Optional.ofNullable(lastException.get()).map(e -> ". Last exception: " + e.getMessage()).orElse("")); try { ApplicationTransaction transaction = new ApplicationTransaction(lock, new NestedTransaction()); provisioner.get().activate(preparedHosts, context, transaction); return; } catch (TransientException e) { lastException.set(e); try { Thread.sleep(durationBetweenResourceReadyChecks.toMillis()); } catch (InterruptedException e1) { throw new RuntimeException(e1); } } } } }
class Deployment implements com.yahoo.config.provision.Deployment { private static final Logger log = Logger.getLogger(Deployment.class.getName()); private static final Duration durationBetweenResourceReadyChecks = Duration.ofSeconds(60); /** The session containing the application instance to activate */ private final Session session; private final ApplicationRepository applicationRepository; private final Supplier<PrepareParams> params; private final Optional<Provisioner> provisioner; private final Tenant tenant; private final DeployLogger deployLogger; private final Clock clock; private final boolean internalRedeploy; private boolean prepared; private ConfigChangeActions configChangeActions; private Deployment(Session session, ApplicationRepository applicationRepository, Supplier<PrepareParams> params, Optional<Provisioner> provisioner, Tenant tenant, DeployLogger deployLogger, Clock clock, boolean internalRedeploy, boolean prepared) { this.session = session; this.applicationRepository = applicationRepository; this.params = params; this.provisioner = provisioner; this.tenant = tenant; this.deployLogger = deployLogger; this.clock = clock; this.internalRedeploy = internalRedeploy; this.prepared = prepared; } public static Deployment unprepared(Session session, ApplicationRepository applicationRepository, Optional<Provisioner> provisioner, Tenant tenant, PrepareParams params, DeployLogger logger, Clock clock) { return new Deployment(session, applicationRepository, () -> params, provisioner, tenant, logger, clock, false, false); } public static Deployment unprepared(Session session, ApplicationRepository applicationRepository, Optional<Provisioner> provisioner, Tenant tenant, DeployLogger logger, Duration timeout, Clock clock, boolean validate, boolean isBootstrap) { Supplier<PrepareParams> params = createPrepareParams(clock, timeout, session, isBootstrap, !validate, false, true); return new Deployment(session, applicationRepository, params, provisioner, tenant, 
logger, clock, true, false); } public static Deployment prepared(Session session, ApplicationRepository applicationRepository, Optional<Provisioner> provisioner, Tenant tenant, DeployLogger logger, Duration timeout, Clock clock, boolean isBootstrap, boolean force) { Supplier<PrepareParams> params = createPrepareParams(clock, timeout, session, isBootstrap, false, force, false); return new Deployment(session, applicationRepository, params, provisioner, tenant, logger, clock, false, true); } /** Prepares this. This does nothing if this is already prepared */ @Override public void prepare() { if (prepared) return; PrepareParams params = this.params.get(); ApplicationId applicationId = params.getApplicationId(); try (ActionTimer timer = applicationRepository.timerFor(applicationId, "deployment.prepareMillis")) { this.configChangeActions = tenant.getSessionRepository().prepareLocalSession(session, deployLogger, params, clock.instant()); this.prepared = true; } waitForResourcesOrTimeout(params, session, provisioner); } /** Activates this. If it is not already prepared, this will call prepare first. 
*/ @Override public long activate() { prepare(); validateSessionStatus(session); PrepareParams params = this.params.get(); ApplicationId applicationId = session.getApplicationId(); try (ActionTimer timer = applicationRepository.timerFor(applicationId, "deployment.activateMillis")) { TimeoutBudget timeoutBudget = params.getTimeoutBudget(); timeoutBudget.assertNotTimedOut(() -> "Timeout exceeded when trying to activate '" + applicationId + "'"); Activation activation = applicationRepository.activate(session, applicationId, tenant, params.force()); activation.awaitCompletion(timeoutBudget.timeLeft()); logActivatedMessage(applicationId, activation); if (provisioner.isPresent() && configChangeActions != null) restartServices(applicationId); storeReindexing(applicationId, session.getMetaData().getGeneration()); return session.getMetaData().getGeneration(); } } private void logActivatedMessage(ApplicationId applicationId, Activation activation) { Set<FileReference> fileReferences = applicationRepository.getFileReferences(applicationId); String fileReferencesText = fileReferences.size() > 10 ? " " + fileReferences.size() + " file references" : "File references: " + fileReferences; log.log(Level.INFO, session.logPre() + "Session " + session.getSessionId() + " activated successfully using " + provisioner.map(provisioner -> provisioner.getClass().getSimpleName()).orElse("no host provisioner") + ". Config generation " + session.getMetaData().getGeneration() + activation.sourceSessionId().stream().mapToObj(id -> ". Based on session " + id).findFirst().orElse("") + ". " + fileReferencesText); } private void waitForConfigToConverge(ApplicationId applicationId) { BooleanFlag verify = Flags.CHECK_CONFIG_CONVERGENCE_BEFORE_RESTARTING.bindTo(applicationRepository.flagSource()); if ( ! 
verify.value()) return; Duration timeout = Duration.ofSeconds(10); while (true) { try { params.get().getTimeoutBudget().assertNotTimedOut( () -> "Timeout exceeded while waiting for config convergence for " + applicationId); } catch (UncheckedTimeoutException e) { throw new ConfigNotConvergedException(e); } Application app = applicationRepository.getActiveApplication(applicationId); log.info(session.logPre() + "Wait for services to converge on new generation before restarting"); ConfigConvergenceChecker convergenceChecker = applicationRepository.configConvergenceChecker(); ServiceListResponse response = convergenceChecker.getConfigGenerationsForAllServices(app, timeout); if (response.converged) { log.info(session.logPre() + "Services converged on new generation " + response.currentGeneration); return; } else { log.info(session.logPre() + "Services not converged on new generation, wanted generation: " + response.wantedGeneration + ", current generation: " + response.currentGeneration + ", will retry"); try { Thread.sleep(10_000); } catch (InterruptedException e) { /* ignore */ } } } } private void storeReindexing(ApplicationId applicationId, long requiredSession) { applicationRepository.modifyReindexing(applicationId, reindexing -> { if (configChangeActions != null) for (ReindexActions.Entry entry : configChangeActions.getReindexActions().getEntries()) reindexing = reindexing.withPending(entry.getClusterName(), entry.getDocumentType(), requiredSession); return reindexing; }); } /** * Request a restart of services of this application on hosts matching the filter. * This is sometimes needed after activation, but can also be requested without * doing prepare and activate in the same session. 
*/ @Override public void restart(HostFilter filter) { provisioner.get().restart(session.getApplicationId(), filter); } /** Exposes the session of this for testing only */ public Session session() { return session; } /** * @return config change actions that need to be performed as result of prepare * @throws IllegalArgumentException if called without being prepared by this */ public ConfigChangeActions configChangeActions() { if (configChangeActions != null) return configChangeActions; throw new IllegalArgumentException("No config change actions: " + (prepared ? "was already prepared" : "not yet prepared")); } private void validateSessionStatus(Session session) { long sessionId = session.getSessionId(); if (Session.Status.NEW.equals(session.getStatus())) { throw new IllegalArgumentException(session.logPre() + "Session " + sessionId + " is not prepared"); } else if (Session.Status.ACTIVATE.equals(session.getStatus())) { throw new IllegalArgumentException(session.logPre() + "Session " + sessionId + " is already active"); } } /** * @param clock system clock * @param timeout total timeout duration of prepare + activate * @param session the local session for this deployment * @param isBootstrap true if this deployment is done to bootstrap the config server * @param ignoreValidationErrors whether this model should be validated * @param force whether activation of this model should be forced */ private static Supplier<PrepareParams> createPrepareParams( Clock clock, Duration timeout, Session session, boolean isBootstrap, boolean ignoreValidationErrors, boolean force, boolean waitForResourcesInPrepare) { return Suppliers.memoize(() -> { TimeoutBudget timeoutBudget = new TimeoutBudget(clock, timeout); PrepareParams.Builder params = new PrepareParams.Builder() .applicationId(session.getApplicationId()) .vespaVersion(session.getVespaVersion().toString()) .timeoutBudget(timeoutBudget) .ignoreValidationErrors(ignoreValidationErrors) .isBootstrap(isBootstrap) .force(force) 
.waitForResourcesInPrepare(waitForResourcesInPrepare) .tenantSecretStores(session.getTenantSecretStores()); session.getDockerImageRepository().ifPresent(params::dockerImageRepository); session.getAthenzDomain().ifPresent(params::athenzDomain); return params.build(); }); } private static void waitForResourcesOrTimeout(PrepareParams params, Session session, Optional<Provisioner> provisioner) { if (!params.waitForResourcesInPrepare() || provisioner.isEmpty()) return; Set<HostSpec> preparedHosts = session.getAllocatedHosts().getHosts(); ActivationContext context = new ActivationContext(session.getSessionId()); ProvisionLock lock = new ProvisionLock(session.getApplicationId(), () -> {}); AtomicReference<TransientException> lastException = new AtomicReference<>(); while (true) { params.getTimeoutBudget().assertNotTimedOut( () -> "Timeout exceeded while waiting for application resources of '" + session.getApplicationId() + "'" + Optional.ofNullable(lastException.get()).map(e -> ". Last exception: " + e.getMessage()).orElse("")); try { ApplicationTransaction transaction = new ApplicationTransaction(lock, new NestedTransaction()); provisioner.get().activate(preparedHosts, context, transaction); return; } catch (TransientException e) { lastException.set(e); try { Thread.sleep(durationBetweenResourceReadyChecks.toMillis()); } catch (InterruptedException e1) { throw new RuntimeException(e1); } } } } }
I suppose this'll ultimately lead to a failed deployment, not retried by the controller during deployment jobs. Should we retry? If so, we need some way to communicate this.
private void waitForConfigToConverge(ApplicationId applicationId) { BooleanFlag verify = Flags.CHECK_CONFIG_CONVERGENCE_BEFORE_RESTARTING.bindTo(applicationRepository.flagSource()); if ( ! verify.value()) return; Instant end = clock.instant().plus(Duration.ofMinutes(10)); Duration timeout = Duration.ofSeconds(10); do { Application app = applicationRepository.getActiveApplication(applicationId); log.info("Wait for services in " + applicationId + " to converge on new generation before restarting"); ConfigConvergenceChecker convergenceChecker = applicationRepository.configConvergenceChecker(); ServiceListResponse response = convergenceChecker.getConfigGenerationsForAllServices(app, timeout); if (response.converged) { log.info("services converged on new generation " + response.currentGeneration); return; } else { log.info("services not converged on new generation, wanted generation: " + response.wantedGeneration + ", current generation: " + response.currentGeneration + ", will retry"); try { Thread.sleep(10_000); } catch (InterruptedException e) { /* ignore */ } } } while (clock.instant().isBefore(end)); throw new RuntimeException("Config has not converged"); }
throw new RuntimeException("Config has not converged");
private void waitForConfigToConverge(ApplicationId applicationId) { BooleanFlag verify = Flags.CHECK_CONFIG_CONVERGENCE_BEFORE_RESTARTING.bindTo(applicationRepository.flagSource()); if ( ! verify.value()) return; Duration timeout = Duration.ofSeconds(10); while (true) { try { params.get().getTimeoutBudget().assertNotTimedOut( () -> "Timeout exceeded while waiting for config convergence for " + applicationId); } catch (UncheckedTimeoutException e) { throw new ConfigNotConvergedException(e); } Application app = applicationRepository.getActiveApplication(applicationId); log.info(session.logPre() + "Wait for services to converge on new generation before restarting"); ConfigConvergenceChecker convergenceChecker = applicationRepository.configConvergenceChecker(); ServiceListResponse response = convergenceChecker.getConfigGenerationsForAllServices(app, timeout); if (response.converged) { log.info(session.logPre() + "Services converged on new generation " + response.currentGeneration); return; } else { log.info(session.logPre() + "Services not converged on new generation, wanted generation: " + response.wantedGeneration + ", current generation: " + response.currentGeneration + ", will retry"); try { Thread.sleep(10_000); } catch (InterruptedException e) { /* ignore */ } } } }
class Deployment implements com.yahoo.config.provision.Deployment { private static final Logger log = Logger.getLogger(Deployment.class.getName()); private static final Duration durationBetweenResourceReadyChecks = Duration.ofSeconds(60); /** The session containing the application instance to activate */ private final Session session; private final ApplicationRepository applicationRepository; private final Supplier<PrepareParams> params; private final Optional<Provisioner> provisioner; private final Tenant tenant; private final DeployLogger deployLogger; private final Clock clock; private final boolean internalRedeploy; private boolean prepared; private ConfigChangeActions configChangeActions; private Deployment(Session session, ApplicationRepository applicationRepository, Supplier<PrepareParams> params, Optional<Provisioner> provisioner, Tenant tenant, DeployLogger deployLogger, Clock clock, boolean internalRedeploy, boolean prepared) { this.session = session; this.applicationRepository = applicationRepository; this.params = params; this.provisioner = provisioner; this.tenant = tenant; this.deployLogger = deployLogger; this.clock = clock; this.internalRedeploy = internalRedeploy; this.prepared = prepared; } public static Deployment unprepared(Session session, ApplicationRepository applicationRepository, Optional<Provisioner> provisioner, Tenant tenant, PrepareParams params, DeployLogger logger, Clock clock) { return new Deployment(session, applicationRepository, () -> params, provisioner, tenant, logger, clock, false, false); } public static Deployment unprepared(Session session, ApplicationRepository applicationRepository, Optional<Provisioner> provisioner, Tenant tenant, DeployLogger logger, Duration timeout, Clock clock, boolean validate, boolean isBootstrap) { Supplier<PrepareParams> params = createPrepareParams(clock, timeout, session, isBootstrap, !validate, false, true); return new Deployment(session, applicationRepository, params, provisioner, tenant, 
logger, clock, true, false); } public static Deployment prepared(Session session, ApplicationRepository applicationRepository, Optional<Provisioner> provisioner, Tenant tenant, DeployLogger logger, Duration timeout, Clock clock, boolean isBootstrap, boolean force) { Supplier<PrepareParams> params = createPrepareParams(clock, timeout, session, isBootstrap, false, force, false); return new Deployment(session, applicationRepository, params, provisioner, tenant, logger, clock, false, true); } /** Prepares this. This does nothing if this is already prepared */ @Override public void prepare() { if (prepared) return; PrepareParams params = this.params.get(); ApplicationId applicationId = params.getApplicationId(); try (ActionTimer timer = applicationRepository.timerFor(applicationId, "deployment.prepareMillis")) { this.configChangeActions = tenant.getSessionRepository().prepareLocalSession(session, deployLogger, params, clock.instant()); this.prepared = true; } waitForResourcesOrTimeout(params, session, provisioner); } /** Activates this. If it is not already prepared, this will call prepare first. 
*/ @Override public long activate() { prepare(); validateSessionStatus(session); PrepareParams params = this.params.get(); ApplicationId applicationId = session.getApplicationId(); try (ActionTimer timer = applicationRepository.timerFor(applicationId, "deployment.activateMillis")) { TimeoutBudget timeoutBudget = params.getTimeoutBudget(); timeoutBudget.assertNotTimedOut(() -> "Timeout exceeded when trying to activate '" + applicationId + "'"); Activation activation = applicationRepository.activate(session, applicationId, tenant, params.force()); activation.awaitCompletion(timeoutBudget.timeLeft()); logActivatedMessage(applicationId, activation); if (provisioner.isPresent() && configChangeActions != null) restartServices(applicationId); storeReindexing(applicationId, session.getMetaData().getGeneration()); return session.getMetaData().getGeneration(); } } private void logActivatedMessage(ApplicationId applicationId, Activation activation) { Set<FileReference> fileReferences = applicationRepository.getFileReferences(applicationId); String fileReferencesText = fileReferences.size() > 10 ? " " + fileReferences.size() + " file references" : "File references: " + fileReferences; log.log(Level.INFO, session.logPre() + "Session " + session.getSessionId() + " activated successfully using " + provisioner.map(provisioner -> provisioner.getClass().getSimpleName()).orElse("no host provisioner") + ". Config generation " + session.getMetaData().getGeneration() + activation.sourceSessionId().stream().mapToObj(id -> ". Based on session " + id).findFirst().orElse("") + ". " + fileReferencesText); } private void restartServices(ApplicationId applicationId) { RestartActions restartActions = configChangeActions.getRestartActions().useForInternalRestart(internalRedeploy); if ( ! 
restartActions.isEmpty()) { waitForConfigToConverge(applicationId); Set<String> hostnames = restartActions.getEntries().stream() .flatMap(entry -> entry.getServices().stream()) .map(ServiceInfo::getHostName) .collect(Collectors.toUnmodifiableSet()); provisioner.get().restart(applicationId, HostFilter.from(hostnames, Set.of(), Set.of(), Set.of())); deployLogger.log(Level.INFO, String.format("Scheduled service restart of %d nodes: %s", hostnames.size(), hostnames.stream().sorted().collect(Collectors.joining(", ")))); log.info(String.format("%sScheduled service restart of %d nodes: %s", session.logPre(), hostnames.size(), restartActions.format())); this.configChangeActions = new ConfigChangeActions( new RestartActions(), configChangeActions.getRefeedActions(), configChangeActions.getReindexActions()); } } private void storeReindexing(ApplicationId applicationId, long requiredSession) { applicationRepository.modifyReindexing(applicationId, reindexing -> { if (configChangeActions != null) for (ReindexActions.Entry entry : configChangeActions.getReindexActions().getEntries()) reindexing = reindexing.withPending(entry.getClusterName(), entry.getDocumentType(), requiredSession); return reindexing; }); } /** * Request a restart of services of this application on hosts matching the filter. * This is sometimes needed after activation, but can also be requested without * doing prepare and activate in the same session. */ @Override public void restart(HostFilter filter) { provisioner.get().restart(session.getApplicationId(), filter); } /** Exposes the session of this for testing only */ public Session session() { return session; } /** * @return config change actions that need to be performed as result of prepare * @throws IllegalArgumentException if called without being prepared by this */ public ConfigChangeActions configChangeActions() { if (configChangeActions != null) return configChangeActions; throw new IllegalArgumentException("No config change actions: " + (prepared ? 
"was already prepared" : "not yet prepared")); } private void validateSessionStatus(Session session) { long sessionId = session.getSessionId(); if (Session.Status.NEW.equals(session.getStatus())) { throw new IllegalArgumentException(session.logPre() + "Session " + sessionId + " is not prepared"); } else if (Session.Status.ACTIVATE.equals(session.getStatus())) { throw new IllegalArgumentException(session.logPre() + "Session " + sessionId + " is already active"); } } /** * @param clock system clock * @param timeout total timeout duration of prepare + activate * @param session the local session for this deployment * @param isBootstrap true if this deployment is done to bootstrap the config server * @param ignoreValidationErrors whether this model should be validated * @param force whether activation of this model should be forced */ private static Supplier<PrepareParams> createPrepareParams( Clock clock, Duration timeout, Session session, boolean isBootstrap, boolean ignoreValidationErrors, boolean force, boolean waitForResourcesInPrepare) { return Suppliers.memoize(() -> { TimeoutBudget timeoutBudget = new TimeoutBudget(clock, timeout); PrepareParams.Builder params = new PrepareParams.Builder() .applicationId(session.getApplicationId()) .vespaVersion(session.getVespaVersion().toString()) .timeoutBudget(timeoutBudget) .ignoreValidationErrors(ignoreValidationErrors) .isBootstrap(isBootstrap) .force(force) .waitForResourcesInPrepare(waitForResourcesInPrepare) .tenantSecretStores(session.getTenantSecretStores()); session.getDockerImageRepository().ifPresent(params::dockerImageRepository); session.getAthenzDomain().ifPresent(params::athenzDomain); return params.build(); }); } private static void waitForResourcesOrTimeout(PrepareParams params, Session session, Optional<Provisioner> provisioner) { if (!params.waitForResourcesInPrepare() || provisioner.isEmpty()) return; Set<HostSpec> preparedHosts = session.getAllocatedHosts().getHosts(); ActivationContext context = new 
ActivationContext(session.getSessionId()); ProvisionLock lock = new ProvisionLock(session.getApplicationId(), () -> {}); AtomicReference<TransientException> lastException = new AtomicReference<>(); while (true) { params.getTimeoutBudget().assertNotTimedOut( () -> "Timeout exceeded while waiting for application resources of '" + session.getApplicationId() + "'" + Optional.ofNullable(lastException.get()).map(e -> ". Last exception: " + e.getMessage()).orElse("")); try { ApplicationTransaction transaction = new ApplicationTransaction(lock, new NestedTransaction()); provisioner.get().activate(preparedHosts, context, transaction); return; } catch (TransientException e) { lastException.set(e); try { Thread.sleep(durationBetweenResourceReadyChecks.toMillis()); } catch (InterruptedException e1) { throw new RuntimeException(e1); } } } } }
class Deployment implements com.yahoo.config.provision.Deployment { private static final Logger log = Logger.getLogger(Deployment.class.getName()); private static final Duration durationBetweenResourceReadyChecks = Duration.ofSeconds(60); /** The session containing the application instance to activate */ private final Session session; private final ApplicationRepository applicationRepository; private final Supplier<PrepareParams> params; private final Optional<Provisioner> provisioner; private final Tenant tenant; private final DeployLogger deployLogger; private final Clock clock; private final boolean internalRedeploy; private boolean prepared; private ConfigChangeActions configChangeActions; private Deployment(Session session, ApplicationRepository applicationRepository, Supplier<PrepareParams> params, Optional<Provisioner> provisioner, Tenant tenant, DeployLogger deployLogger, Clock clock, boolean internalRedeploy, boolean prepared) { this.session = session; this.applicationRepository = applicationRepository; this.params = params; this.provisioner = provisioner; this.tenant = tenant; this.deployLogger = deployLogger; this.clock = clock; this.internalRedeploy = internalRedeploy; this.prepared = prepared; } public static Deployment unprepared(Session session, ApplicationRepository applicationRepository, Optional<Provisioner> provisioner, Tenant tenant, PrepareParams params, DeployLogger logger, Clock clock) { return new Deployment(session, applicationRepository, () -> params, provisioner, tenant, logger, clock, false, false); } public static Deployment unprepared(Session session, ApplicationRepository applicationRepository, Optional<Provisioner> provisioner, Tenant tenant, DeployLogger logger, Duration timeout, Clock clock, boolean validate, boolean isBootstrap) { Supplier<PrepareParams> params = createPrepareParams(clock, timeout, session, isBootstrap, !validate, false, true); return new Deployment(session, applicationRepository, params, provisioner, tenant, 
logger, clock, true, false); } public static Deployment prepared(Session session, ApplicationRepository applicationRepository, Optional<Provisioner> provisioner, Tenant tenant, DeployLogger logger, Duration timeout, Clock clock, boolean isBootstrap, boolean force) { Supplier<PrepareParams> params = createPrepareParams(clock, timeout, session, isBootstrap, false, force, false); return new Deployment(session, applicationRepository, params, provisioner, tenant, logger, clock, false, true); } /** Prepares this. This does nothing if this is already prepared */ @Override public void prepare() { if (prepared) return; PrepareParams params = this.params.get(); ApplicationId applicationId = params.getApplicationId(); try (ActionTimer timer = applicationRepository.timerFor(applicationId, "deployment.prepareMillis")) { this.configChangeActions = tenant.getSessionRepository().prepareLocalSession(session, deployLogger, params, clock.instant()); this.prepared = true; } waitForResourcesOrTimeout(params, session, provisioner); } /** Activates this. If it is not already prepared, this will call prepare first. 
*/ @Override public long activate() { prepare(); validateSessionStatus(session); PrepareParams params = this.params.get(); ApplicationId applicationId = session.getApplicationId(); try (ActionTimer timer = applicationRepository.timerFor(applicationId, "deployment.activateMillis")) { TimeoutBudget timeoutBudget = params.getTimeoutBudget(); timeoutBudget.assertNotTimedOut(() -> "Timeout exceeded when trying to activate '" + applicationId + "'"); Activation activation = applicationRepository.activate(session, applicationId, tenant, params.force()); activation.awaitCompletion(timeoutBudget.timeLeft()); logActivatedMessage(applicationId, activation); if (provisioner.isPresent() && configChangeActions != null) restartServices(applicationId); storeReindexing(applicationId, session.getMetaData().getGeneration()); return session.getMetaData().getGeneration(); } } private void logActivatedMessage(ApplicationId applicationId, Activation activation) { Set<FileReference> fileReferences = applicationRepository.getFileReferences(applicationId); String fileReferencesText = fileReferences.size() > 10 ? " " + fileReferences.size() + " file references" : "File references: " + fileReferences; log.log(Level.INFO, session.logPre() + "Session " + session.getSessionId() + " activated successfully using " + provisioner.map(provisioner -> provisioner.getClass().getSimpleName()).orElse("no host provisioner") + ". Config generation " + session.getMetaData().getGeneration() + activation.sourceSessionId().stream().mapToObj(id -> ". Based on session " + id).findFirst().orElse("") + ". " + fileReferencesText); } private void restartServices(ApplicationId applicationId) { RestartActions restartActions = configChangeActions.getRestartActions().useForInternalRestart(internalRedeploy); if ( ! 
restartActions.isEmpty()) { waitForConfigToConverge(applicationId); Set<String> hostnames = restartActions.getEntries().stream() .flatMap(entry -> entry.getServices().stream()) .map(ServiceInfo::getHostName) .collect(Collectors.toUnmodifiableSet()); provisioner.get().restart(applicationId, HostFilter.from(hostnames, Set.of(), Set.of(), Set.of())); deployLogger.log(Level.INFO, String.format("Scheduled service restart of %d nodes: %s", hostnames.size(), hostnames.stream().sorted().collect(Collectors.joining(", ")))); log.info(String.format("%sScheduled service restart of %d nodes: %s", session.logPre(), hostnames.size(), restartActions.format())); this.configChangeActions = new ConfigChangeActions( new RestartActions(), configChangeActions.getRefeedActions(), configChangeActions.getReindexActions()); } } private void storeReindexing(ApplicationId applicationId, long requiredSession) { applicationRepository.modifyReindexing(applicationId, reindexing -> { if (configChangeActions != null) for (ReindexActions.Entry entry : configChangeActions.getReindexActions().getEntries()) reindexing = reindexing.withPending(entry.getClusterName(), entry.getDocumentType(), requiredSession); return reindexing; }); } /** * Request a restart of services of this application on hosts matching the filter. * This is sometimes needed after activation, but can also be requested without * doing prepare and activate in the same session. */ @Override public void restart(HostFilter filter) { provisioner.get().restart(session.getApplicationId(), filter); } /** Exposes the session of this for testing only */ public Session session() { return session; } /** * @return config change actions that need to be performed as result of prepare * @throws IllegalArgumentException if called without being prepared by this */ public ConfigChangeActions configChangeActions() { if (configChangeActions != null) return configChangeActions; throw new IllegalArgumentException("No config change actions: " + (prepared ? 
"was already prepared" : "not yet prepared")); } private void validateSessionStatus(Session session) { long sessionId = session.getSessionId(); if (Session.Status.NEW.equals(session.getStatus())) { throw new IllegalArgumentException(session.logPre() + "Session " + sessionId + " is not prepared"); } else if (Session.Status.ACTIVATE.equals(session.getStatus())) { throw new IllegalArgumentException(session.logPre() + "Session " + sessionId + " is already active"); } } /** * @param clock system clock * @param timeout total timeout duration of prepare + activate * @param session the local session for this deployment * @param isBootstrap true if this deployment is done to bootstrap the config server * @param ignoreValidationErrors whether this model should be validated * @param force whether activation of this model should be forced */ private static Supplier<PrepareParams> createPrepareParams( Clock clock, Duration timeout, Session session, boolean isBootstrap, boolean ignoreValidationErrors, boolean force, boolean waitForResourcesInPrepare) { return Suppliers.memoize(() -> { TimeoutBudget timeoutBudget = new TimeoutBudget(clock, timeout); PrepareParams.Builder params = new PrepareParams.Builder() .applicationId(session.getApplicationId()) .vespaVersion(session.getVespaVersion().toString()) .timeoutBudget(timeoutBudget) .ignoreValidationErrors(ignoreValidationErrors) .isBootstrap(isBootstrap) .force(force) .waitForResourcesInPrepare(waitForResourcesInPrepare) .tenantSecretStores(session.getTenantSecretStores()); session.getDockerImageRepository().ifPresent(params::dockerImageRepository); session.getAthenzDomain().ifPresent(params::athenzDomain); return params.build(); }); } private static void waitForResourcesOrTimeout(PrepareParams params, Session session, Optional<Provisioner> provisioner) { if (!params.waitForResourcesInPrepare() || provisioner.isEmpty()) return; Set<HostSpec> preparedHosts = session.getAllocatedHosts().getHosts(); ActivationContext context = new 
ActivationContext(session.getSessionId()); ProvisionLock lock = new ProvisionLock(session.getApplicationId(), () -> {}); AtomicReference<TransientException> lastException = new AtomicReference<>(); while (true) { params.getTimeoutBudget().assertNotTimedOut( () -> "Timeout exceeded while waiting for application resources of '" + session.getApplicationId() + "'" + Optional.ofNullable(lastException.get()).map(e -> ". Last exception: " + e.getMessage()).orElse("")); try { ApplicationTransaction transaction = new ApplicationTransaction(lock, new NestedTransaction()); provisioner.get().activate(preparedHosts, context, transaction); return; } catch (TransientException e) { lastException.set(e); try { Thread.sleep(durationBetweenResourceReadyChecks.toMillis()); } catch (InterruptedException e1) { throw new RuntimeException(e1); } } } } }
Yes, will do the same as in `waitForResources()`
private void restartServices(ApplicationId applicationId) { RestartActions restartActions = configChangeActions.getRestartActions().useForInternalRestart(internalRedeploy); if ( ! restartActions.isEmpty()) { waitForConfigToConverge(applicationId); Set<String> hostnames = restartActions.getEntries().stream() .flatMap(entry -> entry.getServices().stream()) .map(ServiceInfo::getHostName) .collect(Collectors.toUnmodifiableSet()); provisioner.get().restart(applicationId, HostFilter.from(hostnames, Set.of(), Set.of(), Set.of())); deployLogger.log(Level.INFO, String.format("Scheduled service restart of %d nodes: %s", hostnames.size(), hostnames.stream().sorted().collect(Collectors.joining(", ")))); log.info(String.format("%sScheduled service restart of %d nodes: %s", session.logPre(), hostnames.size(), restartActions.format())); this.configChangeActions = new ConfigChangeActions( new RestartActions(), configChangeActions.getRefeedActions(), configChangeActions.getReindexActions()); } }
waitForConfigToConverge(applicationId);
private void restartServices(ApplicationId applicationId) { RestartActions restartActions = configChangeActions.getRestartActions().useForInternalRestart(internalRedeploy); if ( ! restartActions.isEmpty()) { waitForConfigToConverge(applicationId); Set<String> hostnames = restartActions.getEntries().stream() .flatMap(entry -> entry.getServices().stream()) .map(ServiceInfo::getHostName) .collect(Collectors.toUnmodifiableSet()); provisioner.get().restart(applicationId, HostFilter.from(hostnames, Set.of(), Set.of(), Set.of())); deployLogger.log(Level.INFO, String.format("Scheduled service restart of %d nodes: %s", hostnames.size(), hostnames.stream().sorted().collect(Collectors.joining(", ")))); log.info(String.format("%sScheduled service restart of %d nodes: %s", session.logPre(), hostnames.size(), restartActions.format())); this.configChangeActions = new ConfigChangeActions( new RestartActions(), configChangeActions.getRefeedActions(), configChangeActions.getReindexActions()); } }
class Deployment implements com.yahoo.config.provision.Deployment { private static final Logger log = Logger.getLogger(Deployment.class.getName()); private static final Duration durationBetweenResourceReadyChecks = Duration.ofSeconds(60); /** The session containing the application instance to activate */ private final Session session; private final ApplicationRepository applicationRepository; private final Supplier<PrepareParams> params; private final Optional<Provisioner> provisioner; private final Tenant tenant; private final DeployLogger deployLogger; private final Clock clock; private final boolean internalRedeploy; private boolean prepared; private ConfigChangeActions configChangeActions; private Deployment(Session session, ApplicationRepository applicationRepository, Supplier<PrepareParams> params, Optional<Provisioner> provisioner, Tenant tenant, DeployLogger deployLogger, Clock clock, boolean internalRedeploy, boolean prepared) { this.session = session; this.applicationRepository = applicationRepository; this.params = params; this.provisioner = provisioner; this.tenant = tenant; this.deployLogger = deployLogger; this.clock = clock; this.internalRedeploy = internalRedeploy; this.prepared = prepared; } public static Deployment unprepared(Session session, ApplicationRepository applicationRepository, Optional<Provisioner> provisioner, Tenant tenant, PrepareParams params, DeployLogger logger, Clock clock) { return new Deployment(session, applicationRepository, () -> params, provisioner, tenant, logger, clock, false, false); } public static Deployment unprepared(Session session, ApplicationRepository applicationRepository, Optional<Provisioner> provisioner, Tenant tenant, DeployLogger logger, Duration timeout, Clock clock, boolean validate, boolean isBootstrap) { Supplier<PrepareParams> params = createPrepareParams(clock, timeout, session, isBootstrap, !validate, false, true); return new Deployment(session, applicationRepository, params, provisioner, tenant, 
logger, clock, true, false); } public static Deployment prepared(Session session, ApplicationRepository applicationRepository, Optional<Provisioner> provisioner, Tenant tenant, DeployLogger logger, Duration timeout, Clock clock, boolean isBootstrap, boolean force) { Supplier<PrepareParams> params = createPrepareParams(clock, timeout, session, isBootstrap, false, force, false); return new Deployment(session, applicationRepository, params, provisioner, tenant, logger, clock, false, true); } /** Prepares this. This does nothing if this is already prepared */ @Override public void prepare() { if (prepared) return; PrepareParams params = this.params.get(); ApplicationId applicationId = params.getApplicationId(); try (ActionTimer timer = applicationRepository.timerFor(applicationId, "deployment.prepareMillis")) { this.configChangeActions = tenant.getSessionRepository().prepareLocalSession(session, deployLogger, params, clock.instant()); this.prepared = true; } waitForResourcesOrTimeout(params, session, provisioner); } /** Activates this. If it is not already prepared, this will call prepare first. 
*/ @Override public long activate() { prepare(); validateSessionStatus(session); PrepareParams params = this.params.get(); ApplicationId applicationId = session.getApplicationId(); try (ActionTimer timer = applicationRepository.timerFor(applicationId, "deployment.activateMillis")) { TimeoutBudget timeoutBudget = params.getTimeoutBudget(); timeoutBudget.assertNotTimedOut(() -> "Timeout exceeded when trying to activate '" + applicationId + "'"); Activation activation = applicationRepository.activate(session, applicationId, tenant, params.force()); activation.awaitCompletion(timeoutBudget.timeLeft()); logActivatedMessage(applicationId, activation); if (provisioner.isPresent() && configChangeActions != null) restartServices(applicationId); storeReindexing(applicationId, session.getMetaData().getGeneration()); return session.getMetaData().getGeneration(); } } private void logActivatedMessage(ApplicationId applicationId, Activation activation) { Set<FileReference> fileReferences = applicationRepository.getFileReferences(applicationId); String fileReferencesText = fileReferences.size() > 10 ? " " + fileReferences.size() + " file references" : "File references: " + fileReferences; log.log(Level.INFO, session.logPre() + "Session " + session.getSessionId() + " activated successfully using " + provisioner.map(provisioner -> provisioner.getClass().getSimpleName()).orElse("no host provisioner") + ". Config generation " + session.getMetaData().getGeneration() + activation.sourceSessionId().stream().mapToObj(id -> ". Based on session " + id).findFirst().orElse("") + ". " + fileReferencesText); } private void waitForConfigToConverge(ApplicationId applicationId) { BooleanFlag verify = Flags.CHECK_CONFIG_CONVERGENCE_BEFORE_RESTARTING.bindTo(applicationRepository.flagSource()); if ( ! 
verify.value()) return; Instant end = clock.instant().plus(Duration.ofMinutes(10)); Duration timeout = Duration.ofSeconds(10); do { Application app = applicationRepository.getActiveApplication(applicationId); log.info("Wait for services in " + applicationId + " to converge on new generation before restarting"); ConfigConvergenceChecker convergenceChecker = applicationRepository.configConvergenceChecker(); ServiceListResponse response = convergenceChecker.getConfigGenerationsForAllServices(app, timeout); if (response.converged) { log.info("services converged on new generation " + response.currentGeneration); return; } else { log.info("services not converged on new generation, wanted generation: " + response.wantedGeneration + ", current generation: " + response.currentGeneration + ", will retry"); try { Thread.sleep(10_000); } catch (InterruptedException e) { /* ignore */ } } } while (clock.instant().isBefore(end)); throw new RuntimeException("Config has not converged"); } private void storeReindexing(ApplicationId applicationId, long requiredSession) { applicationRepository.modifyReindexing(applicationId, reindexing -> { if (configChangeActions != null) for (ReindexActions.Entry entry : configChangeActions.getReindexActions().getEntries()) reindexing = reindexing.withPending(entry.getClusterName(), entry.getDocumentType(), requiredSession); return reindexing; }); } /** * Request a restart of services of this application on hosts matching the filter. * This is sometimes needed after activation, but can also be requested without * doing prepare and activate in the same session. 
*/ @Override public void restart(HostFilter filter) { provisioner.get().restart(session.getApplicationId(), filter); } /** Exposes the session of this for testing only */ public Session session() { return session; } /** * @return config change actions that need to be performed as result of prepare * @throws IllegalArgumentException if called without being prepared by this */ public ConfigChangeActions configChangeActions() { if (configChangeActions != null) return configChangeActions; throw new IllegalArgumentException("No config change actions: " + (prepared ? "was already prepared" : "not yet prepared")); } private void validateSessionStatus(Session session) { long sessionId = session.getSessionId(); if (Session.Status.NEW.equals(session.getStatus())) { throw new IllegalArgumentException(session.logPre() + "Session " + sessionId + " is not prepared"); } else if (Session.Status.ACTIVATE.equals(session.getStatus())) { throw new IllegalArgumentException(session.logPre() + "Session " + sessionId + " is already active"); } } /** * @param clock system clock * @param timeout total timeout duration of prepare + activate * @param session the local session for this deployment * @param isBootstrap true if this deployment is done to bootstrap the config server * @param ignoreValidationErrors whether this model should be validated * @param force whether activation of this model should be forced */ private static Supplier<PrepareParams> createPrepareParams( Clock clock, Duration timeout, Session session, boolean isBootstrap, boolean ignoreValidationErrors, boolean force, boolean waitForResourcesInPrepare) { return Suppliers.memoize(() -> { TimeoutBudget timeoutBudget = new TimeoutBudget(clock, timeout); PrepareParams.Builder params = new PrepareParams.Builder() .applicationId(session.getApplicationId()) .vespaVersion(session.getVespaVersion().toString()) .timeoutBudget(timeoutBudget) .ignoreValidationErrors(ignoreValidationErrors) .isBootstrap(isBootstrap) .force(force) 
.waitForResourcesInPrepare(waitForResourcesInPrepare) .tenantSecretStores(session.getTenantSecretStores()); session.getDockerImageRepository().ifPresent(params::dockerImageRepository); session.getAthenzDomain().ifPresent(params::athenzDomain); return params.build(); }); } private static void waitForResourcesOrTimeout(PrepareParams params, Session session, Optional<Provisioner> provisioner) { if (!params.waitForResourcesInPrepare() || provisioner.isEmpty()) return; Set<HostSpec> preparedHosts = session.getAllocatedHosts().getHosts(); ActivationContext context = new ActivationContext(session.getSessionId()); ProvisionLock lock = new ProvisionLock(session.getApplicationId(), () -> {}); AtomicReference<TransientException> lastException = new AtomicReference<>(); while (true) { params.getTimeoutBudget().assertNotTimedOut( () -> "Timeout exceeded while waiting for application resources of '" + session.getApplicationId() + "'" + Optional.ofNullable(lastException.get()).map(e -> ". Last exception: " + e.getMessage()).orElse("")); try { ApplicationTransaction transaction = new ApplicationTransaction(lock, new NestedTransaction()); provisioner.get().activate(preparedHosts, context, transaction); return; } catch (TransientException e) { lastException.set(e); try { Thread.sleep(durationBetweenResourceReadyChecks.toMillis()); } catch (InterruptedException e1) { throw new RuntimeException(e1); } } } } }
class Deployment implements com.yahoo.config.provision.Deployment { private static final Logger log = Logger.getLogger(Deployment.class.getName()); private static final Duration durationBetweenResourceReadyChecks = Duration.ofSeconds(60); /** The session containing the application instance to activate */ private final Session session; private final ApplicationRepository applicationRepository; private final Supplier<PrepareParams> params; private final Optional<Provisioner> provisioner; private final Tenant tenant; private final DeployLogger deployLogger; private final Clock clock; private final boolean internalRedeploy; private boolean prepared; private ConfigChangeActions configChangeActions; private Deployment(Session session, ApplicationRepository applicationRepository, Supplier<PrepareParams> params, Optional<Provisioner> provisioner, Tenant tenant, DeployLogger deployLogger, Clock clock, boolean internalRedeploy, boolean prepared) { this.session = session; this.applicationRepository = applicationRepository; this.params = params; this.provisioner = provisioner; this.tenant = tenant; this.deployLogger = deployLogger; this.clock = clock; this.internalRedeploy = internalRedeploy; this.prepared = prepared; } public static Deployment unprepared(Session session, ApplicationRepository applicationRepository, Optional<Provisioner> provisioner, Tenant tenant, PrepareParams params, DeployLogger logger, Clock clock) { return new Deployment(session, applicationRepository, () -> params, provisioner, tenant, logger, clock, false, false); } public static Deployment unprepared(Session session, ApplicationRepository applicationRepository, Optional<Provisioner> provisioner, Tenant tenant, DeployLogger logger, Duration timeout, Clock clock, boolean validate, boolean isBootstrap) { Supplier<PrepareParams> params = createPrepareParams(clock, timeout, session, isBootstrap, !validate, false, true); return new Deployment(session, applicationRepository, params, provisioner, tenant, 
logger, clock, true, false); } public static Deployment prepared(Session session, ApplicationRepository applicationRepository, Optional<Provisioner> provisioner, Tenant tenant, DeployLogger logger, Duration timeout, Clock clock, boolean isBootstrap, boolean force) { Supplier<PrepareParams> params = createPrepareParams(clock, timeout, session, isBootstrap, false, force, false); return new Deployment(session, applicationRepository, params, provisioner, tenant, logger, clock, false, true); } /** Prepares this. This does nothing if this is already prepared */ @Override public void prepare() { if (prepared) return; PrepareParams params = this.params.get(); ApplicationId applicationId = params.getApplicationId(); try (ActionTimer timer = applicationRepository.timerFor(applicationId, "deployment.prepareMillis")) { this.configChangeActions = tenant.getSessionRepository().prepareLocalSession(session, deployLogger, params, clock.instant()); this.prepared = true; } waitForResourcesOrTimeout(params, session, provisioner); } /** Activates this. If it is not already prepared, this will call prepare first. 
*/ @Override public long activate() { prepare(); validateSessionStatus(session); PrepareParams params = this.params.get(); ApplicationId applicationId = session.getApplicationId(); try (ActionTimer timer = applicationRepository.timerFor(applicationId, "deployment.activateMillis")) { TimeoutBudget timeoutBudget = params.getTimeoutBudget(); timeoutBudget.assertNotTimedOut(() -> "Timeout exceeded when trying to activate '" + applicationId + "'"); Activation activation = applicationRepository.activate(session, applicationId, tenant, params.force()); activation.awaitCompletion(timeoutBudget.timeLeft()); logActivatedMessage(applicationId, activation); if (provisioner.isPresent() && configChangeActions != null) restartServices(applicationId); storeReindexing(applicationId, session.getMetaData().getGeneration()); return session.getMetaData().getGeneration(); } } private void logActivatedMessage(ApplicationId applicationId, Activation activation) { Set<FileReference> fileReferences = applicationRepository.getFileReferences(applicationId); String fileReferencesText = fileReferences.size() > 10 ? " " + fileReferences.size() + " file references" : "File references: " + fileReferences; log.log(Level.INFO, session.logPre() + "Session " + session.getSessionId() + " activated successfully using " + provisioner.map(provisioner -> provisioner.getClass().getSimpleName()).orElse("no host provisioner") + ". Config generation " + session.getMetaData().getGeneration() + activation.sourceSessionId().stream().mapToObj(id -> ". Based on session " + id).findFirst().orElse("") + ". " + fileReferencesText); } private void waitForConfigToConverge(ApplicationId applicationId) { BooleanFlag verify = Flags.CHECK_CONFIG_CONVERGENCE_BEFORE_RESTARTING.bindTo(applicationRepository.flagSource()); if ( ! 
verify.value()) return; Duration timeout = Duration.ofSeconds(10); while (true) { try { params.get().getTimeoutBudget().assertNotTimedOut( () -> "Timeout exceeded while waiting for config convergence for " + applicationId); } catch (UncheckedTimeoutException e) { throw new ConfigNotConvergedException(e); } Application app = applicationRepository.getActiveApplication(applicationId); log.info(session.logPre() + "Wait for services to converge on new generation before restarting"); ConfigConvergenceChecker convergenceChecker = applicationRepository.configConvergenceChecker(); ServiceListResponse response = convergenceChecker.getConfigGenerationsForAllServices(app, timeout); if (response.converged) { log.info(session.logPre() + "Services converged on new generation " + response.currentGeneration); return; } else { log.info(session.logPre() + "Services not converged on new generation, wanted generation: " + response.wantedGeneration + ", current generation: " + response.currentGeneration + ", will retry"); try { Thread.sleep(10_000); } catch (InterruptedException e) { /* ignore */ } } } } private void storeReindexing(ApplicationId applicationId, long requiredSession) { applicationRepository.modifyReindexing(applicationId, reindexing -> { if (configChangeActions != null) for (ReindexActions.Entry entry : configChangeActions.getReindexActions().getEntries()) reindexing = reindexing.withPending(entry.getClusterName(), entry.getDocumentType(), requiredSession); return reindexing; }); } /** * Request a restart of services of this application on hosts matching the filter. * This is sometimes needed after activation, but can also be requested without * doing prepare and activate in the same session. 
*/ @Override public void restart(HostFilter filter) { provisioner.get().restart(session.getApplicationId(), filter); } /** Exposes the session of this for testing only */ public Session session() { return session; } /** * @return config change actions that need to be performed as result of prepare * @throws IllegalArgumentException if called without being prepared by this */ public ConfigChangeActions configChangeActions() { if (configChangeActions != null) return configChangeActions; throw new IllegalArgumentException("No config change actions: " + (prepared ? "was already prepared" : "not yet prepared")); } private void validateSessionStatus(Session session) { long sessionId = session.getSessionId(); if (Session.Status.NEW.equals(session.getStatus())) { throw new IllegalArgumentException(session.logPre() + "Session " + sessionId + " is not prepared"); } else if (Session.Status.ACTIVATE.equals(session.getStatus())) { throw new IllegalArgumentException(session.logPre() + "Session " + sessionId + " is already active"); } } /** * @param clock system clock * @param timeout total timeout duration of prepare + activate * @param session the local session for this deployment * @param isBootstrap true if this deployment is done to bootstrap the config server * @param ignoreValidationErrors whether this model should be validated * @param force whether activation of this model should be forced */ private static Supplier<PrepareParams> createPrepareParams( Clock clock, Duration timeout, Session session, boolean isBootstrap, boolean ignoreValidationErrors, boolean force, boolean waitForResourcesInPrepare) { return Suppliers.memoize(() -> { TimeoutBudget timeoutBudget = new TimeoutBudget(clock, timeout); PrepareParams.Builder params = new PrepareParams.Builder() .applicationId(session.getApplicationId()) .vespaVersion(session.getVespaVersion().toString()) .timeoutBudget(timeoutBudget) .ignoreValidationErrors(ignoreValidationErrors) .isBootstrap(isBootstrap) .force(force) 
.waitForResourcesInPrepare(waitForResourcesInPrepare) .tenantSecretStores(session.getTenantSecretStores()); session.getDockerImageRepository().ifPresent(params::dockerImageRepository); session.getAthenzDomain().ifPresent(params::athenzDomain); return params.build(); }); } private static void waitForResourcesOrTimeout(PrepareParams params, Session session, Optional<Provisioner> provisioner) { if (!params.waitForResourcesInPrepare() || provisioner.isEmpty()) return; Set<HostSpec> preparedHosts = session.getAllocatedHosts().getHosts(); ActivationContext context = new ActivationContext(session.getSessionId()); ProvisionLock lock = new ProvisionLock(session.getApplicationId(), () -> {}); AtomicReference<TransientException> lastException = new AtomicReference<>(); while (true) { params.getTimeoutBudget().assertNotTimedOut( () -> "Timeout exceeded while waiting for application resources of '" + session.getApplicationId() + "'" + Optional.ofNullable(lastException.get()).map(e -> ". Last exception: " + e.getMessage()).orElse("")); try { ApplicationTransaction transaction = new ApplicationTransaction(lock, new NestedTransaction()); provisioner.get().activate(preparedHosts, context, transaction); return; } catch (TransientException e) { lastException.set(e); try { Thread.sleep(durationBetweenResourceReadyChecks.toMillis()); } catch (InterruptedException e1) { throw new RuntimeException(e1); } } } } }
Yes, will look into it
private void waitForConfigToConverge(ApplicationId applicationId) { BooleanFlag verify = Flags.CHECK_CONFIG_CONVERGENCE_BEFORE_RESTARTING.bindTo(applicationRepository.flagSource()); if ( ! verify.value()) return; Instant end = clock.instant().plus(Duration.ofMinutes(10)); Duration timeout = Duration.ofSeconds(10); do { Application app = applicationRepository.getActiveApplication(applicationId); log.info("Wait for services in " + applicationId + " to converge on new generation before restarting"); ConfigConvergenceChecker convergenceChecker = applicationRepository.configConvergenceChecker(); ServiceListResponse response = convergenceChecker.getConfigGenerationsForAllServices(app, timeout); if (response.converged) { log.info("services converged on new generation " + response.currentGeneration); return; } else { log.info("services not converged on new generation, wanted generation: " + response.wantedGeneration + ", current generation: " + response.currentGeneration + ", will retry"); try { Thread.sleep(10_000); } catch (InterruptedException e) { /* ignore */ } } } while (clock.instant().isBefore(end)); throw new RuntimeException("Config has not converged"); }
throw new RuntimeException("Config has not converged");
private void waitForConfigToConverge(ApplicationId applicationId) { BooleanFlag verify = Flags.CHECK_CONFIG_CONVERGENCE_BEFORE_RESTARTING.bindTo(applicationRepository.flagSource()); if ( ! verify.value()) return; Duration timeout = Duration.ofSeconds(10); while (true) { try { params.get().getTimeoutBudget().assertNotTimedOut( () -> "Timeout exceeded while waiting for config convergence for " + applicationId); } catch (UncheckedTimeoutException e) { throw new ConfigNotConvergedException(e); } Application app = applicationRepository.getActiveApplication(applicationId); log.info(session.logPre() + "Wait for services to converge on new generation before restarting"); ConfigConvergenceChecker convergenceChecker = applicationRepository.configConvergenceChecker(); ServiceListResponse response = convergenceChecker.getConfigGenerationsForAllServices(app, timeout); if (response.converged) { log.info(session.logPre() + "Services converged on new generation " + response.currentGeneration); return; } else { log.info(session.logPre() + "Services not converged on new generation, wanted generation: " + response.wantedGeneration + ", current generation: " + response.currentGeneration + ", will retry"); try { Thread.sleep(10_000); } catch (InterruptedException e) { /* ignore */ } } } }
class Deployment implements com.yahoo.config.provision.Deployment { private static final Logger log = Logger.getLogger(Deployment.class.getName()); private static final Duration durationBetweenResourceReadyChecks = Duration.ofSeconds(60); /** The session containing the application instance to activate */ private final Session session; private final ApplicationRepository applicationRepository; private final Supplier<PrepareParams> params; private final Optional<Provisioner> provisioner; private final Tenant tenant; private final DeployLogger deployLogger; private final Clock clock; private final boolean internalRedeploy; private boolean prepared; private ConfigChangeActions configChangeActions; private Deployment(Session session, ApplicationRepository applicationRepository, Supplier<PrepareParams> params, Optional<Provisioner> provisioner, Tenant tenant, DeployLogger deployLogger, Clock clock, boolean internalRedeploy, boolean prepared) { this.session = session; this.applicationRepository = applicationRepository; this.params = params; this.provisioner = provisioner; this.tenant = tenant; this.deployLogger = deployLogger; this.clock = clock; this.internalRedeploy = internalRedeploy; this.prepared = prepared; } public static Deployment unprepared(Session session, ApplicationRepository applicationRepository, Optional<Provisioner> provisioner, Tenant tenant, PrepareParams params, DeployLogger logger, Clock clock) { return new Deployment(session, applicationRepository, () -> params, provisioner, tenant, logger, clock, false, false); } public static Deployment unprepared(Session session, ApplicationRepository applicationRepository, Optional<Provisioner> provisioner, Tenant tenant, DeployLogger logger, Duration timeout, Clock clock, boolean validate, boolean isBootstrap) { Supplier<PrepareParams> params = createPrepareParams(clock, timeout, session, isBootstrap, !validate, false, true); return new Deployment(session, applicationRepository, params, provisioner, tenant, 
logger, clock, true, false); } public static Deployment prepared(Session session, ApplicationRepository applicationRepository, Optional<Provisioner> provisioner, Tenant tenant, DeployLogger logger, Duration timeout, Clock clock, boolean isBootstrap, boolean force) { Supplier<PrepareParams> params = createPrepareParams(clock, timeout, session, isBootstrap, false, force, false); return new Deployment(session, applicationRepository, params, provisioner, tenant, logger, clock, false, true); } /** Prepares this. This does nothing if this is already prepared */ @Override public void prepare() { if (prepared) return; PrepareParams params = this.params.get(); ApplicationId applicationId = params.getApplicationId(); try (ActionTimer timer = applicationRepository.timerFor(applicationId, "deployment.prepareMillis")) { this.configChangeActions = tenant.getSessionRepository().prepareLocalSession(session, deployLogger, params, clock.instant()); this.prepared = true; } waitForResourcesOrTimeout(params, session, provisioner); } /** Activates this. If it is not already prepared, this will call prepare first. 
*/ @Override public long activate() { prepare(); validateSessionStatus(session); PrepareParams params = this.params.get(); ApplicationId applicationId = session.getApplicationId(); try (ActionTimer timer = applicationRepository.timerFor(applicationId, "deployment.activateMillis")) { TimeoutBudget timeoutBudget = params.getTimeoutBudget(); timeoutBudget.assertNotTimedOut(() -> "Timeout exceeded when trying to activate '" + applicationId + "'"); Activation activation = applicationRepository.activate(session, applicationId, tenant, params.force()); activation.awaitCompletion(timeoutBudget.timeLeft()); logActivatedMessage(applicationId, activation); if (provisioner.isPresent() && configChangeActions != null) restartServices(applicationId); storeReindexing(applicationId, session.getMetaData().getGeneration()); return session.getMetaData().getGeneration(); } } private void logActivatedMessage(ApplicationId applicationId, Activation activation) { Set<FileReference> fileReferences = applicationRepository.getFileReferences(applicationId); String fileReferencesText = fileReferences.size() > 10 ? " " + fileReferences.size() + " file references" : "File references: " + fileReferences; log.log(Level.INFO, session.logPre() + "Session " + session.getSessionId() + " activated successfully using " + provisioner.map(provisioner -> provisioner.getClass().getSimpleName()).orElse("no host provisioner") + ". Config generation " + session.getMetaData().getGeneration() + activation.sourceSessionId().stream().mapToObj(id -> ". Based on session " + id).findFirst().orElse("") + ". " + fileReferencesText); } private void restartServices(ApplicationId applicationId) { RestartActions restartActions = configChangeActions.getRestartActions().useForInternalRestart(internalRedeploy); if ( ! 
restartActions.isEmpty()) { waitForConfigToConverge(applicationId); Set<String> hostnames = restartActions.getEntries().stream() .flatMap(entry -> entry.getServices().stream()) .map(ServiceInfo::getHostName) .collect(Collectors.toUnmodifiableSet()); provisioner.get().restart(applicationId, HostFilter.from(hostnames, Set.of(), Set.of(), Set.of())); deployLogger.log(Level.INFO, String.format("Scheduled service restart of %d nodes: %s", hostnames.size(), hostnames.stream().sorted().collect(Collectors.joining(", ")))); log.info(String.format("%sScheduled service restart of %d nodes: %s", session.logPre(), hostnames.size(), restartActions.format())); this.configChangeActions = new ConfigChangeActions( new RestartActions(), configChangeActions.getRefeedActions(), configChangeActions.getReindexActions()); } } private void storeReindexing(ApplicationId applicationId, long requiredSession) { applicationRepository.modifyReindexing(applicationId, reindexing -> { if (configChangeActions != null) for (ReindexActions.Entry entry : configChangeActions.getReindexActions().getEntries()) reindexing = reindexing.withPending(entry.getClusterName(), entry.getDocumentType(), requiredSession); return reindexing; }); } /** * Request a restart of services of this application on hosts matching the filter. * This is sometimes needed after activation, but can also be requested without * doing prepare and activate in the same session. */ @Override public void restart(HostFilter filter) { provisioner.get().restart(session.getApplicationId(), filter); } /** Exposes the session of this for testing only */ public Session session() { return session; } /** * @return config change actions that need to be performed as result of prepare * @throws IllegalArgumentException if called without being prepared by this */ public ConfigChangeActions configChangeActions() { if (configChangeActions != null) return configChangeActions; throw new IllegalArgumentException("No config change actions: " + (prepared ? 
"was already prepared" : "not yet prepared")); } private void validateSessionStatus(Session session) { long sessionId = session.getSessionId(); if (Session.Status.NEW.equals(session.getStatus())) { throw new IllegalArgumentException(session.logPre() + "Session " + sessionId + " is not prepared"); } else if (Session.Status.ACTIVATE.equals(session.getStatus())) { throw new IllegalArgumentException(session.logPre() + "Session " + sessionId + " is already active"); } } /** * @param clock system clock * @param timeout total timeout duration of prepare + activate * @param session the local session for this deployment * @param isBootstrap true if this deployment is done to bootstrap the config server * @param ignoreValidationErrors whether this model should be validated * @param force whether activation of this model should be forced */ private static Supplier<PrepareParams> createPrepareParams( Clock clock, Duration timeout, Session session, boolean isBootstrap, boolean ignoreValidationErrors, boolean force, boolean waitForResourcesInPrepare) { return Suppliers.memoize(() -> { TimeoutBudget timeoutBudget = new TimeoutBudget(clock, timeout); PrepareParams.Builder params = new PrepareParams.Builder() .applicationId(session.getApplicationId()) .vespaVersion(session.getVespaVersion().toString()) .timeoutBudget(timeoutBudget) .ignoreValidationErrors(ignoreValidationErrors) .isBootstrap(isBootstrap) .force(force) .waitForResourcesInPrepare(waitForResourcesInPrepare) .tenantSecretStores(session.getTenantSecretStores()); session.getDockerImageRepository().ifPresent(params::dockerImageRepository); session.getAthenzDomain().ifPresent(params::athenzDomain); return params.build(); }); } private static void waitForResourcesOrTimeout(PrepareParams params, Session session, Optional<Provisioner> provisioner) { if (!params.waitForResourcesInPrepare() || provisioner.isEmpty()) return; Set<HostSpec> preparedHosts = session.getAllocatedHosts().getHosts(); ActivationContext context = new 
ActivationContext(session.getSessionId()); ProvisionLock lock = new ProvisionLock(session.getApplicationId(), () -> {}); AtomicReference<TransientException> lastException = new AtomicReference<>(); while (true) { params.getTimeoutBudget().assertNotTimedOut( () -> "Timeout exceeded while waiting for application resources of '" + session.getApplicationId() + "'" + Optional.ofNullable(lastException.get()).map(e -> ". Last exception: " + e.getMessage()).orElse("")); try { ApplicationTransaction transaction = new ApplicationTransaction(lock, new NestedTransaction()); provisioner.get().activate(preparedHosts, context, transaction); return; } catch (TransientException e) { lastException.set(e); try { Thread.sleep(durationBetweenResourceReadyChecks.toMillis()); } catch (InterruptedException e1) { throw new RuntimeException(e1); } } } } }
class Deployment implements com.yahoo.config.provision.Deployment { private static final Logger log = Logger.getLogger(Deployment.class.getName()); private static final Duration durationBetweenResourceReadyChecks = Duration.ofSeconds(60); /** The session containing the application instance to activate */ private final Session session; private final ApplicationRepository applicationRepository; private final Supplier<PrepareParams> params; private final Optional<Provisioner> provisioner; private final Tenant tenant; private final DeployLogger deployLogger; private final Clock clock; private final boolean internalRedeploy; private boolean prepared; private ConfigChangeActions configChangeActions; private Deployment(Session session, ApplicationRepository applicationRepository, Supplier<PrepareParams> params, Optional<Provisioner> provisioner, Tenant tenant, DeployLogger deployLogger, Clock clock, boolean internalRedeploy, boolean prepared) { this.session = session; this.applicationRepository = applicationRepository; this.params = params; this.provisioner = provisioner; this.tenant = tenant; this.deployLogger = deployLogger; this.clock = clock; this.internalRedeploy = internalRedeploy; this.prepared = prepared; } public static Deployment unprepared(Session session, ApplicationRepository applicationRepository, Optional<Provisioner> provisioner, Tenant tenant, PrepareParams params, DeployLogger logger, Clock clock) { return new Deployment(session, applicationRepository, () -> params, provisioner, tenant, logger, clock, false, false); } public static Deployment unprepared(Session session, ApplicationRepository applicationRepository, Optional<Provisioner> provisioner, Tenant tenant, DeployLogger logger, Duration timeout, Clock clock, boolean validate, boolean isBootstrap) { Supplier<PrepareParams> params = createPrepareParams(clock, timeout, session, isBootstrap, !validate, false, true); return new Deployment(session, applicationRepository, params, provisioner, tenant, 
logger, clock, true, false); } public static Deployment prepared(Session session, ApplicationRepository applicationRepository, Optional<Provisioner> provisioner, Tenant tenant, DeployLogger logger, Duration timeout, Clock clock, boolean isBootstrap, boolean force) { Supplier<PrepareParams> params = createPrepareParams(clock, timeout, session, isBootstrap, false, force, false); return new Deployment(session, applicationRepository, params, provisioner, tenant, logger, clock, false, true); } /** Prepares this. This does nothing if this is already prepared */ @Override public void prepare() { if (prepared) return; PrepareParams params = this.params.get(); ApplicationId applicationId = params.getApplicationId(); try (ActionTimer timer = applicationRepository.timerFor(applicationId, "deployment.prepareMillis")) { this.configChangeActions = tenant.getSessionRepository().prepareLocalSession(session, deployLogger, params, clock.instant()); this.prepared = true; } waitForResourcesOrTimeout(params, session, provisioner); } /** Activates this. If it is not already prepared, this will call prepare first. 
*/ @Override public long activate() { prepare(); validateSessionStatus(session); PrepareParams params = this.params.get(); ApplicationId applicationId = session.getApplicationId(); try (ActionTimer timer = applicationRepository.timerFor(applicationId, "deployment.activateMillis")) { TimeoutBudget timeoutBudget = params.getTimeoutBudget(); timeoutBudget.assertNotTimedOut(() -> "Timeout exceeded when trying to activate '" + applicationId + "'"); Activation activation = applicationRepository.activate(session, applicationId, tenant, params.force()); activation.awaitCompletion(timeoutBudget.timeLeft()); logActivatedMessage(applicationId, activation); if (provisioner.isPresent() && configChangeActions != null) restartServices(applicationId); storeReindexing(applicationId, session.getMetaData().getGeneration()); return session.getMetaData().getGeneration(); } } private void logActivatedMessage(ApplicationId applicationId, Activation activation) { Set<FileReference> fileReferences = applicationRepository.getFileReferences(applicationId); String fileReferencesText = fileReferences.size() > 10 ? " " + fileReferences.size() + " file references" : "File references: " + fileReferences; log.log(Level.INFO, session.logPre() + "Session " + session.getSessionId() + " activated successfully using " + provisioner.map(provisioner -> provisioner.getClass().getSimpleName()).orElse("no host provisioner") + ". Config generation " + session.getMetaData().getGeneration() + activation.sourceSessionId().stream().mapToObj(id -> ". Based on session " + id).findFirst().orElse("") + ". " + fileReferencesText); } private void restartServices(ApplicationId applicationId) { RestartActions restartActions = configChangeActions.getRestartActions().useForInternalRestart(internalRedeploy); if ( ! 
restartActions.isEmpty()) { waitForConfigToConverge(applicationId); Set<String> hostnames = restartActions.getEntries().stream() .flatMap(entry -> entry.getServices().stream()) .map(ServiceInfo::getHostName) .collect(Collectors.toUnmodifiableSet()); provisioner.get().restart(applicationId, HostFilter.from(hostnames, Set.of(), Set.of(), Set.of())); deployLogger.log(Level.INFO, String.format("Scheduled service restart of %d nodes: %s", hostnames.size(), hostnames.stream().sorted().collect(Collectors.joining(", ")))); log.info(String.format("%sScheduled service restart of %d nodes: %s", session.logPre(), hostnames.size(), restartActions.format())); this.configChangeActions = new ConfigChangeActions( new RestartActions(), configChangeActions.getRefeedActions(), configChangeActions.getReindexActions()); } } private void storeReindexing(ApplicationId applicationId, long requiredSession) { applicationRepository.modifyReindexing(applicationId, reindexing -> { if (configChangeActions != null) for (ReindexActions.Entry entry : configChangeActions.getReindexActions().getEntries()) reindexing = reindexing.withPending(entry.getClusterName(), entry.getDocumentType(), requiredSession); return reindexing; }); } /** * Request a restart of services of this application on hosts matching the filter. * This is sometimes needed after activation, but can also be requested without * doing prepare and activate in the same session. */ @Override public void restart(HostFilter filter) { provisioner.get().restart(session.getApplicationId(), filter); } /** Exposes the session of this for testing only */ public Session session() { return session; } /** * @return config change actions that need to be performed as result of prepare * @throws IllegalArgumentException if called without being prepared by this */ public ConfigChangeActions configChangeActions() { if (configChangeActions != null) return configChangeActions; throw new IllegalArgumentException("No config change actions: " + (prepared ? 
"was already prepared" : "not yet prepared")); } private void validateSessionStatus(Session session) { long sessionId = session.getSessionId(); if (Session.Status.NEW.equals(session.getStatus())) { throw new IllegalArgumentException(session.logPre() + "Session " + sessionId + " is not prepared"); } else if (Session.Status.ACTIVATE.equals(session.getStatus())) { throw new IllegalArgumentException(session.logPre() + "Session " + sessionId + " is already active"); } } /** * @param clock system clock * @param timeout total timeout duration of prepare + activate * @param session the local session for this deployment * @param isBootstrap true if this deployment is done to bootstrap the config server * @param ignoreValidationErrors whether this model should be validated * @param force whether activation of this model should be forced */ private static Supplier<PrepareParams> createPrepareParams( Clock clock, Duration timeout, Session session, boolean isBootstrap, boolean ignoreValidationErrors, boolean force, boolean waitForResourcesInPrepare) { return Suppliers.memoize(() -> { TimeoutBudget timeoutBudget = new TimeoutBudget(clock, timeout); PrepareParams.Builder params = new PrepareParams.Builder() .applicationId(session.getApplicationId()) .vespaVersion(session.getVespaVersion().toString()) .timeoutBudget(timeoutBudget) .ignoreValidationErrors(ignoreValidationErrors) .isBootstrap(isBootstrap) .force(force) .waitForResourcesInPrepare(waitForResourcesInPrepare) .tenantSecretStores(session.getTenantSecretStores()); session.getDockerImageRepository().ifPresent(params::dockerImageRepository); session.getAthenzDomain().ifPresent(params::athenzDomain); return params.build(); }); } private static void waitForResourcesOrTimeout(PrepareParams params, Session session, Optional<Provisioner> provisioner) { if (!params.waitForResourcesInPrepare() || provisioner.isEmpty()) return; Set<HostSpec> preparedHosts = session.getAllocatedHosts().getHosts(); ActivationContext context = new 
ActivationContext(session.getSessionId()); ProvisionLock lock = new ProvisionLock(session.getApplicationId(), () -> {}); AtomicReference<TransientException> lastException = new AtomicReference<>(); while (true) { params.getTimeoutBudget().assertNotTimedOut( () -> "Timeout exceeded while waiting for application resources of '" + session.getApplicationId() + "'" + Optional.ofNullable(lastException.get()).map(e -> ". Last exception: " + e.getMessage()).orElse("")); try { ApplicationTransaction transaction = new ApplicationTransaction(lock, new NestedTransaction()); provisioner.get().activate(preparedHosts, context, transaction); return; } catch (TransientException e) { lastException.set(e); try { Thread.sleep(durationBetweenResourceReadyChecks.toMillis()); } catch (InterruptedException e1) { throw new RuntimeException(e1); } } } } }
Maybe we can extend the orchestrator API to allow listing all suspended hosts, and use that? So we get away with one request instead of hundreds. @hakonhall may have some thoughts on performance impact.
/**
 * Returns whether the given host is currently a valid target for allocating tenant nodes.
 *
 * A host qualifies only if it can run tenant nodes, is not being retired (explicitly or via a
 * retired cluster membership), and is not suspended in the orchestrator. With dynamic
 * provisioning, hosts that are still being provisioned or are ready also qualify; otherwise
 * only active hosts do.
 *
 * NOTE(review): suspended(host) issues one orchestrator request per host — confirm the cost is
 * acceptable when this is evaluated for a large host set.
 *
 * @param host the candidate host
 * @param dynamicProvisioning whether the zone provisions hosts dynamically
 * @return true if tenant nodes may be allocated to the host
 */
public boolean canAllocateTenantNodeTo(Node host, boolean dynamicProvisioning) {
    // Short-circuit order matters: the orchestrator lookup in suspended() is the costly check.
    boolean hostIsCandidate = host.type().canRun(NodeType.tenant)
                              && ! host.status().wantToRetire()
                              && ! host.allocation().map(alloc -> alloc.membership().retired()).orElse(false)
                              && ! suspended(host);
    if ( ! hostIsCandidate) return false;
    return dynamicProvisioning
           ? EnumSet.of(Node.State.active, Node.State.ready, Node.State.provisioned).contains(host.state())
           : host.state() == Node.State.active;
}
if (suspended(host)) return false;
/**
 * Returns whether the given host is currently a valid target for allocating tenant nodes.
 *
 * @param host the candidate host
 * @param dynamicProvisioning whether the zone provisions hosts dynamically
 * @return true if tenant nodes may be allocated to the host
 */
public boolean canAllocateTenantNodeTo(Node host, boolean dynamicProvisioning) {
    // The host node type must be able to run tenant nodes at all
    if ( ! host.type().canRun(NodeType.tenant)) return false;
    // Hosts on their way out of the fleet must not receive new allocations
    if (host.status().wantToRetire()) return false;
    // Likewise for hosts whose own cluster membership is retired
    if (host.allocation().map(alloc -> alloc.membership().retired()).orElse(false)) return false;
    // Suspended hosts are undergoing maintenance; one orchestrator request per host —
    // NOTE(review): potentially expensive when evaluated for many hosts, confirm
    if (suspended(host)) return false;
    // With dynamic provisioning a host may be used before it becomes active;
    // otherwise only active hosts can receive tenant nodes
    if (dynamicProvisioning)
        return EnumSet.of(Node.State.active, Node.State.ready, Node.State.provisioned).contains(host.state());
    else
        return host.state() == Node.State.active;
}
/**
 * Wraps access to the nodes of the node repository and the operations that move them between
 * states. All reads and writes go through the {@link CuratorDatabaseClient}; state-changing
 * operations take the appropriate application or unallocated-nodes lock, and multi-node changes
 * are applied through a {@link NestedTransaction} so they commit atomically.
 */
class Nodes {

    private static final Logger log = Logger.getLogger(Nodes.class.getName());

    private final CuratorDatabaseClient db;
    private final Zone zone;
    private final Clock clock;
    private final Orchestrator orchestrator;

    public Nodes(CuratorDatabaseClient db, Zone zone, Clock clock, Orchestrator orchestrator) {
        this.zone = zone;
        this.clock = clock;
        this.db = db;
        this.orchestrator = orchestrator;
    }

    /** Read and write all nodes to make sure they are stored in the latest version of the serialized format */
    public void rewrite() {
        Instant start = clock.instant();
        int nodesWritten = 0;
        for (Node.State state : Node.State.values()) {
            List<Node> nodes = db.readNodes(state);
            // Writing the nodes back re-serializes them in the current format
            db.writeTo(state, nodes, Agent.system, Optional.empty());
            nodesWritten += nodes.size();
        }
        Instant end = clock.instant();
        log.log(Level.INFO, String.format("Rewrote %d nodes in %s", nodesWritten, Duration.between(start, end)));
    }

    /**
     * Finds and returns the node with the hostname in any of the given states, or empty if not found
     *
     * @param hostname the full host name of the node
     * @param inState the states the node may be in. If no states are given, it will be returned from any state
     * @return the node, or empty if it was not found in any of the given states
     */
    public Optional<Node> node(String hostname, Node.State... inState) {
        return db.readNode(hostname, inState);
    }

    /**
     * Returns a list of nodes in this repository in any of the given states
     *
     * @param inState the states to return nodes from. If no states are given, all nodes of the given type are returned
     */
    public NodeList list(Node.State... inState) {
        return NodeList.copyOf(db.readNodes(inState));
    }

    /** Returns a locked list of all nodes in this repository */
    public LockedNodeList list(Mutex lock) {
        return new LockedNodeList(list().asList(), lock);
    }

    /**
     * Returns whether the zone managed by this node repository seems to be working.
     * If too many nodes are not responding, there is probably some zone-wide issue
     * and we should probably refrain from making changes to it.
     */
    public boolean isWorking() {
        NodeList activeNodes = list(Node.State.active);
        // Too few nodes to draw a statistically meaningful conclusion; assume healthy
        if (activeNodes.size() <= 5) return true;
        NodeList downNodes = activeNodes.down();
        // Healthy when at most 20% of active nodes are down
        return ! ( (double)downNodes.size() / (double)activeNodes.size() > 0.2 );
    }

    /** Adds a list of newly created reserved nodes to the node repository */
    public List<Node> addReservedNodes(LockedNodeList nodes) {
        for (Node node : nodes) {
            if ( node.flavor().getType() != Flavor.Type.DOCKER_CONTAINER)
                illegal("Cannot add " + node + ": This is not a child node");
            if (node.allocation().isEmpty())
                illegal("Cannot add " + node + ": Child nodes need to be allocated");
            Optional<Node> existing = node(node.hostname());
            if (existing.isPresent())
                illegal("Cannot add " + node + ": A node with this name already exists (" +
                        existing.get() + ", " + existing.get().history() + "). Node to be added: " +
                        node + ", " + node.history());
        }
        return db.addNodesInState(nodes.asList(), Node.State.reserved, Agent.system);
    }

    /**
     * Adds a list of (newly created) nodes to the node repository as provisioned nodes.
     * If any of the nodes already exists in the deprovisioned state, the new node will be merged
     * with the history of that node.
     */
    public List<Node> addNodes(List<Node> nodes, Agent agent) {
        try (Mutex lock = lockUnallocated()) {
            List<Node> nodesToAdd = new ArrayList<>();
            List<Node> nodesToRemove = new ArrayList<>();
            for (int i = 0; i < nodes.size(); i++) {
                var node = nodes.get(i);

                // Check for duplicates earlier in the argument list
                for (int j = 0; j < i; j++) {
                    if (node.equals(nodes.get(j)))
                        illegal("Cannot add nodes: " + node + " is duplicated in the argument list");
                }

                Optional<Node> existing = node(node.hostname());
                if (existing.isPresent()) {
                    // Only a deprovisioned node may be re-added under the same hostname
                    if (existing.get().state() != Node.State.deprovisioned)
                        illegal("Cannot add " + node + ": A node with this name already exists");
                    // Carry over history, reports, fail count and firmware verification from the old node
                    node = node.with(existing.get().history());
                    node = node.with(existing.get().reports());
                    node = node.with(node.status().withFailCount(existing.get().status().failCount()));
                    if (existing.get().status().firmwareVerifiedAt().isPresent())
                        node = node.with(node.status().withFirmwareVerifiedAt(existing.get().status().firmwareVerifiedAt().get()));
                    // A node being rebuilt keeps its retire/rebuild flags across re-provisioning
                    boolean rebuilding = existing.get().status().wantToRebuild();
                    if (rebuilding) {
                        node = node.with(node.status().withWantToRetire(existing.get().status().wantToRetire(),
                                                                       false,
                                                                       rebuilding));
                    }
                    nodesToRemove.add(existing.get());
                }
                nodesToAdd.add(node);
            }
            // Remove the old deprovisioned entries and add the new nodes in one transaction
            NestedTransaction transaction = new NestedTransaction();
            List<Node> resultingNodes = db.addNodesInState(IP.Config.verify(nodesToAdd, list(lock)),
                                                           Node.State.provisioned,
                                                           agent,
                                                           transaction);
            db.removeNodes(nodesToRemove, transaction);
            transaction.commit();
            return resultingNodes;
        }
    }

    /** Sets a list of nodes ready and returns the nodes in the ready state */
    public List<Node> setReady(List<Node> nodes, Agent agent, String reason) {
        try (Mutex lock = lockUnallocated()) {
            List<Node> nodesWithResetFields = nodes.stream()
                    .map(node -> {
                        if (node.state() != Node.State.provisioned && node.state() != Node.State.dirty)
                            illegal("Can not set " + node + " ready. It is not provisioned or dirty.");
                        // Clear any retire flags before the node becomes allocatable again
                        return node.withWantToRetire(false, false, false, Agent.system, clock.instant());
                    })
                    .collect(Collectors.toList());
            return db.writeTo(Node.State.ready, nodesWithResetFields, agent, Optional.of(reason));
        }
    }

    /** Sets a single node ready; a no-op if the node is already ready */
    public Node setReady(String hostname, Agent agent, String reason) {
        Node nodeToReady = requireNode(hostname);
        if (nodeToReady.state() == Node.State.ready) return nodeToReady;
        return setReady(List.of(nodeToReady), agent, reason).get(0);
    }

    /** Reserve nodes. This method does <b>not</b> lock the node repository */
    public List<Node> reserve(List<Node> nodes) {
        return db.writeTo(Node.State.reserved, nodes, Agent.application, Optional.empty());
    }

    /** Activate nodes. This method does <b>not</b> lock the node repository */
    public List<Node> activate(List<Node> nodes, NestedTransaction transaction) {
        return db.writeTo(Node.State.active, nodes, Agent.application, Optional.empty(), transaction);
    }

    /**
     * Sets a list of nodes to have their allocation removable (active to inactive) in the node repository.
     *
     * @param application the application the nodes belong to
     * @param nodes the nodes to make removable. These nodes MUST be in the active state.
     */
    public void setRemovable(ApplicationId application, List<Node> nodes) {
        try (Mutex lock = lock(application)) {
            List<Node> removableNodes = nodes.stream()
                    .map(node -> node.with(node.allocation().get().removable(true)))
                    .collect(Collectors.toList());
            write(removableNodes, lock);
        }
    }

    /**
     * Deactivates these nodes in a transaction and returns the nodes in the new state which will hold if the
     * transaction commits.
     */
    public List<Node> deactivate(List<Node> nodes, ApplicationTransaction transaction) {
        // Outside production (or in CD systems) deactivated nodes are recycled immediately
        if ( ! zone.environment().isProduction() || zone.system().isCd())
            return deallocate(nodes, Agent.application, "Deactivated by application", transaction.nested());

        // In production, stateless nodes are recycled while stateful nodes are kept inactive
        // so their data can be reused if the application is redeployed
        var stateless = NodeList.copyOf(nodes).stateless();
        var stateful  = NodeList.copyOf(nodes).stateful();
        List<Node> written = new ArrayList<>();
        written.addAll(deallocate(stateless.asList(), Agent.application, "Deactivated by application", transaction.nested()));
        written.addAll(db.writeTo(Node.State.inactive, stateful.asList(), Agent.application, Optional.empty(), transaction.nested()));
        return written;
    }

    /**
     * Fails these nodes in a transaction and returns the nodes in the new state which will hold if the
     * transaction commits.
     */
    public List<Node> fail(List<Node> nodes, ApplicationTransaction transaction) {
        return fail(nodes, Agent.application, "Failed by application", transaction.nested());
    }

    /** Fails these nodes immediately in their own transaction */
    public List<Node> fail(List<Node> nodes, Agent agent, String reason) {
        NestedTransaction transaction = new NestedTransaction();
        nodes = fail(nodes, agent, reason, transaction);
        transaction.commit();
        return nodes;
    }

    private List<Node> fail(List<Node> nodes, Agent agent, String reason, NestedTransaction transaction) {
        // Clear the want-to-fail flag as the nodes are now actually failed
        nodes = nodes.stream()
                .map(n -> n.withWantToFail(false, agent, clock.instant()))
                .collect(Collectors.toList());
        return db.writeTo(Node.State.failed, nodes, agent, Optional.of(reason), transaction);
    }

    /** Move nodes to the dirty state */
    public List<Node> deallocate(List<Node> nodes, Agent agent, String reason) {
        return performOn(NodeList.copyOf(nodes), (node, lock) -> deallocate(node, agent, reason));
    }

    /** Deallocates the given host and all of its children, if any */
    public List<Node> deallocateRecursively(String hostname, Agent agent, String reason) {
        Node nodeToDirty = node(hostname).orElseThrow(() ->
                new IllegalArgumentException("Could not deallocate " + hostname + ": Node not found"));

        // For a host, include its children; skip nodes that are already dirty
        List<Node> nodesToDirty =
                (nodeToDirty.type().isHost() ?
                 Stream.concat(list().childrenOf(hostname).asList().stream(), Stream.of(nodeToDirty)) :
                 Stream.of(nodeToDirty))
                .filter(node -> node.state() != Node.State.dirty)
                .collect(Collectors.toList());

        List<String> hostnamesNotAllowedToDirty = nodesToDirty.stream()
                .filter(node -> node.state() != Node.State.provisioned)
                .filter(node -> node.state() != Node.State.failed)
                .filter(node -> node.state() != Node.State.parked)
                .filter(node -> node.state() != Node.State.breakfixed)
                .map(Node::hostname)
                .collect(Collectors.toList());
        if ( ! hostnamesNotAllowedToDirty.isEmpty())
            illegal("Could not deallocate " + nodeToDirty + ": " +
                    hostnamesNotAllowedToDirty + " are not in states [provisioned, failed, parked, breakfixed]");

        return nodesToDirty.stream().map(node -> deallocate(node, agent, reason)).collect(Collectors.toList());
    }

    /**
     * Set a node dirty or parked, allowed if it is in the provisioned, inactive, failed or parked state.
     * Use this to clean newly provisioned nodes or to recycle failed nodes which have been repaired or put on hold.
     */
    public Node deallocate(Node node, Agent agent, String reason) {
        NestedTransaction transaction = new NestedTransaction();
        Node deallocated = deallocate(node, agent, reason, transaction);
        transaction.commit();
        return deallocated;
    }

    public List<Node> deallocate(List<Node> nodes, Agent agent, String reason, NestedTransaction transaction) {
        return nodes.stream().map(node -> deallocate(node, agent, reason, transaction)).collect(Collectors.toList());
    }

    public Node deallocate(Node node, Agent agent, String reason, NestedTransaction transaction) {
        // Nodes flagged for deprovisioning/rebuilding, or retired by an operator, are parked
        // instead of recycled — see parkOnDeallocationOf
        if (parkOnDeallocationOf(node, agent)) {
            return park(node.hostname(), false, agent, reason, transaction);
        } else {
            return db.writeTo(Node.State.dirty, List.of(node), agent, Optional.of(reason), transaction).get(0);
        }
    }

    /**
     * Fails this node and returns it in its new state.
     *
     * @return the node in its new state
     * @throws NoSuchNodeException if the node is not found
     */
    public Node fail(String hostname, Agent agent, String reason) {
        return fail(hostname, true, agent, reason);
    }

    public Node fail(String hostname, boolean keepAllocation, Agent agent, String reason) {
        return move(hostname, Node.State.failed, agent, keepAllocation, Optional.of(reason));
    }

    /**
     * Fails all the nodes that are children of hostname before finally failing the hostname itself.
     * Non-active nodes are failed immediately, while active nodes are marked as wantToFail.
     * The host is failed if it has no active nodes and marked wantToFail if it has.
     *
     * @return all the nodes that were changed by this request
     */
    public List<Node> failOrMarkRecursively(String hostname, Agent agent, String reason) {
        NodeList children = list().childrenOf(hostname);
        List<Node> changed = performOn(children, (node, lock) -> failOrMark(node, agent, reason, lock));

        if (children.state(Node.State.active).isEmpty())
            changed.add(move(hostname, Node.State.failed, agent, true, Optional.of(reason)));
        else
            changed.addAll(performOn(NodeList.of(node(hostname).orElseThrow()),
                                     (node, lock) -> failOrMark(node, agent, reason, lock)));

        return changed;
    }

    private Node failOrMark(Node node, Agent agent, String reason, Mutex lock) {
        if (node.state() == Node.State.active) {
            // Active nodes are only marked; actual failing happens later, elsewhere
            node = node.withWantToFail(true, agent, clock.instant());
            write(node, lock);
            return node;
        } else {
            return move(node.hostname(), Node.State.failed, agent, true, Optional.of(reason));
        }
    }

    /**
     * Parks this node and returns it in its new state.
     *
     * @return the node in its new state
     * @throws NoSuchNodeException if the node is not found
     */
    public Node park(String hostname, boolean keepAllocation, Agent agent, String reason) {
        NestedTransaction transaction = new NestedTransaction();
        Node parked = park(hostname, keepAllocation, agent, reason, transaction);
        transaction.commit();
        return parked;
    }

    private Node park(String hostname, boolean keepAllocation, Agent agent, String reason, NestedTransaction transaction) {
        return move(hostname, Node.State.parked, agent, keepAllocation, Optional.of(reason), transaction);
    }

    /**
     * Parks all the nodes that are children of hostname before finally parking the hostname itself.
     *
     * @return List of all the parked nodes in their new state
     */
    public List<Node> parkRecursively(String hostname, Agent agent, String reason) {
        return moveRecursively(hostname, Node.State.parked, agent, Optional.of(reason));
    }

    /**
     * Moves a previously failed or parked node back to the active state.
     *
     * @return the node in its new state
     * @throws NoSuchNodeException if the node is not found
     */
    public Node reactivate(String hostname, Agent agent, String reason) {
        return move(hostname, Node.State.active, agent, true, Optional.of(reason));
    }

    /**
     * Moves a host to breakfixed state, removing any children.
     */
    public List<Node> breakfixRecursively(String hostname, Agent agent, String reason) {
        Node node = requireNode(hostname);
        try (Mutex lock = lockUnallocated()) {
            requireBreakfixable(node);
            NestedTransaction transaction = new NestedTransaction();
            List<Node> removed = removeChildren(node, false, transaction);
            removed.add(move(node.hostname(), Node.State.breakfixed, agent, true, Optional.of(reason), transaction));
            transaction.commit();
            return removed;
        }
    }

    private List<Node> moveRecursively(String hostname, Node.State toState, Agent agent, Optional<String> reason) {
        NestedTransaction transaction = new NestedTransaction();
        // Children are moved before the host itself, all within one transaction
        List<Node> moved = list().childrenOf(hostname).asList().stream()
                .map(child -> move(child.hostname(), toState, agent, true, reason, transaction))
                .collect(Collectors.toList());
        moved.add(move(hostname, toState, agent, true, reason, transaction));
        transaction.commit();
        return moved;
    }

    /** Move a node to given state */
    private Node move(String hostname, Node.State toState, Agent agent, boolean keepAllocation,
                      Optional<String> reason) {
        NestedTransaction transaction = new NestedTransaction();
        Node moved = move(hostname, toState, agent, keepAllocation, reason, transaction);
        transaction.commit();
        return moved;
    }

    /** Move a node to given state as part of a transaction */
    private Node move(String hostname, Node.State toState, Agent agent, boolean keepAllocation,
                      Optional<String> reason, NestedTransaction transaction) {
        try (NodeMutex lock = lockAndGetRequired(hostname)) {
            Node node = lock.node();
            if (toState == Node.State.active) {
                if (node.allocation().isEmpty()) illegal("Could not set " + node + " active: It has no allocation");
                if (!keepAllocation) illegal("Could not set " + node + " active: Requested to discard allocation");
                // Guard against two active nodes holding the same cluster slot for the same application
                for (Node currentActive : list(Node.State.active).owner(node.allocation().get().owner())) {
                    if (node.allocation().get().membership().cluster().equals(currentActive.allocation().get().membership().cluster())
                        && node.allocation().get().membership().index() == currentActive.allocation().get().membership().index())
                        illegal("Could not set " + node + " active: Same cluster and index as " + currentActive);
                }
            }
            if (!keepAllocation && node.allocation().isPresent()) {
                node = node.withoutAllocation();
            }
            if (toState == Node.State.deprovisioned) {
                // Deprovisioned nodes keep no IP config, so their addresses can be reused
                node = node.with(IP.Config.EMPTY);
            }
            return db.writeTo(toState, List.of(node), agent, reason, transaction).get(0);
        }
    }

    /**
     * This method is used by the REST API to handle readying nodes for new allocations.
     * Tenant docker containers are removed from the node repository entirely, otherwise
     * the node is moved to state ready.
     */
    public Node markNodeAvailableForNewAllocation(String hostname, Agent agent, String reason) {
        Node node = requireNode(hostname);
        if (node.flavor().getType() == Flavor.Type.DOCKER_CONTAINER && node.type() == NodeType.tenant) {
            if (node.state() != Node.State.dirty)
                illegal("Cannot make " + node + " available for new allocation as it is not in state [dirty]");
            return removeRecursively(node, true).get(0);
        }

        if (node.state() == Node.State.ready) return node;

        // A child node cannot be readied if its parent host has hard failures
        Node parentHost = node.parentHostname().flatMap(this::node).orElse(node);
        List<String> failureReasons = NodeFailer.reasonsToFailHost(parentHost);
        if ( ! failureReasons.isEmpty())
            illegal(node + " cannot be readied because it has hard failures: " + failureReasons);

        return setReady(List.of(node), agent, reason).get(0);
    }

    /**
     * Removes all the nodes that are children of hostname before finally removing the hostname itself.
     *
     * @return a List of all the nodes that have been removed or (for hosts) deprovisioned
     */
    public List<Node> removeRecursively(String hostname) {
        Node node = requireNode(hostname);
        return removeRecursively(node, false);
    }

    public List<Node> removeRecursively(Node node, boolean force) {
        try (Mutex lock = lockUnallocated()) {
            requireRemovable(node, false, force);
            NestedTransaction transaction = new NestedTransaction();
            final List<Node> removed;
            if (!node.type().isHost()) {
                removed = List.of(node);
                db.removeNodes(removed, transaction);
            } else {
                removed = removeChildren(node, force, transaction);
                if (zone.getCloud().dynamicProvisioning()) {
                    // Dynamically provisioned hosts are gone for good when removed
                    db.removeNodes(List.of(node), transaction);
                } else {
                    // Statically provisioned hosts are kept as deprovisioned, preserving their history
                    move(node.hostname(), Node.State.deprovisioned, Agent.system, false, Optional.empty(), transaction);
                }
                removed.add(node);
            }
            transaction.commit();
            return removed;
        }
    }

    /** Forgets a deprovisioned node. This removes all traces of the node in the node repository. */
    public void forget(Node node) {
        if (node.state() != Node.State.deprovisioned)
            throw new IllegalArgumentException(node + " must be deprovisioned before it can be forgotten");
        if (node.status().wantToRebuild())
            throw new IllegalArgumentException(node + " is rebuilding and cannot be forgotten");
        NestedTransaction transaction = new NestedTransaction();
        db.removeNodes(List.of(node), transaction);
        transaction.commit();
    }

    private List<Node> removeChildren(Node node, boolean force, NestedTransaction transaction) {
        List<Node> children = list().childrenOf(node).asList();
        children.forEach(child -> requireRemovable(child, true, force));
        db.removeNodes(children, transaction);
        return new ArrayList<>(children);
    }

    /**
     * Throws if the given node cannot be removed. Removal is allowed if:
     *  - Tenant node:
     *    - non-recursively: node is unallocated
     *    - recursively: node is unallocated or node is in failed|parked
     *  - Host node: iff in state provisioned|failed|parked
     *  - Child node:
     *    - non-recursively: node in state ready
     *    - recursively: child is in state provisioned|failed|parked|dirty|ready
     */
    private void requireRemovable(Node node, boolean removingRecursively, boolean force) {
        if (force) return;

        if (node.type() == NodeType.tenant && node.allocation().isPresent()) {
            EnumSet<Node.State> removableStates = EnumSet.of(Node.State.failed, Node.State.parked);
            if (!removingRecursively || !removableStates.contains(node.state()))
                illegal(node + " is currently allocated and cannot be removed while in " + node.state());
        }

        final Set<Node.State> removableStates;
        if (node.type().isHost()) {
            removableStates = EnumSet.of(Node.State.provisioned, Node.State.failed, Node.State.parked);
        } else {
            removableStates = removingRecursively
                    ? EnumSet.of(Node.State.provisioned, Node.State.failed, Node.State.parked, Node.State.dirty, Node.State.ready)
                    : EnumSet.of(Node.State.ready);
        }
        if (!removableStates.contains(node.state()))
            illegal(node + " can not be removed while in " + node.state());
    }

    /**
     * Throws if given node cannot be breakfixed.
     * Breakfix is allowed if the following is true:
     *  - Node is tenant host
     *  - Node is in zone without dynamic provisioning
     *  - Node is in parked or failed state
     */
    private void requireBreakfixable(Node node) {
        if (zone.getCloud().dynamicProvisioning()) {
            illegal("Can not breakfix in zone: " + zone);
        }
        if (node.type() != NodeType.host) {
            illegal(node + " can not be breakfixed as it is not a tenant host");
        }
        Set<Node.State> legalStates = EnumSet.of(Node.State.failed, Node.State.parked);
        if (! legalStates.contains(node.state())) {
            illegal(node + " can not be removed as it is not in the states " + legalStates);
        }
    }

    /**
     * Increases the restart generation of the active nodes matching given filter.
     *
     * @return the nodes in their new state
     */
    public List<Node> restartActive(Predicate<Node> filter) {
        return restart(NodeFilter.in(Set.of(Node.State.active)).and(filter));
    }

    /**
     * Increases the restart generation of the any nodes matching given filter.
     *
     * @return the nodes in their new state
     */
    public List<Node> restart(Predicate<Node> filter) {
        return performOn(filter, (node, lock) ->
                write(node.withRestart(node.allocation().get().restartGeneration().withIncreasedWanted()), lock));
    }

    /**
     * Increases the reboot generation of the nodes matching the filter.
     *
     * @return the nodes in their new state
     */
    public List<Node> reboot(Predicate<Node> filter) {
        return performOn(filter, (node, lock) ->
                write(node.withReboot(node.status().reboot().withIncreasedWanted()), lock));
    }

    /**
     * Set target OS version of all nodes matching given filter.
     *
     * @return the nodes in their new state
     */
    public List<Node> upgradeOs(Predicate<Node> filter, Optional<Version> version) {
        return performOn(filter, (node, lock) -> {
            var newStatus = node.status().withOsVersion(node.status().osVersion().withWanted(version));
            return write(node.with(newStatus), lock);
        });
    }

    /** Retire nodes matching given filter */
    public List<Node> retire(Predicate<Node> filter, Agent agent, Instant instant) {
        return performOn(filter, (node, lock) -> write(node.withWantToRetire(true, agent, instant), lock));
    }

    /** Retire and deprovision given host and all of its children */
    public List<Node> deprovision(String hostname, Agent agent, Instant instant) {
        return decommission(hostname, DecommissionOperation.deprovision, agent, instant);
    }

    /** Retire and rebuild given host and all of its children */
    public List<Node> rebuild(String hostname, Agent agent, Instant instant) {
        return decommission(hostname, DecommissionOperation.rebuild, agent, instant);
    }

    private List<Node> decommission(String hostname, DecommissionOperation op, Agent agent, Instant instant) {
        Optional<NodeMutex> nodeMutex = lockAndGet(hostname);
        if (nodeMutex.isEmpty()) return List.of();
        Node host = nodeMutex.get().node();
        if (!host.type().isHost()) throw new IllegalArgumentException("Cannot " + op + " non-host " + host);

        List<Node> result;
        boolean wantToDeprovision = op == DecommissionOperation.deprovision;
        boolean wantToRebuild = op == DecommissionOperation.rebuild;
        try (NodeMutex lock = nodeMutex.get(); Mutex allocationLock = lockUnallocated()) {
            // Re-read the host under both locks before making changes
            host = lock.node();
            result = performOn(list(allocationLock).childrenOf(host), (node, nodeLock) -> {
                Node newNode = node.withWantToRetire(true, wantToDeprovision, wantToRebuild, agent, instant);
                return write(newNode, nodeLock);
            });
            Node newHost = host.withWantToRetire(true, wantToDeprovision, wantToRebuild, agent, instant);
            result.add(write(newHost, lock));
        }
        return result;
    }

    /**
     * Writes this node after it has changed some internal state but NOT changed its state field.
     * This does NOT lock the node repository implicitly, but callers are expected to already hold the lock.
     *
     * @param lock already acquired lock
     * @return the written node for convenience
     */
    public Node write(Node node, Mutex lock) { return write(List.of(node), lock).get(0); }

    /**
     * Writes these nodes after they have changed some internal state but NOT changed their state field.
     * This does NOT lock the node repository implicitly, but callers are expected to already hold the lock.
     *
     * @param lock already acquired lock
     * @return the written nodes for convenience
     */
    public List<Node> write(List<Node> nodes, @SuppressWarnings("unused") Mutex lock) {
        return db.writeTo(nodes, Agent.system, Optional.empty());
    }

    private List<Node> performOn(Predicate<Node> filter, BiFunction<Node, Mutex, Node> action) {
        return performOn(list().matching(filter), action);
    }

    /**
     * Performs an operation requiring locking on all nodes matching some filter.
     *
     * @param action the action to perform
     * @return the set of nodes on which the action was performed, as they became as a result of the operation
     */
    private List<Node> performOn(NodeList nodes, BiFunction<Node, Mutex, Node> action) {
        // Partition the nodes by which lock governs them: the unallocated lock, or their owner's lock
        List<Node> unallocatedNodes = new ArrayList<>();
        ListMap<ApplicationId, Node> allocatedNodes = new ListMap<>();
        for (Node node : nodes) {
            if (node.allocation().isPresent())
                allocatedNodes.put(node.allocation().get().owner(), node);
            else
                unallocatedNodes.add(node);
        }

        List<Node> resultingNodes = new ArrayList<>();
        try (Mutex lock = lockUnallocated()) {
            for (Node node : unallocatedNodes) {
                // Re-read each node under the lock; it may have been removed since listing
                Optional<Node> currentNode = db.readNode(node.hostname());
                if (currentNode.isEmpty()) continue;
                resultingNodes.add(action.apply(currentNode.get(), lock));
            }
        }
        for (Map.Entry<ApplicationId, List<Node>> applicationNodes : allocatedNodes.entrySet()) {
            try (Mutex lock = lock(applicationNodes.getKey())) {
                for (Node node : applicationNodes.getValue()) {
                    Optional<Node> currentNode = db.readNode(node.hostname());
                    if (currentNode.isEmpty()) continue;
                    resultingNodes.add(action.apply(currentNode.get(), lock));
                }
            }
        }
        return resultingNodes;
    }

    /** Returns whether a tenant node may be allocated to the given host, using this zone's provisioning mode */
    public boolean canAllocateTenantNodeTo(Node host) {
        return canAllocateTenantNodeTo(host, zone.getCloud().dynamicProvisioning());
    }

    /** Returns whether the orchestrator reports this node as suspended; makes one orchestrator request per call */
    public boolean suspended(Node node) {
        try {
            return orchestrator.getNodeStatus(new HostName(node.hostname())).isSuspended();
        } catch (HostNameNotFoundException e) {
            // A node unknown to the orchestrator cannot be suspended
            return false;
        }
    }

    /** Create a lock which provides exclusive rights to making changes to the given application */
    public Mutex lock(ApplicationId application) { return db.lock(application); }

    /** Create a lock with a timeout which provides exclusive rights to making changes to the given application */
    public Mutex lock(ApplicationId application, Duration timeout) { return db.lock(application, timeout); }

    /** Create a lock which provides exclusive rights to modifying unallocated nodes */
    public Mutex lockUnallocated() { return db.lockInactive(); }

    /** Returns the unallocated/application lock, and the node acquired under that lock. */
    public Optional<NodeMutex> lockAndGet(Node node) {
        Node staleNode = node;
        final int maxRetries = 4;
        // The governing lock depends on the node's owner, which may change between the read and the
        // lock acquisition — retry until the owner is stable under the lock we acquired
        for (int i = 0; i < maxRetries; ++i) {
            Mutex lockToClose = lock(staleNode);
            try {
                Optional<Node> freshNode = node(staleNode.hostname(), staleNode.state());
                if (freshNode.isEmpty()) {
                    freshNode = node(staleNode.hostname());
                    if (freshNode.isEmpty()) {
                        return Optional.empty();
                    }
                }

                if (Objects.equals(freshNode.get().allocation().map(Allocation::owner),
                                   staleNode.allocation().map(Allocation::owner))) {
                    NodeMutex nodeMutex = new NodeMutex(freshNode.get(), lockToClose);
                    // Ownership of the lock passes to the returned NodeMutex
                    lockToClose = null;
                    return Optional.of(nodeMutex);
                }

                // Owner changed while acquiring the lock; retry with the fresh node
                staleNode = freshNode.get();
            } finally {
                if (lockToClose != null) lockToClose.close();
            }
        }
        throw new IllegalStateException("Giving up (after " + maxRetries + " attempts) " +
                                        "fetching an up to date node under lock: " + node.hostname());
    }

    /** Returns the unallocated/application lock, and the node acquired under that lock. */
    public Optional<NodeMutex> lockAndGet(String hostname) { return node(hostname).flatMap(this::lockAndGet); }

    /** Returns the unallocated/application lock, and the node acquired under that lock. */
    public NodeMutex lockAndGetRequired(Node node) {
        return lockAndGet(node).orElseThrow(() -> new NoSuchNodeException("No node with hostname '" + node.hostname() + "'"));
    }

    /** Returns the unallocated/application lock, and the node acquired under that lock. */
    public NodeMutex lockAndGetRequired(String hostname) {
        return lockAndGet(hostname).orElseThrow(() -> new NoSuchNodeException("No node with hostname '" + hostname + "'"));
    }

    private Mutex lock(Node node) {
        return node.allocation().isPresent() ? lock(node.allocation().get().owner()) : lockUnallocated();
    }

    private Node requireNode(String hostname) {
        return node(hostname).orElseThrow(() -> new NoSuchNodeException("No node with hostname '" + hostname + "'"));
    }

    private void illegal(String message) {
        throw new IllegalArgumentException(message);
    }

    /** Returns whether node should be parked when deallocated by given agent */
    private static boolean parkOnDeallocationOf(Node node, Agent agent) {
        if (node.state() == Node.State.parked) return false;
        if (agent == Agent.operator) return false;
        if (!node.type().isHost() && node.status().wantToDeprovision()) return false;
        boolean retirementRequestedByOperator = node.status().wantToRetire() &&
                                                node.history().event(History.Event.Type.wantToRetire)
                                                    .map(History.Event::agent)
                                                    .map(a -> a == Agent.operator)
                                                    .orElse(false);
        return node.status().wantToDeprovision() || node.status().wantToRebuild() || retirementRequestedByOperator;
    }

    /** The different ways a host can be decommissioned */
    private enum DecommissionOperation {
        deprovision,
        rebuild,
    }

}
/**
 * The nodes of the node repository and the state transitions between them, persisted through
 * {@link CuratorDatabaseClient}. Most mutating operations take or acquire a {@link Mutex}:
 * the application lock for allocated nodes, or the "unallocated" lock otherwise.
 */
class Nodes {

    private static final Logger log = Logger.getLogger(Nodes.class.getName());

    private final CuratorDatabaseClient db;
    private final Zone zone;
    private final Clock clock;
    private final Orchestrator orchestrator;

    public Nodes(CuratorDatabaseClient db, Zone zone, Clock clock, Orchestrator orchestrator) {
        this.zone = zone;
        this.clock = clock;
        this.db = db;
        this.orchestrator = orchestrator;
    }

    /** Read and write all nodes to make sure they are stored in the latest version of the serialized format */
    public void rewrite() {
        Instant start = clock.instant();
        int nodesWritten = 0;
        for (Node.State state : Node.State.values()) {
            List<Node> nodes = db.readNodes(state);
            // Re-serializing the nodes without modification upgrades them to the current wire format
            db.writeTo(state, nodes, Agent.system, Optional.empty());
            nodesWritten += nodes.size();
        }
        Instant end = clock.instant();
        log.log(Level.INFO, String.format("Rewrote %d nodes in %s", nodesWritten, Duration.between(start, end)));
    }

    /**
     * Finds and returns the node with the hostname in any of the given states, or empty if not found
     *
     * @param hostname the full host name of the node
     * @param inState the states the node may be in. If no states are given, it will be returned from any state
     * @return the node, or empty if it was not found in any of the given states
     */
    public Optional<Node> node(String hostname, Node.State... inState) {
        return db.readNode(hostname, inState);
    }

    /**
     * Returns a list of nodes in this repository in any of the given states
     *
     * @param inState the states to return nodes from. If no states are given, all nodes of the given type are returned
     */
    public NodeList list(Node.State... inState) {
        return NodeList.copyOf(db.readNodes(inState));
    }

    /** Returns a locked list of all nodes in this repository */
    public LockedNodeList list(Mutex lock) {
        return new LockedNodeList(list().asList(), lock);
    }

    /**
     * Returns whether the zone managed by this node repository seems to be working.
     * If too many nodes are not responding, there is probably some zone-wide issue
     * and we should probably refrain from making changes to it.
     */
    public boolean isWorking() {
        NodeList activeNodes = list(Node.State.active);
        if (activeNodes.size() <= 5) return true; // Too few nodes to draw a conclusion: assume the zone works
        NodeList downNodes = activeNodes.down();
        // More than 20% of active nodes down is taken as a zone-wide problem
        return ! ( (double)downNodes.size() / (double)activeNodes.size() > 0.2 );
    }

    /** Adds a list of newly created reserved nodes to the node repository */
    public List<Node> addReservedNodes(LockedNodeList nodes) {
        for (Node node : nodes) {
            if ( node.flavor().getType() != Flavor.Type.DOCKER_CONTAINER)
                illegal("Cannot add " + node + ": This is not a child node");
            if (node.allocation().isEmpty())
                illegal("Cannot add " + node + ": Child nodes need to be allocated");
            Optional<Node> existing = node(node.hostname());
            if (existing.isPresent())
                illegal("Cannot add " + node + ": A node with this name already exists (" +
                        existing.get() + ", " + existing.get().history() + "). Node to be added: " +
                        node + ", " + node.history());
        }
        return db.addNodesInState(nodes.asList(), Node.State.reserved, Agent.system);
    }

    /**
     * Adds a list of (newly created) nodes to the node repository as provisioned nodes.
     * If any of the nodes already exists in the deprovisioned state, the new node will be merged
     * with the history of that node.
     */
    public List<Node> addNodes(List<Node> nodes, Agent agent) {
        try (Mutex lock = lockUnallocated()) {
            List<Node> nodesToAdd = new ArrayList<>();
            List<Node> nodesToRemove = new ArrayList<>();
            for (int i = 0; i < nodes.size(); i++) {
                var node = nodes.get(i);

                // Check for duplicates within the argument list itself
                for (int j = 0; j < i; j++) {
                    if (node.equals(nodes.get(j)))
                        illegal("Cannot add nodes: " + node + " is duplicated in the argument list");
                }

                Optional<Node> existing = node(node.hostname());
                if (existing.isPresent()) {
                    if (existing.get().state() != Node.State.deprovisioned)
                        illegal("Cannot add " + node + ": A node with this name already exists");
                    // Merge selected state from the deprovisioned node into the new one
                    node = node.with(existing.get().history());
                    node = node.with(existing.get().reports());
                    node = node.with(node.status().withFailCount(existing.get().status().failCount()));
                    if (existing.get().status().firmwareVerifiedAt().isPresent())
                        node = node.with(node.status().withFirmwareVerifiedAt(existing.get().status().firmwareVerifiedAt().get()));
                    boolean rebuilding = existing.get().status().wantToRebuild();
                    if (rebuilding) {
                        node = node.with(node.status().withWantToRetire(existing.get().status().wantToRetire(),
                                                                       false,
                                                                       rebuilding));
                    }
                    nodesToRemove.add(existing.get());
                }

                nodesToAdd.add(node);
            }
            // Replace the deprovisioned nodes with the merged ones atomically
            NestedTransaction transaction = new NestedTransaction();
            List<Node> resultingNodes = db.addNodesInState(IP.Config.verify(nodesToAdd, list(lock)), Node.State.provisioned, agent, transaction);
            db.removeNodes(nodesToRemove, transaction);
            transaction.commit();
            return resultingNodes;
        }
    }

    /** Sets a list of nodes ready and returns the nodes in the ready state */
    public List<Node> setReady(List<Node> nodes, Agent agent, String reason) {
        try (Mutex lock = lockUnallocated()) {
            List<Node> nodesWithResetFields = nodes.stream()
                    .map(node -> {
                        if (node.state() != Node.State.provisioned && node.state() != Node.State.dirty)
                            illegal("Can not set " + node + " ready. It is not provisioned or dirty.");
                        // Clear any retirement flags before making the node available again
                        return node.withWantToRetire(false, false, false, Agent.system, clock.instant());
                    })
                    .collect(Collectors.toList());
            return db.writeTo(Node.State.ready, nodesWithResetFields, agent, Optional.of(reason));
        }
    }

    public Node setReady(String hostname, Agent agent, String reason) {
        Node nodeToReady = requireNode(hostname);
        if (nodeToReady.state() == Node.State.ready) return nodeToReady; // Already ready: no-op
        return setReady(List.of(nodeToReady), agent, reason).get(0);
    }

    /** Reserve nodes. This method does <b>not</b> lock the node repository */
    public List<Node> reserve(List<Node> nodes) {
        return db.writeTo(Node.State.reserved, nodes, Agent.application, Optional.empty());
    }

    /** Activate nodes. This method does <b>not</b> lock the node repository */
    public List<Node> activate(List<Node> nodes, NestedTransaction transaction) {
        return db.writeTo(Node.State.active, nodes, Agent.application, Optional.empty(), transaction);
    }

    /**
     * Sets a list of nodes to have their allocation removable (active to inactive) in the node repository.
     *
     * @param application the application the nodes belong to
     * @param nodes the nodes to make removable. These nodes MUST be in the active state.
     */
    public void setRemovable(ApplicationId application, List<Node> nodes) {
        try (Mutex lock = lock(application)) {
            List<Node> removableNodes = nodes.stream()
                                             .map(node -> node.with(node.allocation().get().removable(true)))
                                             .collect(Collectors.toList());
            write(removableNodes, lock);
        }
    }

    /**
     * Deactivates these nodes in a transaction and returns the nodes in the new state which will hold if the
     * transaction commits.
     */
    public List<Node> deactivate(List<Node> nodes, ApplicationTransaction transaction) {
        // Outside production (or in CD systems), deactivated nodes are recycled immediately
        if ( ! zone.environment().isProduction() || zone.system().isCd())
            return deallocate(nodes, Agent.application, "Deactivated by application", transaction.nested());

        // In production, stateful nodes are kept inactive so their data survives redeployment
        var stateless = NodeList.copyOf(nodes).stateless();
        var stateful = NodeList.copyOf(nodes).stateful();
        List<Node> written = new ArrayList<>();
        written.addAll(deallocate(stateless.asList(), Agent.application, "Deactivated by application", transaction.nested()));
        written.addAll(db.writeTo(Node.State.inactive, stateful.asList(), Agent.application, Optional.empty(), transaction.nested()));
        return written;
    }

    /**
     * Fails these nodes in a transaction and returns the nodes in the new state which will hold if the
     * transaction commits.
     */
    public List<Node> fail(List<Node> nodes, ApplicationTransaction transaction) {
        return fail(nodes, Agent.application, "Failed by application", transaction.nested());
    }

    public List<Node> fail(List<Node> nodes, Agent agent, String reason) {
        NestedTransaction transaction = new NestedTransaction();
        nodes = fail(nodes, agent, reason, transaction);
        transaction.commit();
        return nodes;
    }

    private List<Node> fail(List<Node> nodes, Agent agent, String reason, NestedTransaction transaction) {
        nodes = nodes.stream()
                     // Clear the wantToFail flag as the node is being moved to failed now
                     .map(n -> n.withWantToFail(false, agent, clock.instant()))
                     .collect(Collectors.toList());
        return db.writeTo(Node.State.failed, nodes, agent, Optional.of(reason), transaction);
    }

    /** Move nodes to the dirty state */
    public List<Node> deallocate(List<Node> nodes, Agent agent, String reason) {
        return performOn(NodeList.copyOf(nodes), (node, lock) -> deallocate(node, agent, reason));
    }

    public List<Node> deallocateRecursively(String hostname, Agent agent, String reason) {
        Node nodeToDirty = node(hostname).orElseThrow(() ->
                new IllegalArgumentException("Could not deallocate " + hostname + ": Node not found"));

        // For hosts, include all children; skip nodes already dirty
        List<Node> nodesToDirty =
                (nodeToDirty.type().isHost() ?
                 Stream.concat(list().childrenOf(hostname).asList().stream(), Stream.of(nodeToDirty)) :
                 Stream.of(nodeToDirty))
                        .filter(node -> node.state() != Node.State.dirty)
                        .collect(Collectors.toList());

        List<String> hostnamesNotAllowedToDirty = nodesToDirty.stream()
                                                              .filter(node -> node.state() != Node.State.provisioned)
                                                              .filter(node -> node.state() != Node.State.failed)
                                                              .filter(node -> node.state() != Node.State.parked)
                                                              .filter(node -> node.state() != Node.State.breakfixed)
                                                              .map(Node::hostname)
                                                              .collect(Collectors.toList());
        if ( ! hostnamesNotAllowedToDirty.isEmpty())
            illegal("Could not deallocate " + nodeToDirty + ": " +
                    hostnamesNotAllowedToDirty + " are not in states [provisioned, failed, parked, breakfixed]");

        return nodesToDirty.stream().map(node -> deallocate(node, agent, reason)).collect(Collectors.toList());
    }

    /**
     * Set a node dirty or parked, allowed if it is in the provisioned, inactive, failed or parked state.
     * Use this to clean newly provisioned nodes or to recycle failed nodes which have been repaired or put on hold.
     */
    public Node deallocate(Node node, Agent agent, String reason) {
        NestedTransaction transaction = new NestedTransaction();
        Node deallocated = deallocate(node, agent, reason, transaction);
        transaction.commit();
        return deallocated;
    }

    public List<Node> deallocate(List<Node> nodes, Agent agent, String reason, NestedTransaction transaction) {
        return nodes.stream().map(node -> deallocate(node, agent, reason, transaction)).collect(Collectors.toList());
    }

    public Node deallocate(Node node, Agent agent, String reason, NestedTransaction transaction) {
        if (parkOnDeallocationOf(node, agent)) {
            // Nodes pending decommissioning are parked instead of recycled
            return park(node.hostname(), false, agent, reason, transaction);
        } else {
            return db.writeTo(Node.State.dirty, List.of(node), agent, Optional.of(reason), transaction).get(0);
        }
    }

    /**
     * Fails this node and returns it in its new state.
     *
     * @return the node in its new state
     * @throws NoSuchNodeException if the node is not found
     */
    public Node fail(String hostname, Agent agent, String reason) {
        return fail(hostname, true, agent, reason);
    }

    public Node fail(String hostname, boolean keepAllocation, Agent agent, String reason) {
        return move(hostname, Node.State.failed, agent, keepAllocation, Optional.of(reason));
    }

    /**
     * Fails all the nodes that are children of hostname before finally failing the hostname itself.
     * Non-active nodes are failed immediately, while active nodes are marked as wantToFail.
     * The host is failed if it has no active nodes and marked wantToFail if it has.
     *
     * @return all the nodes that were changed by this request
     */
    public List<Node> failOrMarkRecursively(String hostname, Agent agent, String reason) {
        NodeList children = list().childrenOf(hostname);
        List<Node> changed = performOn(children, (node, lock) -> failOrMark(node, agent, reason, lock));

        if (children.state(Node.State.active).isEmpty())
            changed.add(move(hostname, Node.State.failed, agent, true, Optional.of(reason)));
        else
            changed.addAll(performOn(NodeList.of(node(hostname).orElseThrow()), (node, lock) -> failOrMark(node, agent, reason, lock)));

        return changed;
    }

    private Node failOrMark(Node node, Agent agent, String reason, Mutex lock) {
        if (node.state() == Node.State.active) {
            // Active nodes are only flagged; actual failing happens later (presumably by a maintainer — not visible here)
            node = node.withWantToFail(true, agent, clock.instant());
            write(node, lock);
            return node;
        } else {
            return move(node.hostname(), Node.State.failed, agent, true, Optional.of(reason));
        }
    }

    /**
     * Parks this node and returns it in its new state.
     *
     * @return the node in its new state
     * @throws NoSuchNodeException if the node is not found
     */
    public Node park(String hostname, boolean keepAllocation, Agent agent, String reason) {
        NestedTransaction transaction = new NestedTransaction();
        Node parked = park(hostname, keepAllocation, agent, reason, transaction);
        transaction.commit();
        return parked;
    }

    private Node park(String hostname, boolean keepAllocation, Agent agent, String reason, NestedTransaction transaction) {
        return move(hostname, Node.State.parked, agent, keepAllocation, Optional.of(reason), transaction);
    }

    /**
     * Parks all the nodes that are children of hostname before finally parking the hostname itself.
     *
     * @return List of all the parked nodes in their new state
     */
    public List<Node> parkRecursively(String hostname, Agent agent, String reason) {
        return moveRecursively(hostname, Node.State.parked, agent, Optional.of(reason));
    }

    /**
     * Moves a previously failed or parked node back to the active state.
     *
     * @return the node in its new state
     * @throws NoSuchNodeException if the node is not found
     */
    public Node reactivate(String hostname, Agent agent, String reason) {
        return move(hostname, Node.State.active, agent, true, Optional.of(reason));
    }

    /**
     * Moves a host to breakfixed state, removing any children.
     */
    public List<Node> breakfixRecursively(String hostname, Agent agent, String reason) {
        Node node = requireNode(hostname);
        try (Mutex lock = lockUnallocated()) {
            requireBreakfixable(node);
            NestedTransaction transaction = new NestedTransaction();
            List<Node> removed = removeChildren(node, false, transaction);
            removed.add(move(node.hostname(), Node.State.breakfixed, agent, true, Optional.of(reason), transaction));
            transaction.commit();
            return removed;
        }
    }

    private List<Node> moveRecursively(String hostname, Node.State toState, Agent agent, Optional<String> reason) {
        NestedTransaction transaction = new NestedTransaction();
        // Children first, then the node itself, all in one transaction
        List<Node> moved = list().childrenOf(hostname).asList().stream()
                                 .map(child -> move(child.hostname(), toState, agent, true, reason, transaction))
                                 .collect(Collectors.toList());
        moved.add(move(hostname, toState, agent, true, reason, transaction));
        transaction.commit();
        return moved;
    }

    /** Move a node to given state */
    private Node move(String hostname, Node.State toState, Agent agent, boolean keepAllocation, Optional<String> reason) {
        NestedTransaction transaction = new NestedTransaction();
        Node moved = move(hostname, toState, agent, keepAllocation, reason, transaction);
        transaction.commit();
        return moved;
    }

    /** Move a node to given state as part of a transaction */
    private Node move(String hostname, Node.State toState, Agent agent, boolean keepAllocation, Optional<String> reason,
                      NestedTransaction transaction) {
        try (NodeMutex lock = lockAndGetRequired(hostname)) {
            Node node = lock.node();
            if (toState == Node.State.active) {
                if (node.allocation().isEmpty()) illegal("Could not set " + node + " active: It has no allocation");
                if (!keepAllocation) illegal("Could not set " + node + " active: Requested to discard allocation");
                // Guard against two active nodes occupying the same cluster slot of the same application
                for (Node currentActive : list(Node.State.active).owner(node.allocation().get().owner())) {
                    if (node.allocation().get().membership().cluster().equals(currentActive.allocation().get().membership().cluster()) &&
                        node.allocation().get().membership().index() == currentActive.allocation().get().membership().index())
                        illegal("Could not set " + node + " active: Same cluster and index as " + currentActive);
                }
            }
            if (!keepAllocation && node.allocation().isPresent()) {
                node = node.withoutAllocation();
            }
            if (toState == Node.State.deprovisioned) {
                node = node.with(IP.Config.EMPTY); // Release the node's IP addresses
            }
            return db.writeTo(toState, List.of(node), agent, reason, transaction).get(0);
        }
    }

    /*
     * This method is used by the REST API to handle readying nodes for new allocations. For Linux
     * containers this will remove the node from node repository, otherwise the node will be moved to state ready.
     */
    public Node markNodeAvailableForNewAllocation(String hostname, Agent agent, String reason) {
        Node node = requireNode(hostname);
        if (node.flavor().getType() == Flavor.Type.DOCKER_CONTAINER && node.type() == NodeType.tenant) {
            if (node.state() != Node.State.dirty)
                illegal("Cannot make " + node + " available for new allocation as it is not in state [dirty]");
            return removeRecursively(node, true).get(0);
        }

        if (node.state() == Node.State.ready) return node;

        // Refuse to ready a node whose (parent) host has hard failures
        Node parentHost = node.parentHostname().flatMap(this::node).orElse(node);
        List<String> failureReasons = NodeFailer.reasonsToFailHost(parentHost);
        if ( ! failureReasons.isEmpty())
            illegal(node + " cannot be readied because it has hard failures: " + failureReasons);

        return setReady(List.of(node), agent, reason).get(0);
    }

    /**
     * Removes all the nodes that are children of hostname before finally removing the hostname itself.
     *
     * @return a List of all the nodes that have been removed or (for hosts) deprovisioned
     */
    public List<Node> removeRecursively(String hostname) {
        Node node = requireNode(hostname);
        return removeRecursively(node, false);
    }

    public List<Node> removeRecursively(Node node, boolean force) {
        try (Mutex lock = lockUnallocated()) {
            requireRemovable(node, false, force);
            NestedTransaction transaction = new NestedTransaction();
            final List<Node> removed;
            if (!node.type().isHost()) {
                removed = List.of(node);
                db.removeNodes(removed, transaction);
            } else {
                removed = removeChildren(node, force, transaction);
                if (zone.getCloud().dynamicProvisioning()) {
                    // Dynamically provisioned hosts are gone for good
                    db.removeNodes(List.of(node), transaction);
                } else {
                    // Statically provisioned hosts are kept as deprovisioned so they can be re-added later
                    move(node.hostname(), Node.State.deprovisioned, Agent.system, false, Optional.empty(), transaction);
                }
                removed.add(node);
            }
            transaction.commit();
            return removed;
        }
    }

    /** Forgets a deprovisioned node. This removes all traces of the node in the node repository. */
    public void forget(Node node) {
        if (node.state() != Node.State.deprovisioned)
            throw new IllegalArgumentException(node + " must be deprovisioned before it can be forgotten");
        if (node.status().wantToRebuild())
            throw new IllegalArgumentException(node + " is rebuilding and cannot be forgotten");
        NestedTransaction transaction = new NestedTransaction();
        db.removeNodes(List.of(node), transaction);
        transaction.commit();
    }

    private List<Node> removeChildren(Node node, boolean force, NestedTransaction transaction) {
        List<Node> children = list().childrenOf(node).asList();
        children.forEach(child -> requireRemovable(child, true, force));
        db.removeNodes(children, transaction);
        return new ArrayList<>(children); // Mutable copy so the host itself can be appended by callers
    }

    /**
     * Throws if the given node cannot be removed. Removal is allowed if:
     *  - Tenant node:
     *    - non-recursively: node is unallocated
     *    - recursively: node is unallocated or node is in failed|parked
     *  - Host node: iff in state provisioned|failed|parked
     *  - Child node:
     *    - non-recursively: node in state ready
     *    - recursively: child is in state provisioned|failed|parked|dirty|ready
     */
    private void requireRemovable(Node node, boolean removingRecursively, boolean force) {
        if (force) return;

        if (node.type() == NodeType.tenant && node.allocation().isPresent()) {
            EnumSet<Node.State> removableStates = EnumSet.of(Node.State.failed, Node.State.parked);
            if (!removingRecursively || !removableStates.contains(node.state()))
                illegal(node + " is currently allocated and cannot be removed while in " + node.state());
        }

        final Set<Node.State> removableStates;
        if (node.type().isHost()) {
            removableStates = EnumSet.of(Node.State.provisioned, Node.State.failed, Node.State.parked);
        } else {
            removableStates = removingRecursively
                    ? EnumSet.of(Node.State.provisioned, Node.State.failed, Node.State.parked, Node.State.dirty, Node.State.ready)
                    : EnumSet.of(Node.State.ready);
        }
        if (!removableStates.contains(node.state()))
            illegal(node + " can not be removed while in " + node.state());
    }

    /**
     * Throws if given node cannot be breakfixed.
     * Breakfix is allowed if the following is true:
     *  - Node is tenant host
     *  - Node is in zone without dynamic provisioning
     *  - Node is in parked or failed state
     */
    private void requireBreakfixable(Node node) {
        if (zone.getCloud().dynamicProvisioning()) {
            illegal("Can not breakfix in zone: " + zone);
        }
        if (node.type() != NodeType.host) {
            illegal(node + " can not be breakfixed as it is not a tenant host");
        }
        Set<Node.State> legalStates = EnumSet.of(Node.State.failed, Node.State.parked);
        if (! legalStates.contains(node.state())) {
            illegal(node + " can not be removed as it is not in the states " + legalStates);
        }
    }

    /**
     * Increases the restart generation of the active nodes matching given filter.
     *
     * @return the nodes in their new state
     */
    public List<Node> restartActive(Predicate<Node> filter) {
        return restart(NodeFilter.in(Set.of(Node.State.active)).and(filter));
    }

    /**
     * Increases the restart generation of the any nodes matching given filter.
     *
     * @return the nodes in their new state
     */
    public List<Node> restart(Predicate<Node> filter) {
        return performOn(filter, (node, lock) -> write(node.withRestart(node.allocation().get().restartGeneration().withIncreasedWanted()),
                                                       lock));
    }

    /**
     * Increases the reboot generation of the nodes matching the filter.
     *
     * @return the nodes in their new state
     */
    public List<Node> reboot(Predicate<Node> filter) {
        return performOn(filter, (node, lock) -> write(node.withReboot(node.status().reboot().withIncreasedWanted()), lock));
    }

    /**
     * Set target OS version of all nodes matching given filter.
     *
     * @return the nodes in their new state
     */
    public List<Node> upgradeOs(Predicate<Node> filter, Optional<Version> version) {
        return performOn(filter, (node, lock) -> {
            var newStatus = node.status().withOsVersion(node.status().osVersion().withWanted(version));
            return write(node.with(newStatus), lock);
        });
    }

    /** Retire nodes matching given filter */
    public List<Node> retire(Predicate<Node> filter, Agent agent, Instant instant) {
        return performOn(filter, (node, lock) -> write(node.withWantToRetire(true, agent, instant), lock));
    }

    /** Retire and deprovision given host and all of its children */
    public List<Node> deprovision(String hostname, Agent agent, Instant instant) {
        return decommission(hostname, DecommissionOperation.deprovision, agent, instant);
    }

    /** Retire and rebuild given host and all of its children */
    public List<Node> rebuild(String hostname, Agent agent, Instant instant) {
        return decommission(hostname, DecommissionOperation.rebuild, agent, instant);
    }

    private List<Node> decommission(String hostname, DecommissionOperation op, Agent agent, Instant instant) {
        Optional<NodeMutex> nodeMutex = lockAndGet(hostname);
        if (nodeMutex.isEmpty()) return List.of();
        Node host = nodeMutex.get().node();
        if (!host.type().isHost()) throw new IllegalArgumentException("Cannot " + op + " non-host " + host);

        List<Node> result;
        boolean wantToDeprovision = op == DecommissionOperation.deprovision;
        boolean wantToRebuild = op == DecommissionOperation.rebuild;
        try (NodeMutex lock = nodeMutex.get(); Mutex allocationLock = lockUnallocated()) {
            host = lock.node(); // Re-read the host now that both locks are held
            result = performOn(list(allocationLock).childrenOf(host), (node, nodeLock) -> {
                Node newNode = node.withWantToRetire(true, wantToDeprovision, wantToRebuild, agent, instant);
                return write(newNode, nodeLock);
            });
            Node newHost = host.withWantToRetire(true, wantToDeprovision, wantToRebuild, agent, instant);
            result.add(write(newHost, lock));
        }
        return result;
    }

    /**
     * Writes this node after it has changed some internal state but NOT changed its state field.
     * This does NOT lock the node repository implicitly, but callers are expected to already hold the lock.
     *
     * @param lock already acquired lock
     * @return the written node for convenience
     */
    public Node write(Node node, Mutex lock) { return write(List.of(node), lock).get(0); }

    /**
     * Writes these nodes after they have changed some internal state but NOT changed their state field.
     * This does NOT lock the node repository implicitly, but callers are expected to already hold the lock.
     *
     * @param lock already acquired lock
     * @return the written nodes for convenience
     */
    public List<Node> write(List<Node> nodes, @SuppressWarnings("unused") Mutex lock) {
        return db.writeTo(nodes, Agent.system, Optional.empty());
    }

    private List<Node> performOn(Predicate<Node> filter, BiFunction<Node, Mutex, Node> action) {
        return performOn(list().matching(filter), action);
    }

    /**
     * Performs an operation requiring locking on all nodes matching some filter.
     *
     * @param action the action to perform
     * @return the set of nodes on which the action was performed, as they became as a result of the operation
     */
    private List<Node> performOn(NodeList nodes, BiFunction<Node, Mutex, Node> action) {
        List<Node> unallocatedNodes = new ArrayList<>();
        ListMap<ApplicationId, Node> allocatedNodes = new ListMap<>();

        // Group nodes by which lock governs them: the unallocated lock, or the owning application's lock
        for (Node node : nodes) {
            if (node.allocation().isPresent())
                allocatedNodes.put(node.allocation().get().owner(), node);
            else
                unallocatedNodes.add(node);
        }

        List<Node> resultingNodes = new ArrayList<>();
        try (Mutex lock = lockUnallocated()) {
            for (Node node : unallocatedNodes) {
                Optional<Node> currentNode = db.readNode(node.hostname()); // Re-read under lock; node may be gone
                if (currentNode.isEmpty()) continue;
                resultingNodes.add(action.apply(currentNode.get(), lock));
            }
        }
        for (Map.Entry<ApplicationId, List<Node>> applicationNodes : allocatedNodes.entrySet()) {
            try (Mutex lock = lock(applicationNodes.getKey())) {
                for (Node node : applicationNodes.getValue()) {
                    Optional<Node> currentNode = db.readNode(node.hostname()); // Re-read under lock; node may be gone
                    if (currentNode.isEmpty()) continue;
                    resultingNodes.add(action.apply(currentNode.get(), lock));
                }
            }
        }
        return resultingNodes;
    }

    public boolean canAllocateTenantNodeTo(Node host) {
        return canAllocateTenantNodeTo(host, zone.getCloud().dynamicProvisioning());
    }

    public boolean suspended(Node node) {
        try {
            return orchestrator.getNodeStatus(new HostName(node.hostname())).isSuspended();
        } catch (HostNameNotFoundException e) {
            // Node is not in the orchestrator: treat as not suspended
            return false;
        }
    }

    /** Create a lock which provides exclusive rights to making changes to the given application */
    public Mutex lock(ApplicationId application) { return db.lock(application); }

    /** Create a lock with a timeout which provides exclusive rights to making changes to the given application */
    public Mutex lock(ApplicationId application, Duration timeout) { return db.lock(application, timeout); }

    /** Create a lock which provides exclusive rights to modifying unallocated nodes */
    public Mutex lockUnallocated() { return db.lockInactive(); }

    /** Returns the unallocated/application lock, and the node acquired under that lock. */
    public Optional<NodeMutex> lockAndGet(Node node) {
        Node staleNode = node;

        final int maxRetries = 4;
        for (int i = 0; i < maxRetries; ++i) {
            // Which lock applies depends on the node's (possibly stale) allocation, so re-check after locking
            Mutex lockToClose = lock(staleNode);
            try {
                Optional<Node> freshNode = node(staleNode.hostname(), staleNode.state());
                if (freshNode.isEmpty()) {
                    freshNode = node(staleNode.hostname());
                    if (freshNode.isEmpty()) {
                        return Optional.empty();
                    }
                }

                if (Objects.equals(freshNode.get().allocation().map(Allocation::owner),
                                   staleNode.allocation().map(Allocation::owner))) {
                    NodeMutex nodeMutex = new NodeMutex(freshNode.get(), lockToClose);
                    lockToClose = null; // Ownership of the lock is transferred to the returned NodeMutex
                    return Optional.of(nodeMutex);
                }

                // Owner changed while we waited for the lock: retry with the fresh node
                staleNode = freshNode.get();
            } finally {
                if (lockToClose != null) lockToClose.close();
            }
        }

        throw new IllegalStateException("Giving up (after " + maxRetries + " attempts) " +
                                        "fetching an up to date node under lock: " + node.hostname());
    }

    /** Returns the unallocated/application lock, and the node acquired under that lock. */
    public Optional<NodeMutex> lockAndGet(String hostname) {
        return node(hostname).flatMap(this::lockAndGet);
    }

    /** Returns the unallocated/application lock, and the node acquired under that lock. */
    public NodeMutex lockAndGetRequired(Node node) {
        return lockAndGet(node).orElseThrow(() -> new NoSuchNodeException("No node with hostname '" + node.hostname() + "'"));
    }

    /** Returns the unallocated/application lock, and the node acquired under that lock. */
    public NodeMutex lockAndGetRequired(String hostname) {
        return lockAndGet(hostname).orElseThrow(() -> new NoSuchNodeException("No node with hostname '" + hostname + "'"));
    }

    private Mutex lock(Node node) {
        return node.allocation().isPresent() ? lock(node.allocation().get().owner()) : lockUnallocated();
    }

    private Node requireNode(String hostname) {
        return node(hostname).orElseThrow(() -> new NoSuchNodeException("No node with hostname '" + hostname + "'"));
    }

    private void illegal(String message) {
        throw new IllegalArgumentException(message);
    }

    /** Returns whether node should be parked when deallocated by given agent */
    private static boolean parkOnDeallocationOf(Node node, Agent agent) {
        if (node.state() == Node.State.parked) return false;
        if (agent == Agent.operator) return false;
        if (!node.type().isHost() && node.status().wantToDeprovision()) return false;
        boolean retirementRequestedByOperator = node.status().wantToRetire() &&
                                                node.history().event(History.Event.Type.wantToRetire)
                                                    .map(History.Event::agent)
                                                    .map(a -> a == Agent.operator)
                                                    .orElse(false);
        return node.status().wantToDeprovision() ||
               node.status().wantToRebuild() ||
               retirementRequestedByOperator;
    }

    /** The different ways a host can be decommissioned */
    private enum DecommissionOperation {
        deprovision,
        rebuild,
    }

}
Validating dependencies can be useful for self-hosted deployments as well, so consider performing this validation regardless of whether the system is hosted.
private void validatePomXml(DeployLogger deployLogger, boolean isHosted, String jarFilename, String pomXmlContent) { if (isHosted) { try { Document pom = DocumentBuilderFactory.newDefaultInstance().newDocumentBuilder() .parse(new InputSource(new StringReader(pomXmlContent))); validateDependencies(deployLogger, jarFilename, pom); validateRepositories(deployLogger, jarFilename, pom); } catch (ParserConfigurationException e) { throw new RuntimeException(e); } catch (Exception e) { deployLogger.log(Level.INFO, String.format("Unable to parse pom.xml from %s", jarFilename)); } } }
validateDependencies(deployLogger, jarFilename, pom); // Validate the dependencies declared in the parsed pom.xml
private void validatePomXml(DeployLogger deployLogger, boolean isHosted, String jarFilename, String pomXmlContent) { if (isHosted) { try { Document pom = DocumentBuilderFactory.newDefaultInstance().newDocumentBuilder() .parse(new InputSource(new StringReader(pomXmlContent))); validateDependencies(deployLogger, jarFilename, pom); validateRepositories(deployLogger, jarFilename, pom); } catch (ParserConfigurationException e) { throw new RuntimeException(e); } catch (Exception e) { deployLogger.log(Level.INFO, String.format("Unable to parse pom.xml from %s", jarFilename)); } } }
class BundleValidator extends Validator { public BundleValidator() {} @Override public void validate(VespaModel model, DeployState deployState) { ApplicationPackage app = deployState.getApplicationPackage(); for (ComponentInfo info : app.getComponentsInfo(deployState.getVespaVersion())) { Path path = Path.fromString(info.getPathRelativeToAppDir()); try { DeployLogger deployLogger = deployState.getDeployLogger(); deployLogger.log(Level.FINE, String.format("Validating bundle at '%s'", path)); JarFile jarFile = new JarFile(app.getFileReference(path)); validateJarFile(deployLogger, deployState.isHosted(), jarFile); } catch (IOException e) { throw new IllegalArgumentException( "Failed to validate JAR file '" + path.last() + "'", e); } } } void validateJarFile(DeployLogger deployLogger, boolean isHosted, JarFile jarFile) throws IOException { Manifest manifest = jarFile.getManifest(); String filename = Paths.get(jarFile.getName()).getFileName().toString(); if (manifest == null) { throw new IllegalArgumentException("Non-existing or invalid manifest in " + filename); } validateManifest(deployLogger, filename, manifest); getPomXmlContent(deployLogger, jarFile) .ifPresent(pomXml -> validatePomXml(deployLogger, isHosted, filename, pomXml)); } private void validateManifest(DeployLogger deployLogger, String filename, Manifest mf) { Attributes attributes = mf.getMainAttributes(); HashSet<String> mfAttributes = new HashSet<>(); for (Map.Entry<Object,Object> entry : attributes.entrySet()) { mfAttributes.add(entry.getKey().toString()); } List<String> requiredOSGIHeaders = Arrays.asList( "Bundle-ManifestVersion", "Bundle-Name", "Bundle-SymbolicName", "Bundle-Version"); for (String header : requiredOSGIHeaders) { if (!mfAttributes.contains(header)) { throw new IllegalArgumentException("Required OSGI header '" + header + "' was not found in manifest in '" + filename + "'"); } } if (attributes.getValue("Bundle-Version").endsWith(".SNAPSHOT")) { 
deployLogger.logApplicationPackage(Level.WARNING, "Deploying snapshot bundle " + filename + ".\nTo use this bundle, you must include the qualifier 'SNAPSHOT' in the version specification in services.xml."); } if (attributes.getValue("Import-Package") != null) { validateImportedPackages(deployLogger, filename, mf); } } private static void validateImportedPackages(DeployLogger deployLogger, String filename, Manifest manifest) { Domain osgiHeaders = Domain.domain(manifest); Parameters importPackage = osgiHeaders.getImportPackage(); Map<DeprecatedProvidedBundle, List<String>> deprecatedPackagesInUse = new HashMap<>(); importPackage.forEach((packageName, attrs) -> { VersionRange versionRange = attrs.getVersion() != null ? VersionRange.parseOSGiVersionRange(attrs.getVersion()) : null; for (DeprecatedProvidedBundle deprecatedBundle : DeprecatedProvidedBundle.values()) { for (Predicate<String> matcher : deprecatedBundle.javaPackageMatchers) { if (matcher.test(packageName) && (versionRange == null || deprecatedBundle.versionDiscriminator.test(versionRange))) { deprecatedPackagesInUse.computeIfAbsent(deprecatedBundle, __ -> new ArrayList<>()) .add(packageName); } } } }); deprecatedPackagesInUse.forEach((artifact, packagesInUse) -> { deployLogger.logApplicationPackage(Level.WARNING, String.format("For JAR file '%s': \n" + "Manifest imports the following Java packages from '%s': %s. 
\n" + "%s", filename, artifact.name, packagesInUse, artifact.description)); }); } private static final Pattern POM_FILE_LOCATION = Pattern.compile("META-INF/maven/.+?/.+?/pom.xml"); private Optional<String> getPomXmlContent(DeployLogger deployLogger, JarFile jarFile) { return jarFile.stream() .filter(f -> POM_FILE_LOCATION.matcher(f.getName()).matches()) .findFirst() .map(f -> { try { return new String(jarFile.getInputStream(f).readAllBytes()); } catch (IOException e) { deployLogger.log(Level.INFO, String.format("Unable to read '%s' from '%s'", f.getName(), jarFile.getName())); return null; } }); } private static void validateDependencies(DeployLogger deployLogger, String jarFilename, Document pom) throws XPathExpressionException { forEachPomXmlElement(pom, "dependencies/dependency", dependency -> { String groupId = dependency.getElementsByTagName("groupId").item(0).getTextContent(); String artifactId = dependency.getElementsByTagName("artifactId").item(0).getTextContent(); for (DeprecatedMavenArtifact deprecatedArtifact : DeprecatedMavenArtifact.values()) { if (groupId.equals(deprecatedArtifact.groupId) && artifactId.equals(deprecatedArtifact.artifactId)) { deployLogger.logApplicationPackage(Level.WARNING, String.format( "The pom.xml of bundle '%s' includes a dependency to the artifact '%s:%s'. 
\n%s", jarFilename, groupId, artifactId, deprecatedArtifact.description)); } } }); } private static void validateRepositories(DeployLogger deployLogger, String jarFilename, Document pom) throws XPathExpressionException { forEachPomXmlElement(pom, "pluginRepositories/pluginRepository", repository -> validateRepository(deployLogger, jarFilename, "pluginRepositories", repository)); forEachPomXmlElement(pom, "repositories/repository", repository -> validateRepository(deployLogger, jarFilename, "repositories", repository)); } private static void validateRepository(DeployLogger deployLogger, String jarFilename, String parentElementName, Element element) { String url = element.getElementsByTagName("url").item(0).getTextContent(); if (url.contains("vespa-maven-libs-release-local")) { deployLogger.logApplicationPackage(Level.WARNING, String.format("<%s> in pom.xml of '%s' uses deprecated Maven repository '%s'.\n See announcement.", parentElementName, jarFilename, url)); } } private static void forEachPomXmlElement(Document pom, String xpath, Consumer<Element> consumer) throws XPathExpressionException { NodeList dependencies = (NodeList) XPathFactory.newDefaultInstance().newXPath() .compile("/project/" + xpath) .evaluate(pom, XPathConstants.NODESET); for (int i = 0; i < dependencies.getLength(); i++) { Element element = (Element) dependencies.item(i); consumer.accept(element); } } private enum DeprecatedMavenArtifact { VESPA_HTTP_CLIENT_EXTENSION("com.yahoo.vespa", "vespa-http-client-extensions", "This artifact will be removed in Vespa 8. " + "Programmatic use can be safely removed from system/staging tests. 
" + "See internal Vespa 8 release notes for details."); final String groupId; final String artifactId; final String description; DeprecatedMavenArtifact(String groupId, String artifactId, String description) { this.groupId = groupId; this.artifactId = artifactId; this.description = description; } } private enum DeprecatedProvidedBundle { ORG_JSON("org.json:json", "The org.json library will no longer provided by jdisc runtime on Vespa 8. " + "See https: Set.of("org\\.json")); final String name; final Collection<Predicate<String>> javaPackageMatchers; final Predicate<VersionRange> versionDiscriminator; final String description; DeprecatedProvidedBundle(String name, String description, Collection<String> javaPackagePatterns) { this(name, description, __ -> true, javaPackagePatterns); } DeprecatedProvidedBundle(String name, String description, Predicate<VersionRange> versionDiscriminator, Collection<String> javaPackagePatterns) { this.name = name; this.javaPackageMatchers = javaPackagePatterns.stream() .map(s -> Pattern.compile(s).asMatchPredicate()) .collect(Collectors.toList()); this.versionDiscriminator = versionDiscriminator; this.description = description; } } }
class BundleValidator extends Validator { public BundleValidator() {} @Override public void validate(VespaModel model, DeployState deployState) { ApplicationPackage app = deployState.getApplicationPackage(); for (ComponentInfo info : app.getComponentsInfo(deployState.getVespaVersion())) { Path path = Path.fromString(info.getPathRelativeToAppDir()); try { DeployLogger deployLogger = deployState.getDeployLogger(); deployLogger.log(Level.FINE, String.format("Validating bundle at '%s'", path)); JarFile jarFile = new JarFile(app.getFileReference(path)); validateJarFile(deployLogger, deployState.isHosted(), jarFile); } catch (IOException e) { throw new IllegalArgumentException( "Failed to validate JAR file '" + path.last() + "'", e); } } } void validateJarFile(DeployLogger deployLogger, boolean isHosted, JarFile jarFile) throws IOException { Manifest manifest = jarFile.getManifest(); String filename = Paths.get(jarFile.getName()).getFileName().toString(); if (manifest == null) { throw new IllegalArgumentException("Non-existing or invalid manifest in " + filename); } validateManifest(deployLogger, filename, manifest); getPomXmlContent(deployLogger, jarFile) .ifPresent(pomXml -> validatePomXml(deployLogger, isHosted, filename, pomXml)); } private void validateManifest(DeployLogger deployLogger, String filename, Manifest mf) { Attributes attributes = mf.getMainAttributes(); HashSet<String> mfAttributes = new HashSet<>(); for (Map.Entry<Object,Object> entry : attributes.entrySet()) { mfAttributes.add(entry.getKey().toString()); } List<String> requiredOSGIHeaders = Arrays.asList( "Bundle-ManifestVersion", "Bundle-Name", "Bundle-SymbolicName", "Bundle-Version"); for (String header : requiredOSGIHeaders) { if (!mfAttributes.contains(header)) { throw new IllegalArgumentException("Required OSGI header '" + header + "' was not found in manifest in '" + filename + "'"); } } if (attributes.getValue("Bundle-Version").endsWith(".SNAPSHOT")) { 
deployLogger.logApplicationPackage(Level.WARNING, "Deploying snapshot bundle " + filename + ".\nTo use this bundle, you must include the qualifier 'SNAPSHOT' in the version specification in services.xml."); } if (attributes.getValue("Import-Package") != null) { validateImportedPackages(deployLogger, filename, mf); } } private static void validateImportedPackages(DeployLogger deployLogger, String filename, Manifest manifest) { Domain osgiHeaders = Domain.domain(manifest); Parameters importPackage = osgiHeaders.getImportPackage(); Map<DeprecatedProvidedBundle, List<String>> deprecatedPackagesInUse = new HashMap<>(); importPackage.forEach((packageName, attrs) -> { VersionRange versionRange = attrs.getVersion() != null ? VersionRange.parseOSGiVersionRange(attrs.getVersion()) : null; for (DeprecatedProvidedBundle deprecatedBundle : DeprecatedProvidedBundle.values()) { for (Predicate<String> matcher : deprecatedBundle.javaPackageMatchers) { if (matcher.test(packageName) && (versionRange == null || deprecatedBundle.versionDiscriminator.test(versionRange))) { deprecatedPackagesInUse.computeIfAbsent(deprecatedBundle, __ -> new ArrayList<>()) .add(packageName); } } } }); deprecatedPackagesInUse.forEach((artifact, packagesInUse) -> { deployLogger.logApplicationPackage(Level.WARNING, String.format("For JAR file '%s': \n" + "Manifest imports the following Java packages from '%s': %s. 
\n" + "%s", filename, artifact.name, packagesInUse, artifact.description)); }); } private static final Pattern POM_FILE_LOCATION = Pattern.compile("META-INF/maven/.+?/.+?/pom.xml"); private Optional<String> getPomXmlContent(DeployLogger deployLogger, JarFile jarFile) { return jarFile.stream() .filter(f -> POM_FILE_LOCATION.matcher(f.getName()).matches()) .findFirst() .map(f -> { try { return new String(jarFile.getInputStream(f).readAllBytes()); } catch (IOException e) { deployLogger.log(Level.INFO, String.format("Unable to read '%s' from '%s'", f.getName(), jarFile.getName())); return null; } }); } private static void validateDependencies(DeployLogger deployLogger, String jarFilename, Document pom) throws XPathExpressionException { forEachPomXmlElement(pom, "dependencies/dependency", dependency -> { String groupId = dependency.getElementsByTagName("groupId").item(0).getTextContent(); String artifactId = dependency.getElementsByTagName("artifactId").item(0).getTextContent(); for (DeprecatedMavenArtifact deprecatedArtifact : DeprecatedMavenArtifact.values()) { if (groupId.equals(deprecatedArtifact.groupId) && artifactId.equals(deprecatedArtifact.artifactId)) { deployLogger.logApplicationPackage(Level.WARNING, String.format( "The pom.xml of bundle '%s' includes a dependency to the artifact '%s:%s'. 
\n%s", jarFilename, groupId, artifactId, deprecatedArtifact.description)); } } }); } private static void validateRepositories(DeployLogger deployLogger, String jarFilename, Document pom) throws XPathExpressionException { forEachPomXmlElement(pom, "pluginRepositories/pluginRepository", repository -> validateRepository(deployLogger, jarFilename, "pluginRepositories", repository)); forEachPomXmlElement(pom, "repositories/repository", repository -> validateRepository(deployLogger, jarFilename, "repositories", repository)); } private static void validateRepository(DeployLogger deployLogger, String jarFilename, String parentElementName, Element element) { String url = element.getElementsByTagName("url").item(0).getTextContent(); if (url.contains("vespa-maven-libs-release-local")) { deployLogger.logApplicationPackage(Level.WARNING, String.format("<%s> in pom.xml of '%s' uses deprecated Maven repository '%s'.\n See announcement.", parentElementName, jarFilename, url)); } } private static void forEachPomXmlElement(Document pom, String xpath, Consumer<Element> consumer) throws XPathExpressionException { NodeList dependencies = (NodeList) XPathFactory.newDefaultInstance().newXPath() .compile("/project/" + xpath) .evaluate(pom, XPathConstants.NODESET); for (int i = 0; i < dependencies.getLength(); i++) { Element element = (Element) dependencies.item(i); consumer.accept(element); } } private enum DeprecatedMavenArtifact { VESPA_HTTP_CLIENT_EXTENSION("com.yahoo.vespa", "vespa-http-client-extensions", "This artifact will be removed in Vespa 8. " + "Programmatic use can be safely removed from system/staging tests. 
" + "See internal Vespa 8 release notes for details."); final String groupId; final String artifactId; final String description; DeprecatedMavenArtifact(String groupId, String artifactId, String description) { this.groupId = groupId; this.artifactId = artifactId; this.description = description; } } private enum DeprecatedProvidedBundle { ORG_JSON("org.json:json", "The org.json library will no longer provided by jdisc runtime on Vespa 8. " + "See https: Set.of("org\\.json")); final String name; final Collection<Predicate<String>> javaPackageMatchers; final Predicate<VersionRange> versionDiscriminator; final String description; DeprecatedProvidedBundle(String name, String description, Collection<String> javaPackagePatterns) { this(name, description, __ -> true, javaPackagePatterns); } DeprecatedProvidedBundle(String name, String description, Predicate<VersionRange> versionDiscriminator, Collection<String> javaPackagePatterns) { this.name = name; this.javaPackageMatchers = javaPackagePatterns.stream() .map(s -> Pattern.compile(s).asMatchPredicate()) .collect(Collectors.toList()); this.versionDiscriminator = versionDiscriminator; this.description = description; } } }
I'm looking into whether we can split the bundle validator into a public and internal part.
private void validatePomXml(DeployLogger deployLogger, boolean isHosted, String jarFilename, String pomXmlContent) { if (isHosted) { try { Document pom = DocumentBuilderFactory.newDefaultInstance().newDocumentBuilder() .parse(new InputSource(new StringReader(pomXmlContent))); validateDependencies(deployLogger, jarFilename, pom); validateRepositories(deployLogger, jarFilename, pom); } catch (ParserConfigurationException e) { throw new RuntimeException(e); } catch (Exception e) { deployLogger.log(Level.INFO, String.format("Unable to parse pom.xml from %s", jarFilename)); } } }
validateDependencies(deployLogger, jarFilename, pom);
private void validatePomXml(DeployLogger deployLogger, boolean isHosted, String jarFilename, String pomXmlContent) { if (isHosted) { try { Document pom = DocumentBuilderFactory.newDefaultInstance().newDocumentBuilder() .parse(new InputSource(new StringReader(pomXmlContent))); validateDependencies(deployLogger, jarFilename, pom); validateRepositories(deployLogger, jarFilename, pom); } catch (ParserConfigurationException e) { throw new RuntimeException(e); } catch (Exception e) { deployLogger.log(Level.INFO, String.format("Unable to parse pom.xml from %s", jarFilename)); } } }
class BundleValidator extends Validator { public BundleValidator() {} @Override public void validate(VespaModel model, DeployState deployState) { ApplicationPackage app = deployState.getApplicationPackage(); for (ComponentInfo info : app.getComponentsInfo(deployState.getVespaVersion())) { Path path = Path.fromString(info.getPathRelativeToAppDir()); try { DeployLogger deployLogger = deployState.getDeployLogger(); deployLogger.log(Level.FINE, String.format("Validating bundle at '%s'", path)); JarFile jarFile = new JarFile(app.getFileReference(path)); validateJarFile(deployLogger, deployState.isHosted(), jarFile); } catch (IOException e) { throw new IllegalArgumentException( "Failed to validate JAR file '" + path.last() + "'", e); } } } void validateJarFile(DeployLogger deployLogger, boolean isHosted, JarFile jarFile) throws IOException { Manifest manifest = jarFile.getManifest(); String filename = Paths.get(jarFile.getName()).getFileName().toString(); if (manifest == null) { throw new IllegalArgumentException("Non-existing or invalid manifest in " + filename); } validateManifest(deployLogger, filename, manifest); getPomXmlContent(deployLogger, jarFile) .ifPresent(pomXml -> validatePomXml(deployLogger, isHosted, filename, pomXml)); } private void validateManifest(DeployLogger deployLogger, String filename, Manifest mf) { Attributes attributes = mf.getMainAttributes(); HashSet<String> mfAttributes = new HashSet<>(); for (Map.Entry<Object,Object> entry : attributes.entrySet()) { mfAttributes.add(entry.getKey().toString()); } List<String> requiredOSGIHeaders = Arrays.asList( "Bundle-ManifestVersion", "Bundle-Name", "Bundle-SymbolicName", "Bundle-Version"); for (String header : requiredOSGIHeaders) { if (!mfAttributes.contains(header)) { throw new IllegalArgumentException("Required OSGI header '" + header + "' was not found in manifest in '" + filename + "'"); } } if (attributes.getValue("Bundle-Version").endsWith(".SNAPSHOT")) { 
deployLogger.logApplicationPackage(Level.WARNING, "Deploying snapshot bundle " + filename + ".\nTo use this bundle, you must include the qualifier 'SNAPSHOT' in the version specification in services.xml."); } if (attributes.getValue("Import-Package") != null) { validateImportedPackages(deployLogger, filename, mf); } } private static void validateImportedPackages(DeployLogger deployLogger, String filename, Manifest manifest) { Domain osgiHeaders = Domain.domain(manifest); Parameters importPackage = osgiHeaders.getImportPackage(); Map<DeprecatedProvidedBundle, List<String>> deprecatedPackagesInUse = new HashMap<>(); importPackage.forEach((packageName, attrs) -> { VersionRange versionRange = attrs.getVersion() != null ? VersionRange.parseOSGiVersionRange(attrs.getVersion()) : null; for (DeprecatedProvidedBundle deprecatedBundle : DeprecatedProvidedBundle.values()) { for (Predicate<String> matcher : deprecatedBundle.javaPackageMatchers) { if (matcher.test(packageName) && (versionRange == null || deprecatedBundle.versionDiscriminator.test(versionRange))) { deprecatedPackagesInUse.computeIfAbsent(deprecatedBundle, __ -> new ArrayList<>()) .add(packageName); } } } }); deprecatedPackagesInUse.forEach((artifact, packagesInUse) -> { deployLogger.logApplicationPackage(Level.WARNING, String.format("For JAR file '%s': \n" + "Manifest imports the following Java packages from '%s': %s. 
\n" + "%s", filename, artifact.name, packagesInUse, artifact.description)); }); } private static final Pattern POM_FILE_LOCATION = Pattern.compile("META-INF/maven/.+?/.+?/pom.xml"); private Optional<String> getPomXmlContent(DeployLogger deployLogger, JarFile jarFile) { return jarFile.stream() .filter(f -> POM_FILE_LOCATION.matcher(f.getName()).matches()) .findFirst() .map(f -> { try { return new String(jarFile.getInputStream(f).readAllBytes()); } catch (IOException e) { deployLogger.log(Level.INFO, String.format("Unable to read '%s' from '%s'", f.getName(), jarFile.getName())); return null; } }); } private static void validateDependencies(DeployLogger deployLogger, String jarFilename, Document pom) throws XPathExpressionException { forEachPomXmlElement(pom, "dependencies/dependency", dependency -> { String groupId = dependency.getElementsByTagName("groupId").item(0).getTextContent(); String artifactId = dependency.getElementsByTagName("artifactId").item(0).getTextContent(); for (DeprecatedMavenArtifact deprecatedArtifact : DeprecatedMavenArtifact.values()) { if (groupId.equals(deprecatedArtifact.groupId) && artifactId.equals(deprecatedArtifact.artifactId)) { deployLogger.logApplicationPackage(Level.WARNING, String.format( "The pom.xml of bundle '%s' includes a dependency to the artifact '%s:%s'. 
\n%s", jarFilename, groupId, artifactId, deprecatedArtifact.description)); } } }); } private static void validateRepositories(DeployLogger deployLogger, String jarFilename, Document pom) throws XPathExpressionException { forEachPomXmlElement(pom, "pluginRepositories/pluginRepository", repository -> validateRepository(deployLogger, jarFilename, "pluginRepositories", repository)); forEachPomXmlElement(pom, "repositories/repository", repository -> validateRepository(deployLogger, jarFilename, "repositories", repository)); } private static void validateRepository(DeployLogger deployLogger, String jarFilename, String parentElementName, Element element) { String url = element.getElementsByTagName("url").item(0).getTextContent(); if (url.contains("vespa-maven-libs-release-local")) { deployLogger.logApplicationPackage(Level.WARNING, String.format("<%s> in pom.xml of '%s' uses deprecated Maven repository '%s'.\n See announcement.", parentElementName, jarFilename, url)); } } private static void forEachPomXmlElement(Document pom, String xpath, Consumer<Element> consumer) throws XPathExpressionException { NodeList dependencies = (NodeList) XPathFactory.newDefaultInstance().newXPath() .compile("/project/" + xpath) .evaluate(pom, XPathConstants.NODESET); for (int i = 0; i < dependencies.getLength(); i++) { Element element = (Element) dependencies.item(i); consumer.accept(element); } } private enum DeprecatedMavenArtifact { VESPA_HTTP_CLIENT_EXTENSION("com.yahoo.vespa", "vespa-http-client-extensions", "This artifact will be removed in Vespa 8. " + "Programmatic use can be safely removed from system/staging tests. 
" + "See internal Vespa 8 release notes for details."); final String groupId; final String artifactId; final String description; DeprecatedMavenArtifact(String groupId, String artifactId, String description) { this.groupId = groupId; this.artifactId = artifactId; this.description = description; } } private enum DeprecatedProvidedBundle { ORG_JSON("org.json:json", "The org.json library will no longer provided by jdisc runtime on Vespa 8. " + "See https: Set.of("org\\.json")); final String name; final Collection<Predicate<String>> javaPackageMatchers; final Predicate<VersionRange> versionDiscriminator; final String description; DeprecatedProvidedBundle(String name, String description, Collection<String> javaPackagePatterns) { this(name, description, __ -> true, javaPackagePatterns); } DeprecatedProvidedBundle(String name, String description, Predicate<VersionRange> versionDiscriminator, Collection<String> javaPackagePatterns) { this.name = name; this.javaPackageMatchers = javaPackagePatterns.stream() .map(s -> Pattern.compile(s).asMatchPredicate()) .collect(Collectors.toList()); this.versionDiscriminator = versionDiscriminator; this.description = description; } } }
class BundleValidator extends Validator { public BundleValidator() {} @Override public void validate(VespaModel model, DeployState deployState) { ApplicationPackage app = deployState.getApplicationPackage(); for (ComponentInfo info : app.getComponentsInfo(deployState.getVespaVersion())) { Path path = Path.fromString(info.getPathRelativeToAppDir()); try { DeployLogger deployLogger = deployState.getDeployLogger(); deployLogger.log(Level.FINE, String.format("Validating bundle at '%s'", path)); JarFile jarFile = new JarFile(app.getFileReference(path)); validateJarFile(deployLogger, deployState.isHosted(), jarFile); } catch (IOException e) { throw new IllegalArgumentException( "Failed to validate JAR file '" + path.last() + "'", e); } } } void validateJarFile(DeployLogger deployLogger, boolean isHosted, JarFile jarFile) throws IOException { Manifest manifest = jarFile.getManifest(); String filename = Paths.get(jarFile.getName()).getFileName().toString(); if (manifest == null) { throw new IllegalArgumentException("Non-existing or invalid manifest in " + filename); } validateManifest(deployLogger, filename, manifest); getPomXmlContent(deployLogger, jarFile) .ifPresent(pomXml -> validatePomXml(deployLogger, isHosted, filename, pomXml)); } private void validateManifest(DeployLogger deployLogger, String filename, Manifest mf) { Attributes attributes = mf.getMainAttributes(); HashSet<String> mfAttributes = new HashSet<>(); for (Map.Entry<Object,Object> entry : attributes.entrySet()) { mfAttributes.add(entry.getKey().toString()); } List<String> requiredOSGIHeaders = Arrays.asList( "Bundle-ManifestVersion", "Bundle-Name", "Bundle-SymbolicName", "Bundle-Version"); for (String header : requiredOSGIHeaders) { if (!mfAttributes.contains(header)) { throw new IllegalArgumentException("Required OSGI header '" + header + "' was not found in manifest in '" + filename + "'"); } } if (attributes.getValue("Bundle-Version").endsWith(".SNAPSHOT")) { 
deployLogger.logApplicationPackage(Level.WARNING, "Deploying snapshot bundle " + filename + ".\nTo use this bundle, you must include the qualifier 'SNAPSHOT' in the version specification in services.xml."); } if (attributes.getValue("Import-Package") != null) { validateImportedPackages(deployLogger, filename, mf); } } private static void validateImportedPackages(DeployLogger deployLogger, String filename, Manifest manifest) { Domain osgiHeaders = Domain.domain(manifest); Parameters importPackage = osgiHeaders.getImportPackage(); Map<DeprecatedProvidedBundle, List<String>> deprecatedPackagesInUse = new HashMap<>(); importPackage.forEach((packageName, attrs) -> { VersionRange versionRange = attrs.getVersion() != null ? VersionRange.parseOSGiVersionRange(attrs.getVersion()) : null; for (DeprecatedProvidedBundle deprecatedBundle : DeprecatedProvidedBundle.values()) { for (Predicate<String> matcher : deprecatedBundle.javaPackageMatchers) { if (matcher.test(packageName) && (versionRange == null || deprecatedBundle.versionDiscriminator.test(versionRange))) { deprecatedPackagesInUse.computeIfAbsent(deprecatedBundle, __ -> new ArrayList<>()) .add(packageName); } } } }); deprecatedPackagesInUse.forEach((artifact, packagesInUse) -> { deployLogger.logApplicationPackage(Level.WARNING, String.format("For JAR file '%s': \n" + "Manifest imports the following Java packages from '%s': %s. 
\n" + "%s", filename, artifact.name, packagesInUse, artifact.description)); }); } private static final Pattern POM_FILE_LOCATION = Pattern.compile("META-INF/maven/.+?/.+?/pom.xml"); private Optional<String> getPomXmlContent(DeployLogger deployLogger, JarFile jarFile) { return jarFile.stream() .filter(f -> POM_FILE_LOCATION.matcher(f.getName()).matches()) .findFirst() .map(f -> { try { return new String(jarFile.getInputStream(f).readAllBytes()); } catch (IOException e) { deployLogger.log(Level.INFO, String.format("Unable to read '%s' from '%s'", f.getName(), jarFile.getName())); return null; } }); } private static void validateDependencies(DeployLogger deployLogger, String jarFilename, Document pom) throws XPathExpressionException { forEachPomXmlElement(pom, "dependencies/dependency", dependency -> { String groupId = dependency.getElementsByTagName("groupId").item(0).getTextContent(); String artifactId = dependency.getElementsByTagName("artifactId").item(0).getTextContent(); for (DeprecatedMavenArtifact deprecatedArtifact : DeprecatedMavenArtifact.values()) { if (groupId.equals(deprecatedArtifact.groupId) && artifactId.equals(deprecatedArtifact.artifactId)) { deployLogger.logApplicationPackage(Level.WARNING, String.format( "The pom.xml of bundle '%s' includes a dependency to the artifact '%s:%s'. 
\n%s", jarFilename, groupId, artifactId, deprecatedArtifact.description)); } } }); } private static void validateRepositories(DeployLogger deployLogger, String jarFilename, Document pom) throws XPathExpressionException { forEachPomXmlElement(pom, "pluginRepositories/pluginRepository", repository -> validateRepository(deployLogger, jarFilename, "pluginRepositories", repository)); forEachPomXmlElement(pom, "repositories/repository", repository -> validateRepository(deployLogger, jarFilename, "repositories", repository)); } private static void validateRepository(DeployLogger deployLogger, String jarFilename, String parentElementName, Element element) { String url = element.getElementsByTagName("url").item(0).getTextContent(); if (url.contains("vespa-maven-libs-release-local")) { deployLogger.logApplicationPackage(Level.WARNING, String.format("<%s> in pom.xml of '%s' uses deprecated Maven repository '%s'.\n See announcement.", parentElementName, jarFilename, url)); } } private static void forEachPomXmlElement(Document pom, String xpath, Consumer<Element> consumer) throws XPathExpressionException { NodeList dependencies = (NodeList) XPathFactory.newDefaultInstance().newXPath() .compile("/project/" + xpath) .evaluate(pom, XPathConstants.NODESET); for (int i = 0; i < dependencies.getLength(); i++) { Element element = (Element) dependencies.item(i); consumer.accept(element); } } private enum DeprecatedMavenArtifact { VESPA_HTTP_CLIENT_EXTENSION("com.yahoo.vespa", "vespa-http-client-extensions", "This artifact will be removed in Vespa 8. " + "Programmatic use can be safely removed from system/staging tests. 
" + "See internal Vespa 8 release notes for details."); final String groupId; final String artifactId; final String description; DeprecatedMavenArtifact(String groupId, String artifactId, String description) { this.groupId = groupId; this.artifactId = artifactId; this.description = description; } } private enum DeprecatedProvidedBundle { ORG_JSON("org.json:json", "The org.json library will no longer provided by jdisc runtime on Vespa 8. " + "See https: Set.of("org\\.json")); final String name; final Collection<Predicate<String>> javaPackageMatchers; final Predicate<VersionRange> versionDiscriminator; final String description; DeprecatedProvidedBundle(String name, String description, Collection<String> javaPackagePatterns) { this(name, description, __ -> true, javaPackagePatterns); } DeprecatedProvidedBundle(String name, String description, Predicate<VersionRange> versionDiscriminator, Collection<String> javaPackagePatterns) { this.name = name; this.javaPackageMatchers = javaPackagePatterns.stream() .map(s -> Pattern.compile(s).asMatchPredicate()) .collect(Collectors.toList()); this.versionDiscriminator = versionDiscriminator; this.description = description; } } }
A bit simpler to map to `info.name()` here, and just use `Set.contains` below.
private boolean serviceIsInClusterWhichShouldBeChecked(Application application, ServiceInfo serviceInfo) { Set<ApplicationClusterInfo> excludeFromChecking = application.getModel().applicationClusterInfo() .stream() .filter(ApplicationClusterInfo::getDeferChangesUntilRestart) .collect(Collectors.toSet()); return excludeFromChecking.stream().noneMatch(info -> info.name().equals(serviceInfo.getProperty("clustername").orElse(""))); }
.collect(Collectors.toSet());
private boolean serviceIsInClusterWhichShouldBeChecked(Application application, ServiceInfo serviceInfo) { Set<ApplicationClusterInfo> excludeFromChecking = application.getModel().applicationClusterInfo() .stream() .filter(ApplicationClusterInfo::getDeferChangesUntilRestart) .collect(Collectors.toSet()); return excludeFromChecking.stream().noneMatch(info -> info.name().equals(serviceInfo.getProperty("clustername").orElse(""))); }
class ConfigConvergenceChecker extends AbstractComponent { private static final Logger log = Logger.getLogger(ConfigConvergenceChecker.class.getName()); private final static Set<String> serviceTypesToCheck = Set.of( CONTAINER.serviceName, QRSERVER.serviceName, LOGSERVER_CONTAINER.serviceName, CLUSTERCONTROLLER_CONTAINER.serviceName, METRICS_PROXY_CONTAINER.serviceName, "searchnode", "storagenode", "distributor" ); private final ExecutorService responseHandlerExecutor = Executors.newSingleThreadExecutor(new DaemonThreadFactory("config-convergence-checker-response-handler-")); private final ObjectMapper jsonMapper = new ObjectMapper(); @Inject public ConfigConvergenceChecker() {} /** Fetches the active config generation for all services in the given application. */ public Map<ServiceInfo, Long> getServiceConfigGenerations(Application application, Duration timeoutPerService) { return getServiceConfigGenerations(application, timeoutPerService, true); } /** * Fetches the active config generation for all services in the given application. Will not check services * which defer config changes until restart if checkAll is false. */ private Map<ServiceInfo, Long> getServiceConfigGenerations(Application application, Duration timeoutPerService, boolean checkAll) { List<ServiceInfo> servicesToCheck = new ArrayList<>(); application.getModel().getHosts() .forEach(host -> host.getServices().stream() .filter(service -> serviceTypesToCheck.contains(service.getServiceType())) .filter(serviceInfo -> shouldCheckService(checkAll, application, serviceInfo)) .forEach(service -> getStatePort(service).ifPresent(port -> servicesToCheck.add(service)))); log.log(Level.FINE, "Services to check for config convergence: " + servicesToCheck); return getServiceGenerations(servicesToCheck, timeoutPerService); } /** Checks all services in given application. 
Returns the minimum current generation of all services */ public ServiceListResponse checkConvergenceForAllServices(Application application, Duration timeoutPerService) { return checkConvergence(application, timeoutPerService, true); } /** * Checks services except those which defer config changes until restart in the given application. * Returns the minimum current generation of those services. */ public ServiceListResponse checkConvergenceUnlessDeferringChangesUntilRestart(Application application, Duration timeoutPerService) { return checkConvergence(application, timeoutPerService, false); } private ServiceListResponse checkConvergence(Application application, Duration timeoutPerService, boolean checkAll) { Map<ServiceInfo, Long> currentGenerations = getServiceConfigGenerations(application, timeoutPerService, checkAll); long currentGeneration = currentGenerations.values().stream().mapToLong(Long::longValue).min().orElse(-1); return new ServiceListResponse(currentGenerations, application.getApplicationGeneration(), currentGeneration); } /** Check service identified by host and port in given application */ public ServiceResponse getServiceConfigGeneration(Application application, String hostAndPortToCheck, Duration timeout) { Long wantedGeneration = application.getApplicationGeneration(); try (CloseableHttpAsyncClient client = createHttpClient()) { client.start(); if ( ! 
hostInApplication(application, hostAndPortToCheck)) return new ServiceResponse(ServiceResponse.Status.hostNotFound, wantedGeneration); long currentGeneration = getServiceGeneration(client, URI.create("http: boolean converged = currentGeneration >= wantedGeneration; return new ServiceResponse(ServiceResponse.Status.ok, wantedGeneration, currentGeneration, converged); } catch (InterruptedException | ExecutionException | CancellationException e) { return new ServiceResponse(ServiceResponse.Status.notFound, wantedGeneration, e.getMessage()); } catch (Exception e) { return new ServiceResponse(ServiceResponse.Status.error, wantedGeneration, e.getMessage()); } } private boolean shouldCheckService(boolean checkServicesWithDeferChangesUntilRestart, Application application, ServiceInfo serviceInfo) { if (checkServicesWithDeferChangesUntilRestart) return true; if (isNotContainer(serviceInfo)) return true; return serviceIsInClusterWhichShouldBeChecked(application, serviceInfo); } private boolean isNotContainer(ServiceInfo serviceInfo) { return ! List.of(CONTAINER.serviceName, QRSERVER.serviceName, METRICS_PROXY_CONTAINER).contains(serviceInfo.getServiceType()); } /** Gets service generation for a list of services (in parallel). 
*/ private Map<ServiceInfo, Long> getServiceGenerations(List<ServiceInfo> services, Duration timeout) { try (CloseableHttpAsyncClient client = createHttpClient()) { client.start(); List<CompletableFuture<Void>> inprogressRequests = new ArrayList<>(); ConcurrentMap<ServiceInfo, Long> temporaryResult = new ConcurrentHashMap<>(); for (ServiceInfo service : services) { int statePort = getStatePort(service).orElse(0); if (statePort <= 0) continue; URI uri = URI.create("http: CompletableFuture<Void> inprogressRequest = getServiceGeneration(client, uri, timeout) .handle((result, error) -> { if (result != null) { temporaryResult.put(service, result); } else { log.log( Level.FINE, error, () -> String.format("Failed to retrieve service config generation for '%s': %s", service, error.getMessage())); temporaryResult.put(service, -1L); } return null; }); inprogressRequests.add(inprogressRequest); } CompletableFuture.allOf(inprogressRequests.toArray(CompletableFuture[]::new)).join(); return createMapOrderedByServiceList(services, temporaryResult); } catch (IOException e) { throw new UncheckedIOException(e); } } /** Get service generation of service at given URL */ private CompletableFuture<Long> getServiceGeneration(CloseableHttpAsyncClient client, URI serviceUrl, Duration timeout) { SimpleHttpRequest request = SimpleRequestBuilder.get(createApiUri(serviceUrl)).build(); request.setConfig(createRequestConfig(timeout)); CompletableFuture<SimpleHttpResponse> responsePromise = new CompletableFuture<>(); client.execute(request, new FutureCallback<>() { @Override public void completed(SimpleHttpResponse result) { responsePromise.complete(result); } @Override public void failed(Exception ex) { responsePromise.completeExceptionally(ex); } @Override public void cancelled() { responsePromise.cancel(false); } }); return responsePromise.thenApplyAsync(this::handleResponse, responseHandlerExecutor); } private long handleResponse(SimpleHttpResponse response) throws UncheckedIOException { try 
{ int statusCode = response.getCode(); if (statusCode != HttpStatus.SC_OK) throw new IOException("Expected status code 200, got " + statusCode); if (response.getBody() == null) throw new IOException("Response has no content"); return generationFromContainerState(jsonMapper.readTree(response.getBodyText())); } catch (IOException e) { throw new UncheckedIOException(e); } } private boolean hostInApplication(Application application, String hostPort) { for (HostInfo host : application.getModel().getHosts()) { if (hostPort.startsWith(host.getHostname())) { for (ServiceInfo service : host.getServices()) { for (PortInfo port : service.getPorts()) { if (hostPort.equals(host.getHostname() + ":" + port.getPort())) { return true; } } } } } return false; } public static Optional<Integer> getStatePort(ServiceInfo service) { return service.getPorts().stream() .filter(port -> port.getTags().contains("state")) .map(PortInfo::getPort) .findFirst(); } @Override public void deconstruct() { responseHandlerExecutor.shutdown(); try { responseHandlerExecutor.awaitTermination(10, TimeUnit.SECONDS); } catch (InterruptedException e) { log.log(Level.WARNING, "Unable to shutdown executor", e); } } private static long generationFromContainerState(JsonNode state) { return state.get("config").get("generation").asLong(-1); } private static Map<ServiceInfo, Long> createMapOrderedByServiceList( List<ServiceInfo> services, ConcurrentMap<ServiceInfo, Long> result) { Map<ServiceInfo, Long> orderedResult = new LinkedHashMap<>(); for (ServiceInfo service : services) { Long generation = result.get(service); if (generation != null) { orderedResult.put(service, generation); } } return orderedResult; } private static URI createApiUri(URI serviceUrl) { try { return new URIBuilder(serviceUrl) .setPath("/state/v1/config") .build(); } catch (URISyntaxException e) { throw new IllegalArgumentException(e); } } private static RequestConfig createRequestConfig(Duration timeout) { return RequestConfig.custom() 
.setConnectionRequestTimeout(Timeout.ofSeconds(1)) .setResponseTimeout(Timeout.ofMilliseconds(timeout.toMillis())) .setConnectTimeout(Timeout.ofSeconds(1)) .build(); } private static CloseableHttpAsyncClient createHttpClient() { return VespaAsyncHttpClientBuilder .create(tlsStrategy -> PoolingAsyncClientConnectionManagerBuilder.create() .setMaxConnTotal(100) .setMaxConnPerRoute(10) .setConnectionTimeToLive(TimeValue.ofMilliseconds(1)) .setTlsStrategy(tlsStrategy) .build()) .setIOReactorConfig(IOReactorConfig.custom() .setSoTimeout(Timeout.ofSeconds(2)) .build()) .setUserAgent("config-convergence-checker") .build(); } public static class ServiceResponse { public enum Status { ok, notFound, hostNotFound, error } public final Status status; public final Long wantedGeneration; public final Long currentGeneration; public final boolean converged; public final Optional<String> errorMessage; public ServiceResponse(Status status, long wantedGeneration) { this(status, wantedGeneration, 0); } public ServiceResponse(Status status, long wantedGeneration, long currentGeneration) { this(status, wantedGeneration, currentGeneration, false); } public ServiceResponse(Status status, long wantedGeneration, long currentGeneration, boolean converged) { this(status, wantedGeneration, currentGeneration, converged, Optional.empty()); } public ServiceResponse(Status status, long wantedGeneration, String errorMessage) { this(status, wantedGeneration, 0, false, Optional.ofNullable(errorMessage)); } private ServiceResponse(Status status, long wantedGeneration, long currentGeneration, boolean converged, Optional<String> errorMessage) { this.status = status; this.wantedGeneration = wantedGeneration; this.currentGeneration = currentGeneration; this.converged = converged; this.errorMessage = errorMessage; } } public static class ServiceListResponse { public final List<Service> services = new ArrayList<>(); public final long wantedGeneration; public final long currentGeneration; public final boolean 
converged; public ServiceListResponse(Map<ServiceInfo, Long> services, long wantedGeneration, long currentGeneration) { services.forEach((key, value) -> this.services.add(new Service(key, value))); this.wantedGeneration = wantedGeneration; this.currentGeneration = currentGeneration; this.converged = currentGeneration >= wantedGeneration; } public List<Service> services() { return services; } public static class Service { public final ServiceInfo serviceInfo; public final Long currentGeneration; public Service(ServiceInfo serviceInfo, Long currentGeneration) { this.serviceInfo = serviceInfo; this.currentGeneration = currentGeneration; } } } }
class ConfigConvergenceChecker extends AbstractComponent { private static final Logger log = Logger.getLogger(ConfigConvergenceChecker.class.getName()); private final static Set<String> serviceTypesToCheck = Set.of( CONTAINER.serviceName, QRSERVER.serviceName, LOGSERVER_CONTAINER.serviceName, CLUSTERCONTROLLER_CONTAINER.serviceName, METRICS_PROXY_CONTAINER.serviceName, "searchnode", "storagenode", "distributor" ); private final ExecutorService responseHandlerExecutor = Executors.newSingleThreadExecutor(new DaemonThreadFactory("config-convergence-checker-response-handler-")); private final ObjectMapper jsonMapper = new ObjectMapper(); @Inject public ConfigConvergenceChecker() {} /** Fetches the active config generation for all services in the given application. */ public Map<ServiceInfo, Long> getServiceConfigGenerations(Application application, Duration timeoutPerService) { return getServiceConfigGenerations(application, timeoutPerService, true); } /** * Fetches the active config generation for all services in the given application. Will not check services * which defer config changes until restart if checkAll is false. */ private Map<ServiceInfo, Long> getServiceConfigGenerations(Application application, Duration timeoutPerService, boolean checkAll) { List<ServiceInfo> servicesToCheck = new ArrayList<>(); application.getModel().getHosts() .forEach(host -> host.getServices().stream() .filter(service -> serviceTypesToCheck.contains(service.getServiceType())) .filter(serviceInfo -> shouldCheckService(checkAll, application, serviceInfo)) .forEach(service -> getStatePort(service).ifPresent(port -> servicesToCheck.add(service)))); log.log(Level.FINE, "Services to check for config convergence: " + servicesToCheck); return getServiceGenerations(servicesToCheck, timeoutPerService); } /** Checks all services in given application. 
Returns the minimum current generation of all services */ public ServiceListResponse checkConvergenceForAllServices(Application application, Duration timeoutPerService) { return checkConvergence(application, timeoutPerService, true); } /** * Checks services except those which defer config changes until restart in the given application. * Returns the minimum current generation of those services. */ public ServiceListResponse checkConvergenceUnlessDeferringChangesUntilRestart(Application application, Duration timeoutPerService) { return checkConvergence(application, timeoutPerService, false); } private ServiceListResponse checkConvergence(Application application, Duration timeoutPerService, boolean checkAll) { Map<ServiceInfo, Long> currentGenerations = getServiceConfigGenerations(application, timeoutPerService, checkAll); long currentGeneration = currentGenerations.values().stream().mapToLong(Long::longValue).min().orElse(-1); return new ServiceListResponse(currentGenerations, application.getApplicationGeneration(), currentGeneration); } /** Check service identified by host and port in given application */ public ServiceResponse getServiceConfigGeneration(Application application, String hostAndPortToCheck, Duration timeout) { Long wantedGeneration = application.getApplicationGeneration(); try (CloseableHttpAsyncClient client = createHttpClient()) { client.start(); if ( ! 
hostInApplication(application, hostAndPortToCheck)) return new ServiceResponse(ServiceResponse.Status.hostNotFound, wantedGeneration); long currentGeneration = getServiceGeneration(client, URI.create("http: boolean converged = currentGeneration >= wantedGeneration; return new ServiceResponse(ServiceResponse.Status.ok, wantedGeneration, currentGeneration, converged); } catch (InterruptedException | ExecutionException | CancellationException e) { return new ServiceResponse(ServiceResponse.Status.notFound, wantedGeneration, e.getMessage()); } catch (Exception e) { return new ServiceResponse(ServiceResponse.Status.error, wantedGeneration, e.getMessage()); } } private boolean shouldCheckService(boolean checkServicesWithDeferChangesUntilRestart, Application application, ServiceInfo serviceInfo) { if (checkServicesWithDeferChangesUntilRestart) return true; if (isNotContainer(serviceInfo)) return true; return serviceIsInClusterWhichShouldBeChecked(application, serviceInfo); } private boolean isNotContainer(ServiceInfo serviceInfo) { return ! List.of(CONTAINER.serviceName, QRSERVER.serviceName, METRICS_PROXY_CONTAINER).contains(serviceInfo.getServiceType()); } /** Gets service generation for a list of services (in parallel). 
*/ private Map<ServiceInfo, Long> getServiceGenerations(List<ServiceInfo> services, Duration timeout) { try (CloseableHttpAsyncClient client = createHttpClient()) { client.start(); List<CompletableFuture<Void>> inprogressRequests = new ArrayList<>(); ConcurrentMap<ServiceInfo, Long> temporaryResult = new ConcurrentHashMap<>(); for (ServiceInfo service : services) { int statePort = getStatePort(service).orElse(0); if (statePort <= 0) continue; URI uri = URI.create("http: CompletableFuture<Void> inprogressRequest = getServiceGeneration(client, uri, timeout) .handle((result, error) -> { if (result != null) { temporaryResult.put(service, result); } else { log.log( Level.FINE, error, () -> String.format("Failed to retrieve service config generation for '%s': %s", service, error.getMessage())); temporaryResult.put(service, -1L); } return null; }); inprogressRequests.add(inprogressRequest); } CompletableFuture.allOf(inprogressRequests.toArray(CompletableFuture[]::new)).join(); return createMapOrderedByServiceList(services, temporaryResult); } catch (IOException e) { throw new UncheckedIOException(e); } } /** Get service generation of service at given URL */ private CompletableFuture<Long> getServiceGeneration(CloseableHttpAsyncClient client, URI serviceUrl, Duration timeout) { SimpleHttpRequest request = SimpleRequestBuilder.get(createApiUri(serviceUrl)).build(); request.setConfig(createRequestConfig(timeout)); CompletableFuture<SimpleHttpResponse> responsePromise = new CompletableFuture<>(); client.execute(request, new FutureCallback<>() { @Override public void completed(SimpleHttpResponse result) { responsePromise.complete(result); } @Override public void failed(Exception ex) { responsePromise.completeExceptionally(ex); } @Override public void cancelled() { responsePromise.cancel(false); } }); return responsePromise.thenApplyAsync(this::handleResponse, responseHandlerExecutor); } private long handleResponse(SimpleHttpResponse response) throws UncheckedIOException { try 
{ int statusCode = response.getCode(); if (statusCode != HttpStatus.SC_OK) throw new IOException("Expected status code 200, got " + statusCode); if (response.getBody() == null) throw new IOException("Response has no content"); return generationFromContainerState(jsonMapper.readTree(response.getBodyText())); } catch (IOException e) { throw new UncheckedIOException(e); } } private boolean hostInApplication(Application application, String hostPort) { for (HostInfo host : application.getModel().getHosts()) { if (hostPort.startsWith(host.getHostname())) { for (ServiceInfo service : host.getServices()) { for (PortInfo port : service.getPorts()) { if (hostPort.equals(host.getHostname() + ":" + port.getPort())) { return true; } } } } } return false; } public static Optional<Integer> getStatePort(ServiceInfo service) { return service.getPorts().stream() .filter(port -> port.getTags().contains("state")) .map(PortInfo::getPort) .findFirst(); } @Override public void deconstruct() { responseHandlerExecutor.shutdown(); try { responseHandlerExecutor.awaitTermination(10, TimeUnit.SECONDS); } catch (InterruptedException e) { log.log(Level.WARNING, "Unable to shutdown executor", e); } } private static long generationFromContainerState(JsonNode state) { return state.get("config").get("generation").asLong(-1); } private static Map<ServiceInfo, Long> createMapOrderedByServiceList( List<ServiceInfo> services, ConcurrentMap<ServiceInfo, Long> result) { Map<ServiceInfo, Long> orderedResult = new LinkedHashMap<>(); for (ServiceInfo service : services) { Long generation = result.get(service); if (generation != null) { orderedResult.put(service, generation); } } return orderedResult; } private static URI createApiUri(URI serviceUrl) { try { return new URIBuilder(serviceUrl) .setPath("/state/v1/config") .build(); } catch (URISyntaxException e) { throw new IllegalArgumentException(e); } } private static RequestConfig createRequestConfig(Duration timeout) { return RequestConfig.custom() 
.setConnectionRequestTimeout(Timeout.ofSeconds(1)) .setResponseTimeout(Timeout.ofMilliseconds(timeout.toMillis())) .setConnectTimeout(Timeout.ofSeconds(1)) .build(); } private static CloseableHttpAsyncClient createHttpClient() { return VespaAsyncHttpClientBuilder .create(tlsStrategy -> PoolingAsyncClientConnectionManagerBuilder.create() .setMaxConnTotal(100) .setMaxConnPerRoute(10) .setConnectionTimeToLive(TimeValue.ofMilliseconds(1)) .setTlsStrategy(tlsStrategy) .build()) .setIOReactorConfig(IOReactorConfig.custom() .setSoTimeout(Timeout.ofSeconds(2)) .build()) .setUserAgent("config-convergence-checker") .build(); } public static class ServiceResponse { public enum Status { ok, notFound, hostNotFound, error } public final Status status; public final Long wantedGeneration; public final Long currentGeneration; public final boolean converged; public final Optional<String> errorMessage; public ServiceResponse(Status status, long wantedGeneration) { this(status, wantedGeneration, 0); } public ServiceResponse(Status status, long wantedGeneration, long currentGeneration) { this(status, wantedGeneration, currentGeneration, false); } public ServiceResponse(Status status, long wantedGeneration, long currentGeneration, boolean converged) { this(status, wantedGeneration, currentGeneration, converged, Optional.empty()); } public ServiceResponse(Status status, long wantedGeneration, String errorMessage) { this(status, wantedGeneration, 0, false, Optional.ofNullable(errorMessage)); } private ServiceResponse(Status status, long wantedGeneration, long currentGeneration, boolean converged, Optional<String> errorMessage) { this.status = status; this.wantedGeneration = wantedGeneration; this.currentGeneration = currentGeneration; this.converged = converged; this.errorMessage = errorMessage; } } public static class ServiceListResponse { public final List<Service> services = new ArrayList<>(); public final long wantedGeneration; public final long currentGeneration; public final boolean 
converged; public ServiceListResponse(Map<ServiceInfo, Long> services, long wantedGeneration, long currentGeneration) { services.forEach((key, value) -> this.services.add(new Service(key, value))); this.wantedGeneration = wantedGeneration; this.currentGeneration = currentGeneration; this.converged = currentGeneration >= wantedGeneration; } public List<Service> services() { return services; } public static class Service { public final ServiceInfo serviceInfo; public final Long currentGeneration; public Service(ServiceInfo serviceInfo, Long currentGeneration) { this.serviceInfo = serviceInfo; this.currentGeneration = currentGeneration; } } } }
Yup, will do that in next PR
private boolean serviceIsInClusterWhichShouldBeChecked(Application application, ServiceInfo serviceInfo) { Set<ApplicationClusterInfo> excludeFromChecking = application.getModel().applicationClusterInfo() .stream() .filter(ApplicationClusterInfo::getDeferChangesUntilRestart) .collect(Collectors.toSet()); return excludeFromChecking.stream().noneMatch(info -> info.name().equals(serviceInfo.getProperty("clustername").orElse(""))); }
.collect(Collectors.toSet());
private boolean serviceIsInClusterWhichShouldBeChecked(Application application, ServiceInfo serviceInfo) { Set<ApplicationClusterInfo> excludeFromChecking = application.getModel().applicationClusterInfo() .stream() .filter(ApplicationClusterInfo::getDeferChangesUntilRestart) .collect(Collectors.toSet()); return excludeFromChecking.stream().noneMatch(info -> info.name().equals(serviceInfo.getProperty("clustername").orElse(""))); }
class ConfigConvergenceChecker extends AbstractComponent { private static final Logger log = Logger.getLogger(ConfigConvergenceChecker.class.getName()); private final static Set<String> serviceTypesToCheck = Set.of( CONTAINER.serviceName, QRSERVER.serviceName, LOGSERVER_CONTAINER.serviceName, CLUSTERCONTROLLER_CONTAINER.serviceName, METRICS_PROXY_CONTAINER.serviceName, "searchnode", "storagenode", "distributor" ); private final ExecutorService responseHandlerExecutor = Executors.newSingleThreadExecutor(new DaemonThreadFactory("config-convergence-checker-response-handler-")); private final ObjectMapper jsonMapper = new ObjectMapper(); @Inject public ConfigConvergenceChecker() {} /** Fetches the active config generation for all services in the given application. */ public Map<ServiceInfo, Long> getServiceConfigGenerations(Application application, Duration timeoutPerService) { return getServiceConfigGenerations(application, timeoutPerService, true); } /** * Fetches the active config generation for all services in the given application. Will not check services * which defer config changes until restart if checkAll is false. */ private Map<ServiceInfo, Long> getServiceConfigGenerations(Application application, Duration timeoutPerService, boolean checkAll) { List<ServiceInfo> servicesToCheck = new ArrayList<>(); application.getModel().getHosts() .forEach(host -> host.getServices().stream() .filter(service -> serviceTypesToCheck.contains(service.getServiceType())) .filter(serviceInfo -> shouldCheckService(checkAll, application, serviceInfo)) .forEach(service -> getStatePort(service).ifPresent(port -> servicesToCheck.add(service)))); log.log(Level.FINE, "Services to check for config convergence: " + servicesToCheck); return getServiceGenerations(servicesToCheck, timeoutPerService); } /** Checks all services in given application. 
Returns the minimum current generation of all services */ public ServiceListResponse checkConvergenceForAllServices(Application application, Duration timeoutPerService) { return checkConvergence(application, timeoutPerService, true); } /** * Checks services except those which defer config changes until restart in the given application. * Returns the minimum current generation of those services. */ public ServiceListResponse checkConvergenceUnlessDeferringChangesUntilRestart(Application application, Duration timeoutPerService) { return checkConvergence(application, timeoutPerService, false); } private ServiceListResponse checkConvergence(Application application, Duration timeoutPerService, boolean checkAll) { Map<ServiceInfo, Long> currentGenerations = getServiceConfigGenerations(application, timeoutPerService, checkAll); long currentGeneration = currentGenerations.values().stream().mapToLong(Long::longValue).min().orElse(-1); return new ServiceListResponse(currentGenerations, application.getApplicationGeneration(), currentGeneration); } /** Check service identified by host and port in given application */ public ServiceResponse getServiceConfigGeneration(Application application, String hostAndPortToCheck, Duration timeout) { Long wantedGeneration = application.getApplicationGeneration(); try (CloseableHttpAsyncClient client = createHttpClient()) { client.start(); if ( ! 
hostInApplication(application, hostAndPortToCheck)) return new ServiceResponse(ServiceResponse.Status.hostNotFound, wantedGeneration); long currentGeneration = getServiceGeneration(client, URI.create("http: boolean converged = currentGeneration >= wantedGeneration; return new ServiceResponse(ServiceResponse.Status.ok, wantedGeneration, currentGeneration, converged); } catch (InterruptedException | ExecutionException | CancellationException e) { return new ServiceResponse(ServiceResponse.Status.notFound, wantedGeneration, e.getMessage()); } catch (Exception e) { return new ServiceResponse(ServiceResponse.Status.error, wantedGeneration, e.getMessage()); } } private boolean shouldCheckService(boolean checkServicesWithDeferChangesUntilRestart, Application application, ServiceInfo serviceInfo) { if (checkServicesWithDeferChangesUntilRestart) return true; if (isNotContainer(serviceInfo)) return true; return serviceIsInClusterWhichShouldBeChecked(application, serviceInfo); } private boolean isNotContainer(ServiceInfo serviceInfo) { return ! List.of(CONTAINER.serviceName, QRSERVER.serviceName, METRICS_PROXY_CONTAINER).contains(serviceInfo.getServiceType()); } /** Gets service generation for a list of services (in parallel). 
*/ private Map<ServiceInfo, Long> getServiceGenerations(List<ServiceInfo> services, Duration timeout) { try (CloseableHttpAsyncClient client = createHttpClient()) { client.start(); List<CompletableFuture<Void>> inprogressRequests = new ArrayList<>(); ConcurrentMap<ServiceInfo, Long> temporaryResult = new ConcurrentHashMap<>(); for (ServiceInfo service : services) { int statePort = getStatePort(service).orElse(0); if (statePort <= 0) continue; URI uri = URI.create("http: CompletableFuture<Void> inprogressRequest = getServiceGeneration(client, uri, timeout) .handle((result, error) -> { if (result != null) { temporaryResult.put(service, result); } else { log.log( Level.FINE, error, () -> String.format("Failed to retrieve service config generation for '%s': %s", service, error.getMessage())); temporaryResult.put(service, -1L); } return null; }); inprogressRequests.add(inprogressRequest); } CompletableFuture.allOf(inprogressRequests.toArray(CompletableFuture[]::new)).join(); return createMapOrderedByServiceList(services, temporaryResult); } catch (IOException e) { throw new UncheckedIOException(e); } } /** Get service generation of service at given URL */ private CompletableFuture<Long> getServiceGeneration(CloseableHttpAsyncClient client, URI serviceUrl, Duration timeout) { SimpleHttpRequest request = SimpleRequestBuilder.get(createApiUri(serviceUrl)).build(); request.setConfig(createRequestConfig(timeout)); CompletableFuture<SimpleHttpResponse> responsePromise = new CompletableFuture<>(); client.execute(request, new FutureCallback<>() { @Override public void completed(SimpleHttpResponse result) { responsePromise.complete(result); } @Override public void failed(Exception ex) { responsePromise.completeExceptionally(ex); } @Override public void cancelled() { responsePromise.cancel(false); } }); return responsePromise.thenApplyAsync(this::handleResponse, responseHandlerExecutor); } private long handleResponse(SimpleHttpResponse response) throws UncheckedIOException { try 
{ int statusCode = response.getCode(); if (statusCode != HttpStatus.SC_OK) throw new IOException("Expected status code 200, got " + statusCode); if (response.getBody() == null) throw new IOException("Response has no content"); return generationFromContainerState(jsonMapper.readTree(response.getBodyText())); } catch (IOException e) { throw new UncheckedIOException(e); } } private boolean hostInApplication(Application application, String hostPort) { for (HostInfo host : application.getModel().getHosts()) { if (hostPort.startsWith(host.getHostname())) { for (ServiceInfo service : host.getServices()) { for (PortInfo port : service.getPorts()) { if (hostPort.equals(host.getHostname() + ":" + port.getPort())) { return true; } } } } } return false; } public static Optional<Integer> getStatePort(ServiceInfo service) { return service.getPorts().stream() .filter(port -> port.getTags().contains("state")) .map(PortInfo::getPort) .findFirst(); } @Override public void deconstruct() { responseHandlerExecutor.shutdown(); try { responseHandlerExecutor.awaitTermination(10, TimeUnit.SECONDS); } catch (InterruptedException e) { log.log(Level.WARNING, "Unable to shutdown executor", e); } } private static long generationFromContainerState(JsonNode state) { return state.get("config").get("generation").asLong(-1); } private static Map<ServiceInfo, Long> createMapOrderedByServiceList( List<ServiceInfo> services, ConcurrentMap<ServiceInfo, Long> result) { Map<ServiceInfo, Long> orderedResult = new LinkedHashMap<>(); for (ServiceInfo service : services) { Long generation = result.get(service); if (generation != null) { orderedResult.put(service, generation); } } return orderedResult; } private static URI createApiUri(URI serviceUrl) { try { return new URIBuilder(serviceUrl) .setPath("/state/v1/config") .build(); } catch (URISyntaxException e) { throw new IllegalArgumentException(e); } } private static RequestConfig createRequestConfig(Duration timeout) { return RequestConfig.custom() 
.setConnectionRequestTimeout(Timeout.ofSeconds(1)) .setResponseTimeout(Timeout.ofMilliseconds(timeout.toMillis())) .setConnectTimeout(Timeout.ofSeconds(1)) .build(); } private static CloseableHttpAsyncClient createHttpClient() { return VespaAsyncHttpClientBuilder .create(tlsStrategy -> PoolingAsyncClientConnectionManagerBuilder.create() .setMaxConnTotal(100) .setMaxConnPerRoute(10) .setConnectionTimeToLive(TimeValue.ofMilliseconds(1)) .setTlsStrategy(tlsStrategy) .build()) .setIOReactorConfig(IOReactorConfig.custom() .setSoTimeout(Timeout.ofSeconds(2)) .build()) .setUserAgent("config-convergence-checker") .build(); } public static class ServiceResponse { public enum Status { ok, notFound, hostNotFound, error } public final Status status; public final Long wantedGeneration; public final Long currentGeneration; public final boolean converged; public final Optional<String> errorMessage; public ServiceResponse(Status status, long wantedGeneration) { this(status, wantedGeneration, 0); } public ServiceResponse(Status status, long wantedGeneration, long currentGeneration) { this(status, wantedGeneration, currentGeneration, false); } public ServiceResponse(Status status, long wantedGeneration, long currentGeneration, boolean converged) { this(status, wantedGeneration, currentGeneration, converged, Optional.empty()); } public ServiceResponse(Status status, long wantedGeneration, String errorMessage) { this(status, wantedGeneration, 0, false, Optional.ofNullable(errorMessage)); } private ServiceResponse(Status status, long wantedGeneration, long currentGeneration, boolean converged, Optional<String> errorMessage) { this.status = status; this.wantedGeneration = wantedGeneration; this.currentGeneration = currentGeneration; this.converged = converged; this.errorMessage = errorMessage; } } public static class ServiceListResponse { public final List<Service> services = new ArrayList<>(); public final long wantedGeneration; public final long currentGeneration; public final boolean 
converged; public ServiceListResponse(Map<ServiceInfo, Long> services, long wantedGeneration, long currentGeneration) { services.forEach((key, value) -> this.services.add(new Service(key, value))); this.wantedGeneration = wantedGeneration; this.currentGeneration = currentGeneration; this.converged = currentGeneration >= wantedGeneration; } public List<Service> services() { return services; } public static class Service { public final ServiceInfo serviceInfo; public final Long currentGeneration; public Service(ServiceInfo serviceInfo, Long currentGeneration) { this.serviceInfo = serviceInfo; this.currentGeneration = currentGeneration; } } } }
class ConfigConvergenceChecker extends AbstractComponent { private static final Logger log = Logger.getLogger(ConfigConvergenceChecker.class.getName()); private final static Set<String> serviceTypesToCheck = Set.of( CONTAINER.serviceName, QRSERVER.serviceName, LOGSERVER_CONTAINER.serviceName, CLUSTERCONTROLLER_CONTAINER.serviceName, METRICS_PROXY_CONTAINER.serviceName, "searchnode", "storagenode", "distributor" ); private final ExecutorService responseHandlerExecutor = Executors.newSingleThreadExecutor(new DaemonThreadFactory("config-convergence-checker-response-handler-")); private final ObjectMapper jsonMapper = new ObjectMapper(); @Inject public ConfigConvergenceChecker() {} /** Fetches the active config generation for all services in the given application. */ public Map<ServiceInfo, Long> getServiceConfigGenerations(Application application, Duration timeoutPerService) { return getServiceConfigGenerations(application, timeoutPerService, true); } /** * Fetches the active config generation for all services in the given application. Will not check services * which defer config changes until restart if checkAll is false. */ private Map<ServiceInfo, Long> getServiceConfigGenerations(Application application, Duration timeoutPerService, boolean checkAll) { List<ServiceInfo> servicesToCheck = new ArrayList<>(); application.getModel().getHosts() .forEach(host -> host.getServices().stream() .filter(service -> serviceTypesToCheck.contains(service.getServiceType())) .filter(serviceInfo -> shouldCheckService(checkAll, application, serviceInfo)) .forEach(service -> getStatePort(service).ifPresent(port -> servicesToCheck.add(service)))); log.log(Level.FINE, "Services to check for config convergence: " + servicesToCheck); return getServiceGenerations(servicesToCheck, timeoutPerService); } /** Checks all services in given application. 
Returns the minimum current generation of all services */ public ServiceListResponse checkConvergenceForAllServices(Application application, Duration timeoutPerService) { return checkConvergence(application, timeoutPerService, true); } /** * Checks services except those which defer config changes until restart in the given application. * Returns the minimum current generation of those services. */ public ServiceListResponse checkConvergenceUnlessDeferringChangesUntilRestart(Application application, Duration timeoutPerService) { return checkConvergence(application, timeoutPerService, false); } private ServiceListResponse checkConvergence(Application application, Duration timeoutPerService, boolean checkAll) { Map<ServiceInfo, Long> currentGenerations = getServiceConfigGenerations(application, timeoutPerService, checkAll); long currentGeneration = currentGenerations.values().stream().mapToLong(Long::longValue).min().orElse(-1); return new ServiceListResponse(currentGenerations, application.getApplicationGeneration(), currentGeneration); } /** Check service identified by host and port in given application */ public ServiceResponse getServiceConfigGeneration(Application application, String hostAndPortToCheck, Duration timeout) { Long wantedGeneration = application.getApplicationGeneration(); try (CloseableHttpAsyncClient client = createHttpClient()) { client.start(); if ( ! 
hostInApplication(application, hostAndPortToCheck)) return new ServiceResponse(ServiceResponse.Status.hostNotFound, wantedGeneration); long currentGeneration = getServiceGeneration(client, URI.create("http: boolean converged = currentGeneration >= wantedGeneration; return new ServiceResponse(ServiceResponse.Status.ok, wantedGeneration, currentGeneration, converged); } catch (InterruptedException | ExecutionException | CancellationException e) { return new ServiceResponse(ServiceResponse.Status.notFound, wantedGeneration, e.getMessage()); } catch (Exception e) { return new ServiceResponse(ServiceResponse.Status.error, wantedGeneration, e.getMessage()); } } private boolean shouldCheckService(boolean checkServicesWithDeferChangesUntilRestart, Application application, ServiceInfo serviceInfo) { if (checkServicesWithDeferChangesUntilRestart) return true; if (isNotContainer(serviceInfo)) return true; return serviceIsInClusterWhichShouldBeChecked(application, serviceInfo); } private boolean isNotContainer(ServiceInfo serviceInfo) { return ! List.of(CONTAINER.serviceName, QRSERVER.serviceName, METRICS_PROXY_CONTAINER).contains(serviceInfo.getServiceType()); } /** Gets service generation for a list of services (in parallel). 
*/ private Map<ServiceInfo, Long> getServiceGenerations(List<ServiceInfo> services, Duration timeout) { try (CloseableHttpAsyncClient client = createHttpClient()) { client.start(); List<CompletableFuture<Void>> inprogressRequests = new ArrayList<>(); ConcurrentMap<ServiceInfo, Long> temporaryResult = new ConcurrentHashMap<>(); for (ServiceInfo service : services) { int statePort = getStatePort(service).orElse(0); if (statePort <= 0) continue; URI uri = URI.create("http: CompletableFuture<Void> inprogressRequest = getServiceGeneration(client, uri, timeout) .handle((result, error) -> { if (result != null) { temporaryResult.put(service, result); } else { log.log( Level.FINE, error, () -> String.format("Failed to retrieve service config generation for '%s': %s", service, error.getMessage())); temporaryResult.put(service, -1L); } return null; }); inprogressRequests.add(inprogressRequest); } CompletableFuture.allOf(inprogressRequests.toArray(CompletableFuture[]::new)).join(); return createMapOrderedByServiceList(services, temporaryResult); } catch (IOException e) { throw new UncheckedIOException(e); } } /** Get service generation of service at given URL */ private CompletableFuture<Long> getServiceGeneration(CloseableHttpAsyncClient client, URI serviceUrl, Duration timeout) { SimpleHttpRequest request = SimpleRequestBuilder.get(createApiUri(serviceUrl)).build(); request.setConfig(createRequestConfig(timeout)); CompletableFuture<SimpleHttpResponse> responsePromise = new CompletableFuture<>(); client.execute(request, new FutureCallback<>() { @Override public void completed(SimpleHttpResponse result) { responsePromise.complete(result); } @Override public void failed(Exception ex) { responsePromise.completeExceptionally(ex); } @Override public void cancelled() { responsePromise.cancel(false); } }); return responsePromise.thenApplyAsync(this::handleResponse, responseHandlerExecutor); } private long handleResponse(SimpleHttpResponse response) throws UncheckedIOException { try 
{ int statusCode = response.getCode(); if (statusCode != HttpStatus.SC_OK) throw new IOException("Expected status code 200, got " + statusCode); if (response.getBody() == null) throw new IOException("Response has no content"); return generationFromContainerState(jsonMapper.readTree(response.getBodyText())); } catch (IOException e) { throw new UncheckedIOException(e); } } private boolean hostInApplication(Application application, String hostPort) { for (HostInfo host : application.getModel().getHosts()) { if (hostPort.startsWith(host.getHostname())) { for (ServiceInfo service : host.getServices()) { for (PortInfo port : service.getPorts()) { if (hostPort.equals(host.getHostname() + ":" + port.getPort())) { return true; } } } } } return false; } public static Optional<Integer> getStatePort(ServiceInfo service) { return service.getPorts().stream() .filter(port -> port.getTags().contains("state")) .map(PortInfo::getPort) .findFirst(); } @Override public void deconstruct() { responseHandlerExecutor.shutdown(); try { responseHandlerExecutor.awaitTermination(10, TimeUnit.SECONDS); } catch (InterruptedException e) { log.log(Level.WARNING, "Unable to shutdown executor", e); } } private static long generationFromContainerState(JsonNode state) { return state.get("config").get("generation").asLong(-1); } private static Map<ServiceInfo, Long> createMapOrderedByServiceList( List<ServiceInfo> services, ConcurrentMap<ServiceInfo, Long> result) { Map<ServiceInfo, Long> orderedResult = new LinkedHashMap<>(); for (ServiceInfo service : services) { Long generation = result.get(service); if (generation != null) { orderedResult.put(service, generation); } } return orderedResult; } private static URI createApiUri(URI serviceUrl) { try { return new URIBuilder(serviceUrl) .setPath("/state/v1/config") .build(); } catch (URISyntaxException e) { throw new IllegalArgumentException(e); } } private static RequestConfig createRequestConfig(Duration timeout) { return RequestConfig.custom() 
.setConnectionRequestTimeout(Timeout.ofSeconds(1)) .setResponseTimeout(Timeout.ofMilliseconds(timeout.toMillis())) .setConnectTimeout(Timeout.ofSeconds(1)) .build(); } private static CloseableHttpAsyncClient createHttpClient() { return VespaAsyncHttpClientBuilder .create(tlsStrategy -> PoolingAsyncClientConnectionManagerBuilder.create() .setMaxConnTotal(100) .setMaxConnPerRoute(10) .setConnectionTimeToLive(TimeValue.ofMilliseconds(1)) .setTlsStrategy(tlsStrategy) .build()) .setIOReactorConfig(IOReactorConfig.custom() .setSoTimeout(Timeout.ofSeconds(2)) .build()) .setUserAgent("config-convergence-checker") .build(); } public static class ServiceResponse { public enum Status { ok, notFound, hostNotFound, error } public final Status status; public final Long wantedGeneration; public final Long currentGeneration; public final boolean converged; public final Optional<String> errorMessage; public ServiceResponse(Status status, long wantedGeneration) { this(status, wantedGeneration, 0); } public ServiceResponse(Status status, long wantedGeneration, long currentGeneration) { this(status, wantedGeneration, currentGeneration, false); } public ServiceResponse(Status status, long wantedGeneration, long currentGeneration, boolean converged) { this(status, wantedGeneration, currentGeneration, converged, Optional.empty()); } public ServiceResponse(Status status, long wantedGeneration, String errorMessage) { this(status, wantedGeneration, 0, false, Optional.ofNullable(errorMessage)); } private ServiceResponse(Status status, long wantedGeneration, long currentGeneration, boolean converged, Optional<String> errorMessage) { this.status = status; this.wantedGeneration = wantedGeneration; this.currentGeneration = currentGeneration; this.converged = converged; this.errorMessage = errorMessage; } } public static class ServiceListResponse { public final List<Service> services = new ArrayList<>(); public final long wantedGeneration; public final long currentGeneration; public final boolean 
converged; public ServiceListResponse(Map<ServiceInfo, Long> services, long wantedGeneration, long currentGeneration) { services.forEach((key, value) -> this.services.add(new Service(key, value))); this.wantedGeneration = wantedGeneration; this.currentGeneration = currentGeneration; this.converged = currentGeneration >= wantedGeneration; } public List<Service> services() { return services; } public static class Service { public final ServiceInfo serviceInfo; public final Long currentGeneration; public Service(ServiceInfo serviceInfo, Long currentGeneration) { this.serviceInfo = serviceInfo; this.currentGeneration = currentGeneration; } } } }
It's cached in the orchestrator, which is a local component. Should be fine.
public boolean canAllocateTenantNodeTo(Node host, boolean dynamicProvisioning) { if ( ! host.type().canRun(NodeType.tenant)) return false; if (host.status().wantToRetire()) return false; if (host.allocation().map(alloc -> alloc.membership().retired()).orElse(false)) return false; if (suspended(host)) return false; if (dynamicProvisioning) return EnumSet.of(Node.State.active, Node.State.ready, Node.State.provisioned).contains(host.state()); else return host.state() == Node.State.active; }
if (suspended(host)) return false;
public boolean canAllocateTenantNodeTo(Node host, boolean dynamicProvisioning) { if ( ! host.type().canRun(NodeType.tenant)) return false; if (host.status().wantToRetire()) return false; if (host.allocation().map(alloc -> alloc.membership().retired()).orElse(false)) return false; if (suspended(host)) return false; if (dynamicProvisioning) return EnumSet.of(Node.State.active, Node.State.ready, Node.State.provisioned).contains(host.state()); else return host.state() == Node.State.active; }
class Nodes { private static final Logger log = Logger.getLogger(Nodes.class.getName()); private final CuratorDatabaseClient db; private final Zone zone; private final Clock clock; private final Orchestrator orchestrator; public Nodes(CuratorDatabaseClient db, Zone zone, Clock clock, Orchestrator orchestrator) { this.zone = zone; this.clock = clock; this.db = db; this.orchestrator = orchestrator; } /** Read and write all nodes to make sure they are stored in the latest version of the serialized format */ public void rewrite() { Instant start = clock.instant(); int nodesWritten = 0; for (Node.State state : Node.State.values()) { List<Node> nodes = db.readNodes(state); db.writeTo(state, nodes, Agent.system, Optional.empty()); nodesWritten += nodes.size(); } Instant end = clock.instant(); log.log(Level.INFO, String.format("Rewrote %d nodes in %s", nodesWritten, Duration.between(start, end))); } /** * Finds and returns the node with the hostname in any of the given states, or empty if not found * * @param hostname the full host name of the node * @param inState the states the node may be in. If no states are given, it will be returned from any state * @return the node, or empty if it was not found in any of the given states */ public Optional<Node> node(String hostname, Node.State... inState) { return db.readNode(hostname, inState); } /** * Returns a list of nodes in this repository in any of the given states * * @param inState the states to return nodes from. If no states are given, all nodes of the given type are returned */ public NodeList list(Node.State... inState) { return NodeList.copyOf(db.readNodes(inState)); } /** Returns a locked list of all nodes in this repository */ public LockedNodeList list(Mutex lock) { return new LockedNodeList(list().asList(), lock); } /** * Returns whether the zone managed by this node repository seems to be working. 
* If too many nodes are not responding, there is probably some zone-wide issue * and we should probably refrain from making changes to it. */ public boolean isWorking() { NodeList activeNodes = list(Node.State.active); if (activeNodes.size() <= 5) return true; NodeList downNodes = activeNodes.down(); return ! ( (double)downNodes.size() / (double)activeNodes.size() > 0.2 ); } /** Adds a list of newly created reserved nodes to the node repository */ public List<Node> addReservedNodes(LockedNodeList nodes) { for (Node node : nodes) { if ( node.flavor().getType() != Flavor.Type.DOCKER_CONTAINER) illegal("Cannot add " + node + ": This is not a child node"); if (node.allocation().isEmpty()) illegal("Cannot add " + node + ": Child nodes need to be allocated"); Optional<Node> existing = node(node.hostname()); if (existing.isPresent()) illegal("Cannot add " + node + ": A node with this name already exists (" + existing.get() + ", " + existing.get().history() + "). Node to be added: " + node + ", " + node.history()); } return db.addNodesInState(nodes.asList(), Node.State.reserved, Agent.system); } /** * Adds a list of (newly created) nodes to the node repository as provisioned nodes. * If any of the nodes already exists in the deprovisioned state, the new node will be merged * with the history of that node. 
*/ public List<Node> addNodes(List<Node> nodes, Agent agent) { try (Mutex lock = lockUnallocated()) { List<Node> nodesToAdd = new ArrayList<>(); List<Node> nodesToRemove = new ArrayList<>(); for (int i = 0; i < nodes.size(); i++) { var node = nodes.get(i); for (int j = 0; j < i; j++) { if (node.equals(nodes.get(j))) illegal("Cannot add nodes: " + node + " is duplicated in the argument list"); } Optional<Node> existing = node(node.hostname()); if (existing.isPresent()) { if (existing.get().state() != Node.State.deprovisioned) illegal("Cannot add " + node + ": A node with this name already exists"); node = node.with(existing.get().history()); node = node.with(existing.get().reports()); node = node.with(node.status().withFailCount(existing.get().status().failCount())); if (existing.get().status().firmwareVerifiedAt().isPresent()) node = node.with(node.status().withFirmwareVerifiedAt(existing.get().status().firmwareVerifiedAt().get())); boolean rebuilding = existing.get().status().wantToRebuild(); if (rebuilding) { node = node.with(node.status().withWantToRetire(existing.get().status().wantToRetire(), false, rebuilding)); } nodesToRemove.add(existing.get()); } nodesToAdd.add(node); } NestedTransaction transaction = new NestedTransaction(); List<Node> resultingNodes = db.addNodesInState(IP.Config.verify(nodesToAdd, list(lock)), Node.State.provisioned, agent, transaction); db.removeNodes(nodesToRemove, transaction); transaction.commit(); return resultingNodes; } } /** Sets a list of nodes ready and returns the nodes in the ready state */ public List<Node> setReady(List<Node> nodes, Agent agent, String reason) { try (Mutex lock = lockUnallocated()) { List<Node> nodesWithResetFields = nodes.stream() .map(node -> { if (node.state() != Node.State.provisioned && node.state() != Node.State.dirty) illegal("Can not set " + node + " ready. 
It is not provisioned or dirty."); return node.withWantToRetire(false, false, false, Agent.system, clock.instant()); }) .collect(Collectors.toList()); return db.writeTo(Node.State.ready, nodesWithResetFields, agent, Optional.of(reason)); } } public Node setReady(String hostname, Agent agent, String reason) { Node nodeToReady = requireNode(hostname); if (nodeToReady.state() == Node.State.ready) return nodeToReady; return setReady(List.of(nodeToReady), agent, reason).get(0); } /** Reserve nodes. This method does <b>not</b> lock the node repository */ public List<Node> reserve(List<Node> nodes) { return db.writeTo(Node.State.reserved, nodes, Agent.application, Optional.empty()); } /** Activate nodes. This method does <b>not</b> lock the node repository */ public List<Node> activate(List<Node> nodes, NestedTransaction transaction) { return db.writeTo(Node.State.active, nodes, Agent.application, Optional.empty(), transaction); } /** * Sets a list of nodes to have their allocation removable (active to inactive) in the node repository. * * @param application the application the nodes belong to * @param nodes the nodes to make removable. These nodes MUST be in the active state. */ public void setRemovable(ApplicationId application, List<Node> nodes) { try (Mutex lock = lock(application)) { List<Node> removableNodes = nodes.stream() .map(node -> node.with(node.allocation().get().removable(true))) .collect(Collectors.toList()); write(removableNodes, lock); } } /** * Deactivates these nodes in a transaction and returns the nodes in the new state which will hold if the * transaction commits. */ public List<Node> deactivate(List<Node> nodes, ApplicationTransaction transaction) { if ( ! 
zone.environment().isProduction() || zone.system().isCd()) return deallocate(nodes, Agent.application, "Deactivated by application", transaction.nested()); var stateless = NodeList.copyOf(nodes).stateless(); var stateful = NodeList.copyOf(nodes).stateful(); List<Node> written = new ArrayList<>(); written.addAll(deallocate(stateless.asList(), Agent.application, "Deactivated by application", transaction.nested())); written.addAll(db.writeTo(Node.State.inactive, stateful.asList(), Agent.application, Optional.empty(), transaction.nested())); return written; } /** * Fails these nodes in a transaction and returns the nodes in the new state which will hold if the * transaction commits. */ public List<Node> fail(List<Node> nodes, ApplicationTransaction transaction) { return fail(nodes, Agent.application, "Failed by application", transaction.nested()); } public List<Node> fail(List<Node> nodes, Agent agent, String reason) { NestedTransaction transaction = new NestedTransaction(); nodes = fail(nodes, agent, reason, transaction); transaction.commit(); return nodes; } private List<Node> fail(List<Node> nodes, Agent agent, String reason, NestedTransaction transaction) { nodes = nodes.stream() .map(n -> n.withWantToFail(false, agent, clock.instant())) .collect(Collectors.toList()); return db.writeTo(Node.State.failed, nodes, agent, Optional.of(reason), transaction); } /** Move nodes to the dirty state */ public List<Node> deallocate(List<Node> nodes, Agent agent, String reason) { return performOn(NodeList.copyOf(nodes), (node, lock) -> deallocate(node, agent, reason)); } public List<Node> deallocateRecursively(String hostname, Agent agent, String reason) { Node nodeToDirty = node(hostname).orElseThrow(() -> new IllegalArgumentException("Could not deallocate " + hostname + ": Node not found")); List<Node> nodesToDirty = (nodeToDirty.type().isHost() ? 
Stream.concat(list().childrenOf(hostname).asList().stream(), Stream.of(nodeToDirty)) : Stream.of(nodeToDirty)) .filter(node -> node.state() != Node.State.dirty) .collect(Collectors.toList()); List<String> hostnamesNotAllowedToDirty = nodesToDirty.stream() .filter(node -> node.state() != Node.State.provisioned) .filter(node -> node.state() != Node.State.failed) .filter(node -> node.state() != Node.State.parked) .filter(node -> node.state() != Node.State.breakfixed) .map(Node::hostname) .collect(Collectors.toList()); if ( ! hostnamesNotAllowedToDirty.isEmpty()) illegal("Could not deallocate " + nodeToDirty + ": " + hostnamesNotAllowedToDirty + " are not in states [provisioned, failed, parked, breakfixed]"); return nodesToDirty.stream().map(node -> deallocate(node, agent, reason)).collect(Collectors.toList()); } /** * Set a node dirty or parked, allowed if it is in the provisioned, inactive, failed or parked state. * Use this to clean newly provisioned nodes or to recycle failed nodes which have been repaired or put on hold. */ public Node deallocate(Node node, Agent agent, String reason) { NestedTransaction transaction = new NestedTransaction(); Node deallocated = deallocate(node, agent, reason, transaction); transaction.commit(); return deallocated; } public List<Node> deallocate(List<Node> nodes, Agent agent, String reason, NestedTransaction transaction) { return nodes.stream().map(node -> deallocate(node, agent, reason, transaction)).collect(Collectors.toList()); } public Node deallocate(Node node, Agent agent, String reason, NestedTransaction transaction) { if (parkOnDeallocationOf(node, agent)) { return park(node.hostname(), false, agent, reason, transaction); } else { return db.writeTo(Node.State.dirty, List.of(node), agent, Optional.of(reason), transaction).get(0); } } /** * Fails this node and returns it in its new state. 
* * @return the node in its new state * @throws NoSuchNodeException if the node is not found */ public Node fail(String hostname, Agent agent, String reason) { return fail(hostname, true, agent, reason); } public Node fail(String hostname, boolean keepAllocation, Agent agent, String reason) { return move(hostname, Node.State.failed, agent, keepAllocation, Optional.of(reason)); } /** * Fails all the nodes that are children of hostname before finally failing the hostname itself. * Non-active nodes are failed immediately, while active nodes are marked as wantToFail. * The host is failed if it has no active nodes and marked wantToFail if it has. * * @return all the nodes that were changed by this request */ public List<Node> failOrMarkRecursively(String hostname, Agent agent, String reason) { NodeList children = list().childrenOf(hostname); List<Node> changed = performOn(children, (node, lock) -> failOrMark(node, agent, reason, lock)); if (children.state(Node.State.active).isEmpty()) changed.add(move(hostname, Node.State.failed, agent, true, Optional.of(reason))); else changed.addAll(performOn(NodeList.of(node(hostname).orElseThrow()), (node, lock) -> failOrMark(node, agent, reason, lock))); return changed; } private Node failOrMark(Node node, Agent agent, String reason, Mutex lock) { if (node.state() == Node.State.active) { node = node.withWantToFail(true, agent, clock.instant()); write(node, lock); return node; } else { return move(node.hostname(), Node.State.failed, agent, true, Optional.of(reason)); } } /** * Parks this node and returns it in its new state. 
* * @return the node in its new state * @throws NoSuchNodeException if the node is not found */ public Node park(String hostname, boolean keepAllocation, Agent agent, String reason) { NestedTransaction transaction = new NestedTransaction(); Node parked = park(hostname, keepAllocation, agent, reason, transaction); transaction.commit(); return parked; } private Node park(String hostname, boolean keepAllocation, Agent agent, String reason, NestedTransaction transaction) { return move(hostname, Node.State.parked, agent, keepAllocation, Optional.of(reason), transaction); } /** * Parks all the nodes that are children of hostname before finally parking the hostname itself. * * @return List of all the parked nodes in their new state */ public List<Node> parkRecursively(String hostname, Agent agent, String reason) { return moveRecursively(hostname, Node.State.parked, agent, Optional.of(reason)); } /** * Moves a previously failed or parked node back to the active state. * * @return the node in its new state * @throws NoSuchNodeException if the node is not found */ public Node reactivate(String hostname, Agent agent, String reason) { return move(hostname, Node.State.active, agent, true, Optional.of(reason)); } /** * Moves a host to breakfixed state, removing any children. 
*/ public List<Node> breakfixRecursively(String hostname, Agent agent, String reason) { Node node = requireNode(hostname); try (Mutex lock = lockUnallocated()) { requireBreakfixable(node); NestedTransaction transaction = new NestedTransaction(); List<Node> removed = removeChildren(node, false, transaction); removed.add(move(node.hostname(), Node.State.breakfixed, agent, true, Optional.of(reason), transaction)); transaction.commit(); return removed; } } private List<Node> moveRecursively(String hostname, Node.State toState, Agent agent, Optional<String> reason) { NestedTransaction transaction = new NestedTransaction(); List<Node> moved = list().childrenOf(hostname).asList().stream() .map(child -> move(child.hostname(), toState, agent, true, reason, transaction)) .collect(Collectors.toList()); moved.add(move(hostname, toState, agent, true, reason, transaction)); transaction.commit(); return moved; } /** Move a node to given state */ private Node move(String hostname, Node.State toState, Agent agent, boolean keepAllocation, Optional<String> reason) { NestedTransaction transaction = new NestedTransaction(); Node moved = move(hostname, toState, agent, keepAllocation, reason, transaction); transaction.commit(); return moved; } /** Move a node to given state as part of a transaction */ private Node move(String hostname, Node.State toState, Agent agent, boolean keepAllocation, Optional<String> reason, NestedTransaction transaction) { try (NodeMutex lock = lockAndGetRequired(hostname)) { Node node = lock.node(); if (toState == Node.State.active) { if (node.allocation().isEmpty()) illegal("Could not set " + node + " active: It has no allocation"); if (!keepAllocation) illegal("Could not set " + node + " active: Requested to discard allocation"); for (Node currentActive : list(Node.State.active).owner(node.allocation().get().owner())) { if (node.allocation().get().membership().cluster().equals(currentActive.allocation().get().membership().cluster()) && 
node.allocation().get().membership().index() == currentActive.allocation().get().membership().index()) illegal("Could not set " + node + " active: Same cluster and index as " + currentActive); } } if (!keepAllocation && node.allocation().isPresent()) { node = node.withoutAllocation(); } if (toState == Node.State.deprovisioned) { node = node.with(IP.Config.EMPTY); } return db.writeTo(toState, List.of(node), agent, reason, transaction).get(0); } } /* * This method is used by the REST API to handle readying nodes for new allocations. For Linux * containers this will remove the node from node repository, otherwise the node will be moved to state ready. */ public Node markNodeAvailableForNewAllocation(String hostname, Agent agent, String reason) { Node node = requireNode(hostname); if (node.flavor().getType() == Flavor.Type.DOCKER_CONTAINER && node.type() == NodeType.tenant) { if (node.state() != Node.State.dirty) illegal("Cannot make " + node + " available for new allocation as it is not in state [dirty]"); return removeRecursively(node, true).get(0); } if (node.state() == Node.State.ready) return node; Node parentHost = node.parentHostname().flatMap(this::node).orElse(node); List<String> failureReasons = NodeFailer.reasonsToFailHost(parentHost); if ( ! failureReasons.isEmpty()) illegal(node + " cannot be readied because it has hard failures: " + failureReasons); return setReady(List.of(node), agent, reason).get(0); } /** * Removes all the nodes that are children of hostname before finally removing the hostname itself. 
* * @return a List of all the nodes that have been removed or (for hosts) deprovisioned */ public List<Node> removeRecursively(String hostname) { Node node = requireNode(hostname); return removeRecursively(node, false); } public List<Node> removeRecursively(Node node, boolean force) { try (Mutex lock = lockUnallocated()) { requireRemovable(node, false, force); NestedTransaction transaction = new NestedTransaction(); final List<Node> removed; if (!node.type().isHost()) { removed = List.of(node); db.removeNodes(removed, transaction); } else { removed = removeChildren(node, force, transaction); if (zone.getCloud().dynamicProvisioning()) { db.removeNodes(List.of(node), transaction); } else { move(node.hostname(), Node.State.deprovisioned, Agent.system, false, Optional.empty(), transaction); } removed.add(node); } transaction.commit(); return removed; } } /** Forgets a deprovisioned node. This removes all traces of the node in the node repository. */ public void forget(Node node) { if (node.state() != Node.State.deprovisioned) throw new IllegalArgumentException(node + " must be deprovisioned before it can be forgotten"); if (node.status().wantToRebuild()) throw new IllegalArgumentException(node + " is rebuilding and cannot be forgotten"); NestedTransaction transaction = new NestedTransaction(); db.removeNodes(List.of(node), transaction); transaction.commit(); } private List<Node> removeChildren(Node node, boolean force, NestedTransaction transaction) { List<Node> children = list().childrenOf(node).asList(); children.forEach(child -> requireRemovable(child, true, force)); db.removeNodes(children, transaction); return new ArrayList<>(children); } /** * Throws if the given node cannot be removed. 
Removal is allowed if: * - Tenant node: * - non-recursively: node is unallocated * - recursively: node is unallocated or node is in failed|parked * - Host node: iff in state provisioned|failed|parked * - Child node: * - non-recursively: node in state ready * - recursively: child is in state provisioned|failed|parked|dirty|ready */ private void requireRemovable(Node node, boolean removingRecursively, boolean force) { if (force) return; if (node.type() == NodeType.tenant && node.allocation().isPresent()) { EnumSet<Node.State> removableStates = EnumSet.of(Node.State.failed, Node.State.parked); if (!removingRecursively || !removableStates.contains(node.state())) illegal(node + " is currently allocated and cannot be removed while in " + node.state()); } final Set<Node.State> removableStates; if (node.type().isHost()) { removableStates = EnumSet.of(Node.State.provisioned, Node.State.failed, Node.State.parked); } else { removableStates = removingRecursively ? EnumSet.of(Node.State.provisioned, Node.State.failed, Node.State.parked, Node.State.dirty, Node.State.ready) : EnumSet.of(Node.State.ready); } if (!removableStates.contains(node.state())) illegal(node + " can not be removed while in " + node.state()); } /** * Throws if given node cannot be breakfixed. * Breakfix is allowed if the following is true: * - Node is tenant host * - Node is in zone without dynamic provisioning * - Node is in parked or failed state */ private void requireBreakfixable(Node node) { if (zone.getCloud().dynamicProvisioning()) { illegal("Can not breakfix in zone: " + zone); } if (node.type() != NodeType.host) { illegal(node + " can not be breakfixed as it is not a tenant host"); } Set<Node.State> legalStates = EnumSet.of(Node.State.failed, Node.State.parked); if (! legalStates.contains(node.state())) { illegal(node + " can not be removed as it is not in the states " + legalStates); } } /** * Increases the restart generation of the active nodes matching given filter. 
* @return the nodes in their new state
*/
public List<Node> restartActive(Predicate<Node> filter) {
    return restart(NodeFilter.in(Set.of(Node.State.active)).and(filter));
}

/**
 * Increases the restart generation of any nodes matching given filter.
 *
 * @return the nodes in their new state
 */
public List<Node> restart(Predicate<Node> filter) {
    // NOTE(review): assumes every matched node has an allocation — allocation().get() throws otherwise
    return performOn(filter, (node, lock) -> write(node.withRestart(node.allocation().get().restartGeneration().withIncreasedWanted()), lock));
}

/**
 * Increases the reboot generation of the nodes matching the filter.
 *
 * @return the nodes in their new state
 */
public List<Node> reboot(Predicate<Node> filter) {
    return performOn(filter, (node, lock) -> write(node.withReboot(node.status().reboot().withIncreasedWanted()), lock));
}

/**
 * Set target OS version of all nodes matching given filter.
 *
 * @return the nodes in their new state
 */
public List<Node> upgradeOs(Predicate<Node> filter, Optional<Version> version) {
    return performOn(filter, (node, lock) -> {
        var newStatus = node.status().withOsVersion(node.status().osVersion().withWanted(version));
        return write(node.with(newStatus), lock);
    });
}

/** Retire nodes matching given filter */
public List<Node> retire(Predicate<Node> filter, Agent agent, Instant instant) {
    return performOn(filter, (node, lock) -> write(node.withWantToRetire(true, agent, instant), lock));
}

/** Retire and deprovision given host and all of its children */
public List<Node> deprovision(String hostname, Agent agent, Instant instant) {
    return decommission(hostname, DecommissionOperation.deprovision, agent, instant);
}

/** Retire and rebuild given host and all of its children */
public List<Node> rebuild(String hostname, Agent agent, Instant instant) {
    return decommission(hostname, DecommissionOperation.rebuild, agent, instant);
}

/** Marks the given host and all of its children as wanting to retire, and to deprovision or rebuild. */
private List<Node> decommission(String hostname, DecommissionOperation op, Agent agent, Instant instant) {
    Optional<NodeMutex> nodeMutex = lockAndGet(hostname);
    if (nodeMutex.isEmpty()) return List.of(); // node disappeared — nothing to do
    Node host = nodeMutex.get().node();
    if (!host.type().isHost()) throw new IllegalArgumentException("Cannot " + op + " non-host " + host);

    List<Node> result;
    boolean wantToDeprovision = op == DecommissionOperation.deprovision;
    boolean wantToRebuild = op == DecommissionOperation.rebuild;
    // Hold both the host's lock and the unallocated-nodes lock while writing host and children
    try (NodeMutex lock = nodeMutex.get(); Mutex allocationLock = lockUnallocated()) {
        host = lock.node(); // re-read the host now that the locks are held
        result = performOn(list(allocationLock).childrenOf(host), (node, nodeLock) -> {
            Node newNode = node.withWantToRetire(true, wantToDeprovision, wantToRebuild, agent, instant);
            return write(newNode, nodeLock);
        });
        Node newHost = host.withWantToRetire(true, wantToDeprovision, wantToRebuild, agent, instant);
        result.add(write(newHost, lock));
    }
    return result;
}

/**
 * Writes this node after it has changed some internal state but NOT changed its state field.
 * This does NOT lock the node repository implicitly, but callers are expected to already hold the lock.
 *
 * @param lock already acquired lock
 * @return the written node for convenience
 */
public Node write(Node node, Mutex lock) { return write(List.of(node), lock).get(0); }

/**
 * Writes these nodes after they have changed some internal state but NOT changed their state field.
 * This does NOT lock the node repository implicitly, but callers are expected to already hold the lock.
 *
 * @param lock already acquired lock
 * @return the written nodes for convenience
 */
public List<Node> write(List<Node> nodes, @SuppressWarnings("unused") Mutex lock) {
    return db.writeTo(nodes, Agent.system, Optional.empty());
}

/** Performs an operation, under the appropriate lock, on all nodes matching the given predicate. */
private List<Node> performOn(Predicate<Node> filter, BiFunction<Node, Mutex, Node> action) {
    return performOn(list().matching(filter), action);
}

/**
 * Performs an operation requiring locking on all nodes matching some filter.
* @param action the action to perform
* @return the set of nodes on which the action was performed, as they became as a result of the operation
*/
private List<Node> performOn(NodeList nodes, BiFunction<Node, Mutex, Node> action) {
    // Partition the nodes by allocation, so allocated nodes are processed under their
    // owning application's lock and the rest under the unallocated-nodes lock
    List<Node> unallocatedNodes = new ArrayList<>();
    ListMap<ApplicationId, Node> allocatedNodes = new ListMap<>();
    for (Node node : nodes) {
        if (node.allocation().isPresent())
            allocatedNodes.put(node.allocation().get().owner(), node);
        else
            unallocatedNodes.add(node);
    }

    List<Node> resultingNodes = new ArrayList<>();
    try (Mutex lock = lockUnallocated()) {
        for (Node node : unallocatedNodes) {
            Optional<Node> currentNode = db.readNode(node.hostname()); // re-read under lock
            if (currentNode.isEmpty()) continue; // the node disappeared — skip it
            resultingNodes.add(action.apply(currentNode.get(), lock));
        }
    }
    for (Map.Entry<ApplicationId, List<Node>> applicationNodes : allocatedNodes.entrySet()) {
        try (Mutex lock = lock(applicationNodes.getKey())) {
            for (Node node : applicationNodes.getValue()) {
                Optional<Node> currentNode = db.readNode(node.hostname()); // re-read under lock
                if (currentNode.isEmpty()) continue;
                resultingNodes.add(action.apply(currentNode.get(), lock));
            }
        }
    }
    return resultingNodes;
}

/** Returns whether a tenant node can be allocated to the given host in this zone. */
public boolean canAllocateTenantNodeTo(Node host) {
    return canAllocateTenantNodeTo(host, zone.getCloud().dynamicProvisioning());
}

/** Returns whether the orchestrator reports this node as suspended; false if the node is unknown to it. */
public boolean suspended(Node node) {
    try {
        return orchestrator.getNodeStatus(new HostName(node.hostname())).isSuspended();
    } catch (HostNameNotFoundException e) {
        // A node unknown to the orchestrator is treated as not suspended
        return false;
    }
}

/** Create a lock which provides exclusive rights to making changes to the given application */
public Mutex lock(ApplicationId application) { return db.lock(application); }

/** Create a lock with a timeout which provides exclusive rights to making changes to the given application */
public Mutex lock(ApplicationId application, Duration timeout) { return db.lock(application, timeout); }

/** Create a lock which provides exclusive rights to modifying unallocated nodes */
public Mutex lockUnallocated() { return db.lockInactive(); }

/** Returns the unallocated/application lock, and the node acquired under that lock. */
public Optional<NodeMutex> lockAndGet(Node node) {
    Node staleNode = node;

    final int maxRetries = 4;
    for (int i = 0; i < maxRetries; ++i) {
        // Take the lock matching our (possibly stale) view of the node's allocation, then
        // verify the allocation is unchanged once the lock is held
        Mutex lockToClose = lock(staleNode);
        try {
            Optional<Node> freshNode = node(staleNode.hostname(), staleNode.state());
            if (freshNode.isEmpty()) {
                // The node may have moved to another state — look it up in any state
                freshNode = node(staleNode.hostname());
                if (freshNode.isEmpty()) {
                    return Optional.empty();
                }
            }

            if (Objects.equals(freshNode.get().allocation().map(Allocation::owner),
                               staleNode.allocation().map(Allocation::owner))) {
                NodeMutex nodeMutex = new NodeMutex(freshNode.get(), lockToClose);
                lockToClose = null; // ownership of the lock transfers to the returned NodeMutex
                return Optional.of(nodeMutex);
            }

            // The allocation (and thus the required lock) changed under us — retry with fresh info
            staleNode = freshNode.get();
        } finally {
            if (lockToClose != null) lockToClose.close();
        }
    }

    throw new IllegalStateException("Giving up (after " + maxRetries + " attempts) " +
                                    "fetching an up to date node under lock: " + node.hostname());
}

/** Returns the unallocated/application lock, and the node acquired under that lock. */
public Optional<NodeMutex> lockAndGet(String hostname) { return node(hostname).flatMap(this::lockAndGet); }

/** Returns the unallocated/application lock, and the node acquired under that lock. */
public NodeMutex lockAndGetRequired(Node node) {
    return lockAndGet(node).orElseThrow(() -> new NoSuchNodeException("No node with hostname '" + node.hostname() + "'"));
}

/** Returns the unallocated/application lock, and the node acquired under that lock. */
public NodeMutex lockAndGetRequired(String hostname) {
    return lockAndGet(hostname).orElseThrow(() -> new NoSuchNodeException("No node with hostname '" + hostname + "'"));
}

/** Returns the owning application's lock if the node is allocated, otherwise the unallocated-nodes lock. */
private Mutex lock(Node node) {
    return node.allocation().isPresent() ? lock(node.allocation().get().owner()) : lockUnallocated();
}

/** Returns the node with the given hostname, throwing NoSuchNodeException if it does not exist. */
private Node requireNode(String hostname) {
    return node(hostname).orElseThrow(() -> new NoSuchNodeException("No node with hostname '" + hostname + "'"));
}

/** Throws IllegalArgumentException with the given message. */
private void illegal(String message) {
    throw new IllegalArgumentException(message);
}

/** Returns whether node should be parked when deallocated by given agent */
private static boolean parkOnDeallocationOf(Node node, Agent agent) {
    if (node.state() == Node.State.parked) return false; // already parked
    if (agent == Agent.operator) return false; // operator-initiated deallocation never parks
    if (!node.type().isHost() && node.status().wantToDeprovision()) return false; // child nodes wanting deprovision are not parked
    // Park when retirement was explicitly requested by an operator
    boolean retirementRequestedByOperator = node.status().wantToRetire() &&
                                            node.history().event(History.Event.Type.wantToRetire)
                                                .map(History.Event::agent)
                                                .map(a -> a == Agent.operator)
                                                .orElse(false);
    return node.status().wantToDeprovision() ||
           node.status().wantToRebuild() ||
           retirementRequestedByOperator;
}

/** The different ways a host can be decommissioned */
private enum DecommissionOperation {
    deprovision,
    rebuild,
}

}
class Nodes { private static final Logger log = Logger.getLogger(Nodes.class.getName()); private final CuratorDatabaseClient db; private final Zone zone; private final Clock clock; private final Orchestrator orchestrator; public Nodes(CuratorDatabaseClient db, Zone zone, Clock clock, Orchestrator orchestrator) { this.zone = zone; this.clock = clock; this.db = db; this.orchestrator = orchestrator; } /** Read and write all nodes to make sure they are stored in the latest version of the serialized format */ public void rewrite() { Instant start = clock.instant(); int nodesWritten = 0; for (Node.State state : Node.State.values()) { List<Node> nodes = db.readNodes(state); db.writeTo(state, nodes, Agent.system, Optional.empty()); nodesWritten += nodes.size(); } Instant end = clock.instant(); log.log(Level.INFO, String.format("Rewrote %d nodes in %s", nodesWritten, Duration.between(start, end))); } /** * Finds and returns the node with the hostname in any of the given states, or empty if not found * * @param hostname the full host name of the node * @param inState the states the node may be in. If no states are given, it will be returned from any state * @return the node, or empty if it was not found in any of the given states */ public Optional<Node> node(String hostname, Node.State... inState) { return db.readNode(hostname, inState); } /** * Returns a list of nodes in this repository in any of the given states * * @param inState the states to return nodes from. If no states are given, all nodes of the given type are returned */ public NodeList list(Node.State... inState) { return NodeList.copyOf(db.readNodes(inState)); } /** Returns a locked list of all nodes in this repository */ public LockedNodeList list(Mutex lock) { return new LockedNodeList(list().asList(), lock); } /** * Returns whether the zone managed by this node repository seems to be working. 
* If too many nodes are not responding, there is probably some zone-wide issue * and we should probably refrain from making changes to it. */ public boolean isWorking() { NodeList activeNodes = list(Node.State.active); if (activeNodes.size() <= 5) return true; NodeList downNodes = activeNodes.down(); return ! ( (double)downNodes.size() / (double)activeNodes.size() > 0.2 ); } /** Adds a list of newly created reserved nodes to the node repository */ public List<Node> addReservedNodes(LockedNodeList nodes) { for (Node node : nodes) { if ( node.flavor().getType() != Flavor.Type.DOCKER_CONTAINER) illegal("Cannot add " + node + ": This is not a child node"); if (node.allocation().isEmpty()) illegal("Cannot add " + node + ": Child nodes need to be allocated"); Optional<Node> existing = node(node.hostname()); if (existing.isPresent()) illegal("Cannot add " + node + ": A node with this name already exists (" + existing.get() + ", " + existing.get().history() + "). Node to be added: " + node + ", " + node.history()); } return db.addNodesInState(nodes.asList(), Node.State.reserved, Agent.system); } /** * Adds a list of (newly created) nodes to the node repository as provisioned nodes. * If any of the nodes already exists in the deprovisioned state, the new node will be merged * with the history of that node. 
*/ public List<Node> addNodes(List<Node> nodes, Agent agent) { try (Mutex lock = lockUnallocated()) { List<Node> nodesToAdd = new ArrayList<>(); List<Node> nodesToRemove = new ArrayList<>(); for (int i = 0; i < nodes.size(); i++) { var node = nodes.get(i); for (int j = 0; j < i; j++) { if (node.equals(nodes.get(j))) illegal("Cannot add nodes: " + node + " is duplicated in the argument list"); } Optional<Node> existing = node(node.hostname()); if (existing.isPresent()) { if (existing.get().state() != Node.State.deprovisioned) illegal("Cannot add " + node + ": A node with this name already exists"); node = node.with(existing.get().history()); node = node.with(existing.get().reports()); node = node.with(node.status().withFailCount(existing.get().status().failCount())); if (existing.get().status().firmwareVerifiedAt().isPresent()) node = node.with(node.status().withFirmwareVerifiedAt(existing.get().status().firmwareVerifiedAt().get())); boolean rebuilding = existing.get().status().wantToRebuild(); if (rebuilding) { node = node.with(node.status().withWantToRetire(existing.get().status().wantToRetire(), false, rebuilding)); } nodesToRemove.add(existing.get()); } nodesToAdd.add(node); } NestedTransaction transaction = new NestedTransaction(); List<Node> resultingNodes = db.addNodesInState(IP.Config.verify(nodesToAdd, list(lock)), Node.State.provisioned, agent, transaction); db.removeNodes(nodesToRemove, transaction); transaction.commit(); return resultingNodes; } } /** Sets a list of nodes ready and returns the nodes in the ready state */ public List<Node> setReady(List<Node> nodes, Agent agent, String reason) { try (Mutex lock = lockUnallocated()) { List<Node> nodesWithResetFields = nodes.stream() .map(node -> { if (node.state() != Node.State.provisioned && node.state() != Node.State.dirty) illegal("Can not set " + node + " ready. 
It is not provisioned or dirty."); return node.withWantToRetire(false, false, false, Agent.system, clock.instant()); }) .collect(Collectors.toList()); return db.writeTo(Node.State.ready, nodesWithResetFields, agent, Optional.of(reason)); } } public Node setReady(String hostname, Agent agent, String reason) { Node nodeToReady = requireNode(hostname); if (nodeToReady.state() == Node.State.ready) return nodeToReady; return setReady(List.of(nodeToReady), agent, reason).get(0); } /** Reserve nodes. This method does <b>not</b> lock the node repository */ public List<Node> reserve(List<Node> nodes) { return db.writeTo(Node.State.reserved, nodes, Agent.application, Optional.empty()); } /** Activate nodes. This method does <b>not</b> lock the node repository */ public List<Node> activate(List<Node> nodes, NestedTransaction transaction) { return db.writeTo(Node.State.active, nodes, Agent.application, Optional.empty(), transaction); } /** * Sets a list of nodes to have their allocation removable (active to inactive) in the node repository. * * @param application the application the nodes belong to * @param nodes the nodes to make removable. These nodes MUST be in the active state. */ public void setRemovable(ApplicationId application, List<Node> nodes) { try (Mutex lock = lock(application)) { List<Node> removableNodes = nodes.stream() .map(node -> node.with(node.allocation().get().removable(true))) .collect(Collectors.toList()); write(removableNodes, lock); } } /** * Deactivates these nodes in a transaction and returns the nodes in the new state which will hold if the * transaction commits. */ public List<Node> deactivate(List<Node> nodes, ApplicationTransaction transaction) { if ( ! 
zone.environment().isProduction() || zone.system().isCd()) return deallocate(nodes, Agent.application, "Deactivated by application", transaction.nested()); var stateless = NodeList.copyOf(nodes).stateless(); var stateful = NodeList.copyOf(nodes).stateful(); List<Node> written = new ArrayList<>(); written.addAll(deallocate(stateless.asList(), Agent.application, "Deactivated by application", transaction.nested())); written.addAll(db.writeTo(Node.State.inactive, stateful.asList(), Agent.application, Optional.empty(), transaction.nested())); return written; } /** * Fails these nodes in a transaction and returns the nodes in the new state which will hold if the * transaction commits. */ public List<Node> fail(List<Node> nodes, ApplicationTransaction transaction) { return fail(nodes, Agent.application, "Failed by application", transaction.nested()); } public List<Node> fail(List<Node> nodes, Agent agent, String reason) { NestedTransaction transaction = new NestedTransaction(); nodes = fail(nodes, agent, reason, transaction); transaction.commit(); return nodes; } private List<Node> fail(List<Node> nodes, Agent agent, String reason, NestedTransaction transaction) { nodes = nodes.stream() .map(n -> n.withWantToFail(false, agent, clock.instant())) .collect(Collectors.toList()); return db.writeTo(Node.State.failed, nodes, agent, Optional.of(reason), transaction); } /** Move nodes to the dirty state */ public List<Node> deallocate(List<Node> nodes, Agent agent, String reason) { return performOn(NodeList.copyOf(nodes), (node, lock) -> deallocate(node, agent, reason)); } public List<Node> deallocateRecursively(String hostname, Agent agent, String reason) { Node nodeToDirty = node(hostname).orElseThrow(() -> new IllegalArgumentException("Could not deallocate " + hostname + ": Node not found")); List<Node> nodesToDirty = (nodeToDirty.type().isHost() ? 
Stream.concat(list().childrenOf(hostname).asList().stream(), Stream.of(nodeToDirty)) : Stream.of(nodeToDirty)) .filter(node -> node.state() != Node.State.dirty) .collect(Collectors.toList()); List<String> hostnamesNotAllowedToDirty = nodesToDirty.stream() .filter(node -> node.state() != Node.State.provisioned) .filter(node -> node.state() != Node.State.failed) .filter(node -> node.state() != Node.State.parked) .filter(node -> node.state() != Node.State.breakfixed) .map(Node::hostname) .collect(Collectors.toList()); if ( ! hostnamesNotAllowedToDirty.isEmpty()) illegal("Could not deallocate " + nodeToDirty + ": " + hostnamesNotAllowedToDirty + " are not in states [provisioned, failed, parked, breakfixed]"); return nodesToDirty.stream().map(node -> deallocate(node, agent, reason)).collect(Collectors.toList()); } /** * Set a node dirty or parked, allowed if it is in the provisioned, inactive, failed or parked state. * Use this to clean newly provisioned nodes or to recycle failed nodes which have been repaired or put on hold. */ public Node deallocate(Node node, Agent agent, String reason) { NestedTransaction transaction = new NestedTransaction(); Node deallocated = deallocate(node, agent, reason, transaction); transaction.commit(); return deallocated; } public List<Node> deallocate(List<Node> nodes, Agent agent, String reason, NestedTransaction transaction) { return nodes.stream().map(node -> deallocate(node, agent, reason, transaction)).collect(Collectors.toList()); } public Node deallocate(Node node, Agent agent, String reason, NestedTransaction transaction) { if (parkOnDeallocationOf(node, agent)) { return park(node.hostname(), false, agent, reason, transaction); } else { return db.writeTo(Node.State.dirty, List.of(node), agent, Optional.of(reason), transaction).get(0); } } /** * Fails this node and returns it in its new state. 
* * @return the node in its new state * @throws NoSuchNodeException if the node is not found */ public Node fail(String hostname, Agent agent, String reason) { return fail(hostname, true, agent, reason); } public Node fail(String hostname, boolean keepAllocation, Agent agent, String reason) { return move(hostname, Node.State.failed, agent, keepAllocation, Optional.of(reason)); } /** * Fails all the nodes that are children of hostname before finally failing the hostname itself. * Non-active nodes are failed immediately, while active nodes are marked as wantToFail. * The host is failed if it has no active nodes and marked wantToFail if it has. * * @return all the nodes that were changed by this request */ public List<Node> failOrMarkRecursively(String hostname, Agent agent, String reason) { NodeList children = list().childrenOf(hostname); List<Node> changed = performOn(children, (node, lock) -> failOrMark(node, agent, reason, lock)); if (children.state(Node.State.active).isEmpty()) changed.add(move(hostname, Node.State.failed, agent, true, Optional.of(reason))); else changed.addAll(performOn(NodeList.of(node(hostname).orElseThrow()), (node, lock) -> failOrMark(node, agent, reason, lock))); return changed; } private Node failOrMark(Node node, Agent agent, String reason, Mutex lock) { if (node.state() == Node.State.active) { node = node.withWantToFail(true, agent, clock.instant()); write(node, lock); return node; } else { return move(node.hostname(), Node.State.failed, agent, true, Optional.of(reason)); } } /** * Parks this node and returns it in its new state. 
* * @return the node in its new state * @throws NoSuchNodeException if the node is not found */ public Node park(String hostname, boolean keepAllocation, Agent agent, String reason) { NestedTransaction transaction = new NestedTransaction(); Node parked = park(hostname, keepAllocation, agent, reason, transaction); transaction.commit(); return parked; } private Node park(String hostname, boolean keepAllocation, Agent agent, String reason, NestedTransaction transaction) { return move(hostname, Node.State.parked, agent, keepAllocation, Optional.of(reason), transaction); } /** * Parks all the nodes that are children of hostname before finally parking the hostname itself. * * @return List of all the parked nodes in their new state */ public List<Node> parkRecursively(String hostname, Agent agent, String reason) { return moveRecursively(hostname, Node.State.parked, agent, Optional.of(reason)); } /** * Moves a previously failed or parked node back to the active state. * * @return the node in its new state * @throws NoSuchNodeException if the node is not found */ public Node reactivate(String hostname, Agent agent, String reason) { return move(hostname, Node.State.active, agent, true, Optional.of(reason)); } /** * Moves a host to breakfixed state, removing any children. 
*/ public List<Node> breakfixRecursively(String hostname, Agent agent, String reason) { Node node = requireNode(hostname); try (Mutex lock = lockUnallocated()) { requireBreakfixable(node); NestedTransaction transaction = new NestedTransaction(); List<Node> removed = removeChildren(node, false, transaction); removed.add(move(node.hostname(), Node.State.breakfixed, agent, true, Optional.of(reason), transaction)); transaction.commit(); return removed; } } private List<Node> moveRecursively(String hostname, Node.State toState, Agent agent, Optional<String> reason) { NestedTransaction transaction = new NestedTransaction(); List<Node> moved = list().childrenOf(hostname).asList().stream() .map(child -> move(child.hostname(), toState, agent, true, reason, transaction)) .collect(Collectors.toList()); moved.add(move(hostname, toState, agent, true, reason, transaction)); transaction.commit(); return moved; } /** Move a node to given state */ private Node move(String hostname, Node.State toState, Agent agent, boolean keepAllocation, Optional<String> reason) { NestedTransaction transaction = new NestedTransaction(); Node moved = move(hostname, toState, agent, keepAllocation, reason, transaction); transaction.commit(); return moved; } /** Move a node to given state as part of a transaction */ private Node move(String hostname, Node.State toState, Agent agent, boolean keepAllocation, Optional<String> reason, NestedTransaction transaction) { try (NodeMutex lock = lockAndGetRequired(hostname)) { Node node = lock.node(); if (toState == Node.State.active) { if (node.allocation().isEmpty()) illegal("Could not set " + node + " active: It has no allocation"); if (!keepAllocation) illegal("Could not set " + node + " active: Requested to discard allocation"); for (Node currentActive : list(Node.State.active).owner(node.allocation().get().owner())) { if (node.allocation().get().membership().cluster().equals(currentActive.allocation().get().membership().cluster()) && 
node.allocation().get().membership().index() == currentActive.allocation().get().membership().index()) illegal("Could not set " + node + " active: Same cluster and index as " + currentActive); } } if (!keepAllocation && node.allocation().isPresent()) { node = node.withoutAllocation(); } if (toState == Node.State.deprovisioned) { node = node.with(IP.Config.EMPTY); } return db.writeTo(toState, List.of(node), agent, reason, transaction).get(0); } } /* * This method is used by the REST API to handle readying nodes for new allocations. For Linux * containers this will remove the node from node repository, otherwise the node will be moved to state ready. */ public Node markNodeAvailableForNewAllocation(String hostname, Agent agent, String reason) { Node node = requireNode(hostname); if (node.flavor().getType() == Flavor.Type.DOCKER_CONTAINER && node.type() == NodeType.tenant) { if (node.state() != Node.State.dirty) illegal("Cannot make " + node + " available for new allocation as it is not in state [dirty]"); return removeRecursively(node, true).get(0); } if (node.state() == Node.State.ready) return node; Node parentHost = node.parentHostname().flatMap(this::node).orElse(node); List<String> failureReasons = NodeFailer.reasonsToFailHost(parentHost); if ( ! failureReasons.isEmpty()) illegal(node + " cannot be readied because it has hard failures: " + failureReasons); return setReady(List.of(node), agent, reason).get(0); } /** * Removes all the nodes that are children of hostname before finally removing the hostname itself. 
* * @return a List of all the nodes that have been removed or (for hosts) deprovisioned */ public List<Node> removeRecursively(String hostname) { Node node = requireNode(hostname); return removeRecursively(node, false); } public List<Node> removeRecursively(Node node, boolean force) { try (Mutex lock = lockUnallocated()) { requireRemovable(node, false, force); NestedTransaction transaction = new NestedTransaction(); final List<Node> removed; if (!node.type().isHost()) { removed = List.of(node); db.removeNodes(removed, transaction); } else { removed = removeChildren(node, force, transaction); if (zone.getCloud().dynamicProvisioning()) { db.removeNodes(List.of(node), transaction); } else { move(node.hostname(), Node.State.deprovisioned, Agent.system, false, Optional.empty(), transaction); } removed.add(node); } transaction.commit(); return removed; } } /** Forgets a deprovisioned node. This removes all traces of the node in the node repository. */ public void forget(Node node) { if (node.state() != Node.State.deprovisioned) throw new IllegalArgumentException(node + " must be deprovisioned before it can be forgotten"); if (node.status().wantToRebuild()) throw new IllegalArgumentException(node + " is rebuilding and cannot be forgotten"); NestedTransaction transaction = new NestedTransaction(); db.removeNodes(List.of(node), transaction); transaction.commit(); } private List<Node> removeChildren(Node node, boolean force, NestedTransaction transaction) { List<Node> children = list().childrenOf(node).asList(); children.forEach(child -> requireRemovable(child, true, force)); db.removeNodes(children, transaction); return new ArrayList<>(children); } /** * Throws if the given node cannot be removed. 
Removal is allowed if: * - Tenant node: * - non-recursively: node is unallocated * - recursively: node is unallocated or node is in failed|parked * - Host node: iff in state provisioned|failed|parked * - Child node: * - non-recursively: node in state ready * - recursively: child is in state provisioned|failed|parked|dirty|ready */ private void requireRemovable(Node node, boolean removingRecursively, boolean force) { if (force) return; if (node.type() == NodeType.tenant && node.allocation().isPresent()) { EnumSet<Node.State> removableStates = EnumSet.of(Node.State.failed, Node.State.parked); if (!removingRecursively || !removableStates.contains(node.state())) illegal(node + " is currently allocated and cannot be removed while in " + node.state()); } final Set<Node.State> removableStates; if (node.type().isHost()) { removableStates = EnumSet.of(Node.State.provisioned, Node.State.failed, Node.State.parked); } else { removableStates = removingRecursively ? EnumSet.of(Node.State.provisioned, Node.State.failed, Node.State.parked, Node.State.dirty, Node.State.ready) : EnumSet.of(Node.State.ready); } if (!removableStates.contains(node.state())) illegal(node + " can not be removed while in " + node.state()); } /** * Throws if given node cannot be breakfixed. * Breakfix is allowed if the following is true: * - Node is tenant host * - Node is in zone without dynamic provisioning * - Node is in parked or failed state */ private void requireBreakfixable(Node node) { if (zone.getCloud().dynamicProvisioning()) { illegal("Can not breakfix in zone: " + zone); } if (node.type() != NodeType.host) { illegal(node + " can not be breakfixed as it is not a tenant host"); } Set<Node.State> legalStates = EnumSet.of(Node.State.failed, Node.State.parked); if (! legalStates.contains(node.state())) { illegal(node + " can not be removed as it is not in the states " + legalStates); } } /** * Increases the restart generation of the active nodes matching given filter. 
* * @return the nodes in their new state */ public List<Node> restartActive(Predicate<Node> filter) { return restart(NodeFilter.in(Set.of(Node.State.active)).and(filter)); } /** * Increases the restart generation of the any nodes matching given filter. * * @return the nodes in their new state */ public List<Node> restart(Predicate<Node> filter) { return performOn(filter, (node, lock) -> write(node.withRestart(node.allocation().get().restartGeneration().withIncreasedWanted()), lock)); } /** * Increases the reboot generation of the nodes matching the filter. * * @return the nodes in their new state */ public List<Node> reboot(Predicate<Node> filter) { return performOn(filter, (node, lock) -> write(node.withReboot(node.status().reboot().withIncreasedWanted()), lock)); } /** * Set target OS version of all nodes matching given filter. * * @return the nodes in their new state */ public List<Node> upgradeOs(Predicate<Node> filter, Optional<Version> version) { return performOn(filter, (node, lock) -> { var newStatus = node.status().withOsVersion(node.status().osVersion().withWanted(version)); return write(node.with(newStatus), lock); }); } /** Retire nodes matching given filter */ public List<Node> retire(Predicate<Node> filter, Agent agent, Instant instant) { return performOn(filter, (node, lock) -> write(node.withWantToRetire(true, agent, instant), lock)); } /** Retire and deprovision given host and all of its children */ public List<Node> deprovision(String hostname, Agent agent, Instant instant) { return decommission(hostname, DecommissionOperation.deprovision, agent, instant); } /** Retire and rebuild given host and all of its children */ public List<Node> rebuild(String hostname, Agent agent, Instant instant) { return decommission(hostname, DecommissionOperation.rebuild, agent, instant); } private List<Node> decommission(String hostname, DecommissionOperation op, Agent agent, Instant instant) { Optional<NodeMutex> nodeMutex = lockAndGet(hostname); if 
(nodeMutex.isEmpty()) return List.of(); Node host = nodeMutex.get().node(); if (!host.type().isHost()) throw new IllegalArgumentException("Cannot " + op + " non-host " + host); List<Node> result; boolean wantToDeprovision = op == DecommissionOperation.deprovision; boolean wantToRebuild = op == DecommissionOperation.rebuild; try (NodeMutex lock = nodeMutex.get(); Mutex allocationLock = lockUnallocated()) { host = lock.node(); result = performOn(list(allocationLock).childrenOf(host), (node, nodeLock) -> { Node newNode = node.withWantToRetire(true, wantToDeprovision, wantToRebuild, agent, instant); return write(newNode, nodeLock); }); Node newHost = host.withWantToRetire(true, wantToDeprovision, wantToRebuild, agent, instant); result.add(write(newHost, lock)); } return result; } /** * Writes this node after it has changed some internal state but NOT changed its state field. * This does NOT lock the node repository implicitly, but callers are expected to already hold the lock. * * @param lock already acquired lock * @return the written node for convenience */ public Node write(Node node, Mutex lock) { return write(List.of(node), lock).get(0); } /** * Writes these nodes after they have changed some internal state but NOT changed their state field. * This does NOT lock the node repository implicitly, but callers are expected to already hold the lock. * * @param lock already acquired lock * @return the written nodes for convenience */ public List<Node> write(List<Node> nodes, @SuppressWarnings("unused") Mutex lock) { return db.writeTo(nodes, Agent.system, Optional.empty()); } private List<Node> performOn(Predicate<Node> filter, BiFunction<Node, Mutex, Node> action) { return performOn(list().matching(filter), action); } /** * Performs an operation requiring locking on all nodes matching some filter. 
* * @param action the action to perform * @return the set of nodes on which the action was performed, as they became as a result of the operation */ private List<Node> performOn(NodeList nodes, BiFunction<Node, Mutex, Node> action) { List<Node> unallocatedNodes = new ArrayList<>(); ListMap<ApplicationId, Node> allocatedNodes = new ListMap<>(); for (Node node : nodes) { if (node.allocation().isPresent()) allocatedNodes.put(node.allocation().get().owner(), node); else unallocatedNodes.add(node); } List<Node> resultingNodes = new ArrayList<>(); try (Mutex lock = lockUnallocated()) { for (Node node : unallocatedNodes) { Optional<Node> currentNode = db.readNode(node.hostname()); if (currentNode.isEmpty()) continue; resultingNodes.add(action.apply(currentNode.get(), lock)); } } for (Map.Entry<ApplicationId, List<Node>> applicationNodes : allocatedNodes.entrySet()) { try (Mutex lock = lock(applicationNodes.getKey())) { for (Node node : applicationNodes.getValue()) { Optional<Node> currentNode = db.readNode(node.hostname()); if (currentNode.isEmpty()) continue; resultingNodes.add(action.apply(currentNode.get(), lock)); } } } return resultingNodes; } public boolean canAllocateTenantNodeTo(Node host) { return canAllocateTenantNodeTo(host, zone.getCloud().dynamicProvisioning()); } public boolean suspended(Node node) { try { return orchestrator.getNodeStatus(new HostName(node.hostname())).isSuspended(); } catch (HostNameNotFoundException e) { return false; } } /** Create a lock which provides exclusive rights to making changes to the given application */ public Mutex lock(ApplicationId application) { return db.lock(application); } /** Create a lock with a timeout which provides exclusive rights to making changes to the given application */ public Mutex lock(ApplicationId application, Duration timeout) { return db.lock(application, timeout); } /** Create a lock which provides exclusive rights to modifying unallocated nodes */ public Mutex lockUnallocated() { return 
db.lockInactive(); } /** Returns the unallocated/application lock, and the node acquired under that lock. */ public Optional<NodeMutex> lockAndGet(Node node) { Node staleNode = node; final int maxRetries = 4; for (int i = 0; i < maxRetries; ++i) { Mutex lockToClose = lock(staleNode); try { Optional<Node> freshNode = node(staleNode.hostname(), staleNode.state()); if (freshNode.isEmpty()) { freshNode = node(staleNode.hostname()); if (freshNode.isEmpty()) { return Optional.empty(); } } if (Objects.equals(freshNode.get().allocation().map(Allocation::owner), staleNode.allocation().map(Allocation::owner))) { NodeMutex nodeMutex = new NodeMutex(freshNode.get(), lockToClose); lockToClose = null; return Optional.of(nodeMutex); } staleNode = freshNode.get(); } finally { if (lockToClose != null) lockToClose.close(); } } throw new IllegalStateException("Giving up (after " + maxRetries + " attempts) " + "fetching an up to date node under lock: " + node.hostname()); } /** Returns the unallocated/application lock, and the node acquired under that lock. */ public Optional<NodeMutex> lockAndGet(String hostname) { return node(hostname).flatMap(this::lockAndGet); } /** Returns the unallocated/application lock, and the node acquired under that lock. */ public NodeMutex lockAndGetRequired(Node node) { return lockAndGet(node).orElseThrow(() -> new NoSuchNodeException("No node with hostname '" + node.hostname() + "'")); } /** Returns the unallocated/application lock, and the node acquired under that lock. */ public NodeMutex lockAndGetRequired(String hostname) { return lockAndGet(hostname).orElseThrow(() -> new NoSuchNodeException("No node with hostname '" + hostname + "'")); } private Mutex lock(Node node) { return node.allocation().isPresent() ? 
lock(node.allocation().get().owner()) : lockUnallocated(); } private Node requireNode(String hostname) { return node(hostname).orElseThrow(() -> new NoSuchNodeException("No node with hostname '" + hostname + "'")); } private void illegal(String message) { throw new IllegalArgumentException(message); } /** Returns whether node should be parked when deallocated by given agent */ private static boolean parkOnDeallocationOf(Node node, Agent agent) { if (node.state() == Node.State.parked) return false; if (agent == Agent.operator) return false; if (!node.type().isHost() && node.status().wantToDeprovision()) return false; boolean retirementRequestedByOperator = node.status().wantToRetire() && node.history().event(History.Event.Type.wantToRetire) .map(History.Event::agent) .map(a -> a == Agent.operator) .orElse(false); return node.status().wantToDeprovision() || node.status().wantToRebuild() || retirementRequestedByOperator; } /** The different ways a host can be decommissioned */ private enum DecommissionOperation { deprovision, rebuild, } }
"thie belowh"? 😄
public void testMultipleInstancesWithDifferentChanges() { DeploymentContext i1 = tester.newDeploymentContext("t", "a", "i1"); DeploymentContext i2 = tester.newDeploymentContext("t", "a", "i2"); DeploymentContext i3 = tester.newDeploymentContext("t", "a", "i3"); DeploymentContext i4 = tester.newDeploymentContext("t", "a", "i4"); ApplicationPackage applicationPackage = ApplicationPackageBuilder .fromDeploymentXml("<deployment version='1'>\n" + " <parallel>\n" + " <instance id='i1'>\n" + " <prod>\n" + " <region>us-east-3</region>\n" + " <delay hours='6' />\n" + " </prod>\n" + " </instance>\n" + " <instance id='i2'>\n" + " <prod>\n" + " <region>us-east-3</region>\n" + " </prod>\n" + " </instance>\n" + " </parallel>\n" + " <instance id='i3'>\n" + " <prod>\n" + " <region>us-east-3</region>\n" + " <delay hours='18' />\n" + " <test>us-east-3</test>\n" + " </prod>\n" + " </instance>\n" + " <instance id='i4'>\n" + " <test />\n" + " <staging />\n" + " <prod>\n" + " <region>us-east-3</region>\n" + " </prod>\n" + " </instance>\n" + "</deployment>\n"); i1.submit(applicationPackage); Optional<ApplicationVersion> v0 = i1.lastSubmission(); tester.outstandingChangeDeployer().run(); assertEquals(v0, i1.instance().change().application()); assertEquals(v0, i2.instance().change().application()); assertEquals(Optional.empty(), i3.instance().change().application()); assertEquals(Optional.empty(), i4.instance().change().application()); i4.runJob(systemTest).runJob(stagingTest); i1.runJob(productionUsEast3); i2.runJob(productionUsEast3); tester.outstandingChangeDeployer().run(); assertEquals(v0, i1.instance().latestDeployed()); assertEquals(v0, i2.instance().latestDeployed()); assertEquals(Optional.empty(), i1.instance().change().application()); assertEquals(Optional.empty(), i2.instance().change().application()); assertEquals(Optional.empty(), i3.instance().change().application()); assertEquals(Optional.empty(), i4.instance().change().application()); 
tester.clock().advance(Duration.ofHours(6)); tester.outstandingChangeDeployer().run(); assertEquals(Optional.empty(), i1.instance().change().application()); assertEquals(Optional.empty(), i2.instance().change().application()); assertEquals(v0, i3.instance().change().application()); assertEquals(Optional.empty(), i4.instance().change().application()); i3.runJob(productionUsEast3); tester.clock().advance(Duration.ofHours(12)); i1.submit(applicationPackage); Optional<ApplicationVersion> v1 = i1.lastSubmission(); i4.runJob(systemTest).runJob(stagingTest); i1.runJob(productionUsEast3); i2.runJob(productionUsEast3); assertEquals(v1, i1.instance().latestDeployed()); assertEquals(v1, i2.instance().latestDeployed()); assertEquals(Optional.empty(), i1.instance().change().application()); assertEquals(Optional.empty(), i2.instance().change().application()); assertEquals(v0, i3.instance().change().application()); assertEquals(Optional.empty(), i4.instance().change().application()); tester.clock().advance(Duration.ofHours(3)); i1.submit(applicationPackage); Optional<ApplicationVersion> v2 = i1.lastSubmission(); i4.runJob(systemTest).runJob(stagingTest); i1.runJob(productionUsEast3); tester.clock().advance(Duration.ofHours(3)); assertEquals(v0, i3.instance().change().application()); i3.runJob(testUsEast3); assertEquals(Optional.empty(), i3.instance().change().application()); tester.outstandingChangeDeployer().run(); assertEquals(v2, i1.instance().latestDeployed()); assertEquals(v1, i2.instance().latestDeployed()); assertEquals(v0, i3.instance().latestDeployed()); assertEquals(Optional.empty(), i1.instance().change().application()); assertEquals(v2, i2.instance().change().application()); assertEquals(v1, i3.instance().change().application()); assertEquals(v0, i4.instance().change().application()); }
public void testMultipleInstancesWithDifferentChanges() { DeploymentContext i1 = tester.newDeploymentContext("t", "a", "i1"); DeploymentContext i2 = tester.newDeploymentContext("t", "a", "i2"); DeploymentContext i3 = tester.newDeploymentContext("t", "a", "i3"); DeploymentContext i4 = tester.newDeploymentContext("t", "a", "i4"); ApplicationPackage applicationPackage = ApplicationPackageBuilder .fromDeploymentXml("<deployment version='1'>\n" + " <parallel>\n" + " <instance id='i1'>\n" + " <prod>\n" + " <region>us-east-3</region>\n" + " <delay hours='6' />\n" + " </prod>\n" + " </instance>\n" + " <instance id='i2'>\n" + " <prod>\n" + " <region>us-east-3</region>\n" + " </prod>\n" + " </instance>\n" + " </parallel>\n" + " <instance id='i3'>\n" + " <prod>\n" + " <region>us-east-3</region>\n" + " <delay hours='18' />\n" + " <test>us-east-3</test>\n" + " </prod>\n" + " </instance>\n" + " <instance id='i4'>\n" + " <test />\n" + " <staging />\n" + " <prod>\n" + " <region>us-east-3</region>\n" + " </prod>\n" + " </instance>\n" + "</deployment>\n"); i1.submit(applicationPackage); Optional<ApplicationVersion> v0 = i1.lastSubmission(); tester.outstandingChangeDeployer().run(); assertEquals(v0, i1.instance().change().application()); assertEquals(v0, i2.instance().change().application()); assertEquals(Optional.empty(), i3.instance().change().application()); assertEquals(Optional.empty(), i4.instance().change().application()); i4.runJob(systemTest).runJob(stagingTest); i1.runJob(productionUsEast3); i2.runJob(productionUsEast3); tester.outstandingChangeDeployer().run(); assertEquals(v0, i1.instance().latestDeployed()); assertEquals(v0, i2.instance().latestDeployed()); assertEquals(Optional.empty(), i1.instance().change().application()); assertEquals(Optional.empty(), i2.instance().change().application()); assertEquals(Optional.empty(), i3.instance().change().application()); assertEquals(Optional.empty(), i4.instance().change().application()); 
tester.clock().advance(Duration.ofHours(6)); tester.outstandingChangeDeployer().run(); assertEquals(Optional.empty(), i1.instance().change().application()); assertEquals(Optional.empty(), i2.instance().change().application()); assertEquals(v0, i3.instance().change().application()); assertEquals(Optional.empty(), i4.instance().change().application()); i3.runJob(productionUsEast3); tester.clock().advance(Duration.ofHours(12)); i1.submit(applicationPackage); Optional<ApplicationVersion> v1 = i1.lastSubmission(); i4.runJob(systemTest).runJob(stagingTest); i1.runJob(productionUsEast3); i2.runJob(productionUsEast3); assertEquals(v1, i1.instance().latestDeployed()); assertEquals(v1, i2.instance().latestDeployed()); assertEquals(Optional.empty(), i1.instance().change().application()); assertEquals(Optional.empty(), i2.instance().change().application()); assertEquals(v0, i3.instance().change().application()); assertEquals(Optional.empty(), i4.instance().change().application()); tester.clock().advance(Duration.ofHours(3)); i1.submit(applicationPackage); Optional<ApplicationVersion> v2 = i1.lastSubmission(); i4.runJob(systemTest).runJob(stagingTest); i1.runJob(productionUsEast3); tester.clock().advance(Duration.ofHours(3)); assertEquals(v0, i3.instance().change().application()); i3.runJob(testUsEast3); assertEquals(Optional.empty(), i3.instance().change().application()); tester.outstandingChangeDeployer().run(); assertEquals(v2, i1.instance().latestDeployed()); assertEquals(v1, i2.instance().latestDeployed()); assertEquals(v0, i3.instance().latestDeployed()); assertEquals(Optional.empty(), i1.instance().change().application()); assertEquals(v2, i2.instance().change().application()); assertEquals(v1, i3.instance().change().application()); assertEquals(v0, i4.instance().change().application()); }
class DeploymentTriggerTest { private final DeploymentTester tester = new DeploymentTester(); @Test public void testTriggerFailing() { ApplicationPackage applicationPackage = new ApplicationPackageBuilder() .upgradePolicy("default") .region("us-west-1") .build(); var app = tester.newDeploymentContext().submit(applicationPackage).deploy(); Version version = Version.fromString("6.3"); tester.controllerTester().upgradeSystem(version); tester.upgrader().maintain(); app.failDeployment(stagingTest); tester.triggerJobs(); assertEquals("Retried dead job", 2, tester.jobs().active().size()); app.assertRunning(stagingTest); app.runJob(stagingTest); app.assertRunning(systemTest); assertEquals(1, tester.jobs().active().size()); app.timeOutUpgrade(systemTest); tester.triggerJobs(); assertEquals("Job is retried on failure", 1, tester.jobs().active().size()); app.runJob(systemTest); tester.triggerJobs(); app.assertRunning(productionUsWest1); tester.applications().lockApplicationOrThrow(app.application().id(), locked -> tester.applications().store(locked.withProjectId(OptionalLong.empty()))); app.timeOutConvergence(productionUsWest1); tester.triggerJobs(); assertEquals("Job is not triggered when no projectId is present", 0, tester.jobs().active().size()); } @Test public void leadingUpgradeAllowsApplicationChangeWhileUpgrading() { var applicationPackage = new ApplicationPackageBuilder().region("us-east-3") .upgradeRollout("leading") .build(); var app = tester.newDeploymentContext(); app.submit(applicationPackage).deploy(); Change upgrade = Change.of(new Version("7.8.9")); tester.controllerTester().upgradeSystem(upgrade.platform().get()); tester.upgrader().maintain(); app.runJob(systemTest).runJob(stagingTest); tester.triggerJobs(); app.assertRunning(productionUsEast3); assertEquals(upgrade, app.instance().change()); app.submit(applicationPackage); assertEquals(upgrade.with(app.lastSubmission().get()), app.instance().change()); } @Test public void abortsJobsOnNewApplicationChange() { 
var app = tester.newDeploymentContext(); app.submit() .runJob(systemTest) .runJob(stagingTest); tester.triggerJobs(); RunId id = tester.jobs().last(app.instanceId(), productionUsCentral1).get().id(); assertTrue(tester.jobs().active(id).isPresent()); app.submit(); assertTrue(tester.jobs().active(id).isPresent()); tester.triggerJobs(); tester.runner().run(); assertTrue(tester.jobs().active(id).isPresent()); app.runJob(systemTest).runJob(stagingTest).runJob(stagingTest); tester.triggerJobs(); app.jobAborted(productionUsCentral1); app.runJob(productionUsCentral1).runJob(productionUsWest1).runJob(productionUsEast3); assertEquals(Change.empty(), app.instance().change()); tester.controllerTester().upgradeSystem(new Version("8.9")); tester.upgrader().maintain(); app.runJob(systemTest).runJob(stagingTest); tester.triggerJobs(); app.submit(); app.runJob(systemTest).runJob(stagingTest); tester.triggerJobs(); tester.runner().run(); assertEquals(EnumSet.of(productionUsCentral1), tester.jobs().active().stream() .map(run -> run.id().type()) .collect(Collectors.toCollection(() -> EnumSet.noneOf(JobType.class)))); } @Test public void deploymentSpecWithDelays() { ApplicationPackage applicationPackage = new ApplicationPackageBuilder() .systemTest() .delay(Duration.ofSeconds(30)) .region("us-west-1") .delay(Duration.ofMinutes(2)) .delay(Duration.ofMinutes(2)) .region("us-central-1") .delay(Duration.ofMinutes(10)) .build(); var app = tester.newDeploymentContext().submit(applicationPackage); app.runJob(systemTest); tester.clock().advance(Duration.ofSeconds(15)); app.runJob(stagingTest); tester.triggerJobs(); assertEquals(0, tester.jobs().active().size()); tester.clock().advance(Duration.ofSeconds(15)); tester.triggerJobs(); assertEquals(1, tester.jobs().active().size()); app.assertRunning(productionUsWest1); tester.clock().advance(Duration.ofMinutes(3)); tester.triggerJobs(); assertEquals(1, tester.jobs().active().size()); app.assertRunning(productionUsWest1); 
app.runJob(productionUsWest1); tester.triggerJobs(); assertTrue("No more jobs triggered at this time", tester.jobs().active().isEmpty()); tester.clock().advance(Duration.ofMinutes(3)); tester.triggerJobs(); assertTrue("No more jobs triggered at this time", tester.jobs().active().isEmpty()); tester.clock().advance(Duration.ofMinutes(1)); tester.triggerJobs(); app.runJob(productionUsCentral1); assertTrue("All jobs consumed", tester.jobs().active().isEmpty()); tester.clock().advance(Duration.ofMinutes(10)); tester.triggerJobs(); assertTrue("All jobs consumed", tester.jobs().active().isEmpty()); } @Test public void deploymentSpecWithParallelDeployments() { ApplicationPackage applicationPackage = new ApplicationPackageBuilder() .region("us-central-1") .parallel("us-west-1", "us-east-3") .region("eu-west-1") .build(); var app = tester.newDeploymentContext().submit(applicationPackage); app.runJob(systemTest).runJob(stagingTest); tester.triggerJobs(); assertEquals(1, tester.jobs().active().size()); app.runJob(productionUsCentral1); tester.triggerJobs(); assertEquals(2, tester.jobs().active().size()); app.assertRunning(productionUsEast3); app.assertRunning(productionUsWest1); app.runJob(productionUsWest1); assertEquals(1, tester.jobs().active().size()); app.assertRunning(productionUsEast3); app.runJob(productionUsEast3); tester.triggerJobs(); assertEquals(1, tester.jobs().active().size()); app.runJob(productionEuWest1); assertTrue("All jobs consumed", tester.jobs().active().isEmpty()); } @Test public void testNoOtherChangesDuringSuspension() { ApplicationPackage applicationPackage = new ApplicationPackageBuilder() .region("us-central-1") .parallel("us-west-1", "us-east-3") .build(); var application = tester.newDeploymentContext().submit().deploy(); tester.configServer().setSuspension(application.deploymentIdIn(ZoneId.from("prod", "us-central-1")), true); application.submit() .runJob(systemTest) .runJob(stagingTest) .runJob(productionUsCentral1); tester.triggerJobs(); 
application.assertNotRunning(productionUsEast3); application.assertNotRunning(productionUsWest1); tester.configServer().setSuspension(application.deploymentIdIn(ZoneId.from("prod", "us-central-1")), false); tester.triggerJobs(); application.runJob(productionUsWest1).runJob(productionUsEast3); assertEquals(Change.empty(), application.instance().change()); } @Test public void testBlockRevisionChange() { tester.at(Instant.parse("2017-09-26T17:30:00.00Z")); Version version = Version.fromString("6.2"); tester.controllerTester().upgradeSystem(version); ApplicationPackage applicationPackage = new ApplicationPackageBuilder() .upgradePolicy("canary") .blockChange(true, false, "tue", "18-19", "UTC") .region("us-west-1") .region("us-central-1") .region("us-east-3") .build(); var app = tester.newDeploymentContext().submit(applicationPackage).deploy(); tester.clock().advance(Duration.ofHours(1)); tester.triggerJobs(); assertEquals(0, tester.jobs().active().size()); app.submit(applicationPackage); assertTrue(app.deploymentStatus().outstandingChange(app.instance().name()).hasTargets()); app.runJob(systemTest).runJob(stagingTest); tester.outstandingChangeDeployer().run(); assertTrue(app.deploymentStatus().outstandingChange(app.instance().name()).hasTargets()); tester.triggerJobs(); assertEquals(emptyList(), tester.jobs().active()); tester.clock().advance(Duration.ofHours(2)); tester.outstandingChangeDeployer().run(); assertFalse(app.deploymentStatus().outstandingChange(app.instance().name()).hasTargets()); tester.triggerJobs(); app.assertRunning(productionUsWest1); } @Test public void testCompletionOfPartOfChangeDuringBlockWindow() { tester.at(Instant.parse("2017-09-26T17:30:00.00Z")); ApplicationPackage applicationPackage = new ApplicationPackageBuilder() .blockChange(true, true, "tue", "18", "UTC") .region("us-west-1") .region("us-east-3") .build(); var app = tester.newDeploymentContext().submit(applicationPackage).deploy(); Version v1 = Version.fromString("6.1"); Version v2 = 
Version.fromString("6.2"); tester.controllerTester().upgradeSystem(v2); tester.upgrader().maintain(); app.submit(applicationPackage); app.runJob(stagingTest).runJob(systemTest); tester.clock().advance(Duration.ofHours(1)); tester.outstandingChangeDeployer().run(); app.runJob(productionUsWest1); assertEquals(1, app.instanceJobs().get(productionUsWest1).lastSuccess().get().versions().targetApplication().buildNumber().getAsLong()); assertEquals(2, app.deploymentStatus().outstandingChange(app.instance().name()).application().get().buildNumber().getAsLong()); tester.triggerJobs(); assertEquals(3, tester.jobs().active().size()); app.runJob(productionUsEast3); assertEquals(2, tester.jobs().active().size()); assertEquals(Change.empty(), app.instance().change()); assertTrue(app.deploymentStatus().outstandingChange(app.instance().name()).hasTargets()); app.runJob(stagingTest).runJob(systemTest); tester.clock().advance(Duration.ofHours(1)); tester.outstandingChangeDeployer().run(); assertTrue(app.instance().change().hasTargets()); assertFalse(app.deploymentStatus().outstandingChange(app.instance().name()).hasTargets()); app.runJob(productionUsWest1).runJob(productionUsEast3); assertFalse(app.instance().change().hasTargets()); } @Test public void testJobPause() { ApplicationPackage applicationPackage = new ApplicationPackageBuilder() .region("us-west-1") .region("us-east-3") .build(); var app = tester.newDeploymentContext().submit(applicationPackage).deploy(); tester.controllerTester().upgradeSystem(new Version("9.8.7")); tester.upgrader().maintain(); tester.deploymentTrigger().pauseJob(app.instanceId(), productionUsWest1, tester.clock().instant().plus(Duration.ofSeconds(1))); tester.deploymentTrigger().pauseJob(app.instanceId(), productionUsEast3, tester.clock().instant().plus(Duration.ofSeconds(3))); app.runJob(systemTest).runJob(stagingTest); tester.triggerJobs(); app.assertNotRunning(productionUsWest1); tester.clock().advance(Duration.ofMillis(1500)); tester.triggerJobs(); 
app.assertRunning(productionUsWest1); tester.deploymentTrigger().pauseJob(app.instanceId(), productionUsWest1, tester.clock().instant().plus(Duration.ofSeconds(1))); app.failDeployment(productionUsWest1); tester.triggerJobs(); app.assertNotRunning(productionUsWest1); tester.clock().advance(Duration.ofMillis(1000)); tester.triggerJobs(); app.runJob(productionUsWest1); tester.triggerJobs(); app.assertNotRunning(productionUsEast3); tester.deploymentTrigger().forceTrigger(app.instanceId(), productionUsEast3, "mrTrigger", true); app.assertRunning(productionUsEast3); assertFalse(app.instance().jobPause(productionUsEast3).isPresent()); } @Test public void applicationVersionIsNotDowngraded() { ApplicationPackage applicationPackage = new ApplicationPackageBuilder() .region("us-central-1") .region("eu-west-1") .build(); var app = tester.newDeploymentContext().submit(applicationPackage).deploy(); app.submit(applicationPackage) .runJob(systemTest) .runJob(stagingTest) .timeOutUpgrade(productionUsCentral1); ApplicationVersion appVersion1 = app.lastSubmission().get(); assertEquals(appVersion1, app.deployment(ZoneId.from("prod.us-central-1")).applicationVersion()); tester.deploymentTrigger().cancelChange(app.instanceId(), PLATFORM); assertEquals(Change.of(appVersion1), app.instance().change()); tester.deploymentTrigger().cancelChange(app.instanceId(), ALL); assertEquals(Change.empty(), app.instance().change()); Version version1 = new Version("6.2"); tester.controllerTester().upgradeSystem(version1); tester.upgrader().maintain(); app.runJob(systemTest).runJob(stagingTest).failDeployment(productionUsCentral1); app.runJob(systemTest).runJob(stagingTest); app.runJob(productionUsCentral1).runJob(productionEuWest1); assertEquals(appVersion1, app.deployment(ZoneId.from("prod.us-central-1")).applicationVersion()); } @Test public void downgradingApplicationVersionWorks() { var app = tester.newDeploymentContext().submit().deploy(); ApplicationVersion appVersion0 = 
app.lastSubmission().get(); assertEquals(Optional.of(appVersion0), app.instance().latestDeployed()); app.submit().deploy(); ApplicationVersion appVersion1 = app.lastSubmission().get(); assertEquals(Optional.of(appVersion1), app.instance().latestDeployed()); tester.deploymentTrigger().forceChange(app.instanceId(), Change.of(appVersion0)); assertEquals(Change.of(appVersion0), app.instance().change()); app.runJob(stagingTest) .runJob(productionUsCentral1) .runJob(productionUsEast3) .runJob(productionUsWest1); assertEquals(Change.empty(), app.instance().change()); assertEquals(appVersion0, app.instance().deployments().get(productionUsEast3.zone(tester.controller().system())).applicationVersion()); assertEquals(Optional.of(appVersion0), app.instance().latestDeployed()); } @Test public void settingANoOpChangeIsANoOp() { var app = tester.newDeploymentContext().submit(); assertEquals(Optional.empty(), app.instance().latestDeployed()); app.deploy(); ApplicationVersion appVersion0 = app.lastSubmission().get(); assertEquals(Optional.of(appVersion0), app.instance().latestDeployed()); app.submit().deploy(); ApplicationVersion appVersion1 = app.lastSubmission().get(); assertEquals(Optional.of(appVersion1), app.instance().latestDeployed()); assertEquals(Change.empty(), app.instance().change()); tester.deploymentTrigger().forceChange(app.instanceId(), Change.of(appVersion1)); assertEquals(Change.empty(), app.instance().change()); assertEquals(Optional.of(appVersion1), app.instance().latestDeployed()); } @Test public void stepIsCompletePreciselyWhenItShouldBe() { var app1 = tester.newDeploymentContext("tenant1", "app1", "default"); var app2 = tester.newDeploymentContext("tenant1", "app2", "default"); ApplicationPackage applicationPackage = new ApplicationPackageBuilder() .region("us-central-1") .region("eu-west-1") .build(); Version version0 = Version.fromString("7.0"); tester.controllerTester().upgradeSystem(version0); app1.submit(applicationPackage).deploy(); 
app2.submit(applicationPackage).deploy(); Version version1 = Version.fromString("7.1"); tester.controllerTester().upgradeSystem(version1); tester.upgrader().maintain(); app2.deployPlatform(version1); tester.deploymentTrigger().cancelChange(app1.instanceId(), ALL); Version version2 = Version.fromString("7.2"); tester.controllerTester().upgradeSystem(version2); tester.upgrader().maintain(); tester.triggerJobs(); app1.jobAborted(systemTest).jobAborted(stagingTest); app1.runJob(systemTest).runJob(stagingTest).timeOutConvergence(productionUsCentral1); assertEquals(version2, app1.deployment(productionUsCentral1.zone(main)).version()); Instant triggered = app1.instanceJobs().get(productionUsCentral1).lastTriggered().get().start(); tester.clock().advance(Duration.ofHours(1)); tester.upgrader().overrideConfidence(version2, VespaVersion.Confidence.broken); tester.controllerTester().computeVersionStatus(); tester.upgrader().maintain(); assertEquals("Change becomes latest non-broken version", Change.of(version1), app1.instance().change()); app1.runJob(systemTest).runJob(stagingTest) .failDeployment(productionEuWest1); assertEquals(triggered, app1.instanceJobs().get(productionUsCentral1).lastTriggered().get().start()); ApplicationVersion revision1 = app1.lastSubmission().get(); app1.submit(applicationPackage); ApplicationVersion revision2 = app1.lastSubmission().get(); app1.runJob(systemTest).runJob(stagingTest); assertEquals(Change.of(version1).with(revision2), app1.instance().change()); tester.triggerJobs(); app1.assertRunning(productionUsCentral1); assertEquals(version2, app1.instance().deployments().get(productionUsCentral1.zone(main)).version()); assertEquals(revision1, app1.deployment(productionUsCentral1.zone(main)).applicationVersion()); assertTrue(triggered.isBefore(app1.instanceJobs().get(productionUsCentral1).lastTriggered().get().start())); app1.timeOutUpgrade(productionUsCentral1); assertEquals(version2, 
app1.instance().deployments().get(productionUsCentral1.zone(main)).version()); assertEquals(revision2, app1.deployment(productionUsCentral1.zone(main)).applicationVersion()); tester.clock().advance(Duration.ofHours(2).plus(Duration.ofSeconds(1))); tester.triggerJobs(); app1.assertNotRunning(productionUsCentral1); app1.runJob(systemTest).runJob(stagingTest).runJob(productionEuWest1); assertFalse(app1.instance().change().hasTargets()); assertFalse(app1.instanceJobs().get(productionUsCentral1).isSuccess()); } @Test public void eachParallelDeployTargetIsTested() { ApplicationPackage applicationPackage = new ApplicationPackageBuilder() .parallel("eu-west-1", "us-east-3") .build(); var app = tester.newDeploymentContext().submit(applicationPackage).deploy(); Version v1 = new Version("6.1"); Version v2 = new Version("6.2"); tester.controllerTester().upgradeSystem(v2); tester.upgrader().maintain(); app.runJob(systemTest).runJob(stagingTest); app.timeOutConvergence(productionEuWest1); tester.deploymentTrigger().cancelChange(app.instanceId(), PLATFORM); assertEquals(v2, app.deployment(productionEuWest1.zone(main)).version()); assertEquals(v1, app.deployment(productionUsEast3.zone(main)).version()); app.submit(applicationPackage); tester.triggerJobs(); Version firstTested = app.instanceJobs().get(systemTest).lastTriggered().get().versions().targetPlatform(); assertEquals(firstTested, app.instanceJobs().get(stagingTest).lastTriggered().get().versions().targetPlatform()); app.runJob(systemTest).runJob(stagingTest); tester.triggerJobs(); assertNotEquals(firstTested, app.instanceJobs().get(systemTest).lastTriggered().get().versions().targetPlatform()); assertNotEquals(firstTested, app.instanceJobs().get(stagingTest).lastTriggered().get().versions().targetPlatform()); app.runJob(systemTest).runJob(stagingTest); app.triggerJobs().jobAborted(productionUsEast3); app.failDeployment(productionEuWest1).failDeployment(productionUsEast3) 
.runJob(productionEuWest1).runJob(productionUsEast3); assertFalse(app.instance().change().hasTargets()); assertEquals(2, app.instanceJobs().get(productionEuWest1).lastSuccess().get().versions().targetApplication().buildNumber().getAsLong()); assertEquals(2, app.instanceJobs().get(productionUsEast3).lastSuccess().get().versions().targetApplication().buildNumber().getAsLong()); } @Test public void retriesFailingJobs() { ApplicationPackage applicationPackage = new ApplicationPackageBuilder() .region("us-central-1") .build(); var app = tester.newDeploymentContext().submit(applicationPackage).deploy(); app.submit(applicationPackage).runJob(stagingTest).failDeployment(systemTest); app.failDeployment(systemTest); tester.triggerJobs(); app.assertRunning(systemTest); tester.clock().advance(Duration.ofSeconds(1)); app.failDeployment(systemTest); tester.triggerJobs(); app.assertNotRunning(systemTest); tester.clock().advance(Duration.ofMinutes(10).plus(Duration.ofSeconds(1))); tester.triggerJobs(); app.assertRunning(systemTest); app.failDeployment(systemTest); tester.clock().advance(Duration.ofMinutes(15)); tester.triggerJobs(); app.assertNotRunning(systemTest); tester.clock().advance(Duration.ofSeconds(2)); tester.triggerJobs(); app.assertRunning(systemTest); app.failDeployment(systemTest); tester.triggerJobs(); app.assertNotRunning(systemTest); app.submit(applicationPackage).deploy(); assertTrue("Deployment completed", tester.jobs().active().isEmpty()); } @Test public void testPlatformVersionSelection() { ApplicationPackage applicationPackage = new ApplicationPackageBuilder() .region("us-west-1") .build(); Version version1 = tester.controller().readSystemVersion(); var app1 = tester.newDeploymentContext(); app1.submit(applicationPackage).deploy(); assertEquals("First deployment gets system version", version1, app1.application().oldestDeployedPlatform().get()); assertEquals(version1, tester.configServer().lastPrepareVersion().get()); Version version2 = new 
Version(version1.getMajor(), version1.getMinor() + 1); tester.controllerTester().upgradeSystem(version2); applicationPackage = new ApplicationPackageBuilder() .region("us-west-1") .region("us-east-3") .build(); app1.submit(applicationPackage).deploy(); assertEquals("Application change preserves version, and new region gets oldest version too", version1, app1.application().oldestDeployedPlatform().get()); assertEquals(version1, tester.configServer().lastPrepareVersion().get()); assertFalse("Change deployed", app1.instance().change().hasTargets()); tester.upgrader().maintain(); app1.deployPlatform(version2); assertEquals("Version upgrade changes version", version2, app1.application().oldestDeployedPlatform().get()); assertEquals(version2, tester.configServer().lastPrepareVersion().get()); } @Test public void requeueOutOfCapacityStagingJob() { ApplicationPackage applicationPackage = new ApplicationPackageBuilder() .region("us-east-3") .build(); var app1 = tester.newDeploymentContext("tenant1", "app1", "default").submit(applicationPackage); var app2 = tester.newDeploymentContext("tenant2", "app2", "default").submit(applicationPackage); var app3 = tester.newDeploymentContext("tenant3", "app3", "default").submit(applicationPackage); app2.runJob(systemTest); tester.clock().advance(Duration.ofMinutes(1)); app1.runJob(systemTest); tester.clock().advance(Duration.ofMinutes(1)); app3.runJob(systemTest); tester.triggerJobs(); assertEquals(3, tester.jobs().active().size()); tester.abortAll(); assertEquals(List.of(), tester.jobs().active()); tester.readyJobsTrigger().maintain(); assertEquals(1, tester.jobs().active().size()); tester.readyJobsTrigger().maintain(); assertEquals(2, tester.jobs().active().size()); tester.readyJobsTrigger().maintain(); assertEquals(3, tester.jobs().active().size()); app3.outOfCapacity(stagingTest); app1.abortJob(stagingTest); app2.abortJob(stagingTest); tester.readyJobsTrigger().maintain(); app3.assertRunning(stagingTest); assertEquals(1, 
tester.jobs().active().size()); tester.readyJobsTrigger().maintain(); assertEquals(2, tester.jobs().active().size()); tester.readyJobsTrigger().maintain(); assertEquals(3, tester.jobs().active().size()); app2.deploy(); app3.deploy(); app1.runJob(stagingTest); assertEquals(0, tester.jobs().active().size()); tester.controllerTester().upgradeSystem(new Version("6.2")); tester.upgrader().maintain(); app1.submit(applicationPackage); tester.readyJobsTrigger().run(); app1.assertRunning(systemTest); app1.assertRunning(stagingTest); assertEquals(2, tester.jobs().active().size()); tester.triggerJobs(); app3.outOfCapacity(systemTest); app1.abortJob(systemTest); app1.abortJob(stagingTest); app2.abortJob(systemTest); app2.abortJob(stagingTest); app3.abortJob(stagingTest); assertEquals(0, tester.jobs().active().size()); assertTrue(app1.instance().change().application().isPresent()); assertFalse(app2.instance().change().application().isPresent()); assertFalse(app3.instance().change().application().isPresent()); tester.readyJobsTrigger().maintain(); app1.assertRunning(stagingTest); app3.assertRunning(systemTest); assertEquals(2, tester.jobs().active().size()); tester.readyJobsTrigger().maintain(); app1.assertRunning(systemTest); assertEquals(4, tester.jobs().active().size()); tester.readyJobsTrigger().maintain(); app3.assertRunning(stagingTest); app2.assertRunning(stagingTest); app2.assertRunning(systemTest); assertEquals(6, tester.jobs().active().size()); } @Test public void testUserInstancesNotInDeploymentSpec() { var app = tester.newDeploymentContext(); tester.controller().applications().createInstance(app.application().id().instance("user")); app.submit().deploy(); } @Test @Test public void testMultipleInstances() { ApplicationPackage applicationPackage = new ApplicationPackageBuilder() .instances("instance1,instance2") .region("us-east-3") .build(); var app = tester.newDeploymentContext("tenant1", "application1", "instance1") .submit(applicationPackage) .completeRollout(); 
assertEquals(2, app.application().instances().size()); assertEquals(2, app.application().productionDeployments().values().stream() .mapToInt(Collection::size) .sum()); } @Test public void testDeclaredProductionTests() { ApplicationPackage applicationPackage = new ApplicationPackageBuilder() .region("us-east-3") .delay(Duration.ofMinutes(1)) .test("us-east-3") .region("us-west-1") .region("us-central-1") .test("us-central-1") .test("us-west-1") .build(); var app = tester.newDeploymentContext().submit(applicationPackage); app.runJob(systemTest).runJob(stagingTest).runJob(productionUsEast3); app.assertNotRunning(productionUsWest1); tester.clock().advance(Duration.ofMinutes(1)); app.runJob(testUsEast3) .runJob(productionUsWest1).runJob(productionUsCentral1) .runJob(testUsCentral1).runJob(testUsWest1); assertEquals(Change.empty(), app.instance().change()); Version version0 = app.application().oldestDeployedPlatform().get(); Version version1 = Version.fromString("7.7"); tester.controllerTester().upgradeSystem(version1); tester.upgrader().maintain(); app.runJob(systemTest).runJob(stagingTest).runJob(productionUsEast3); tester.clock().advance(Duration.ofMinutes(1)); app.failDeployment(testUsEast3); tester.triggerJobs(); app.assertRunning(testUsEast3); tester.upgrader().overrideConfidence(version1, VespaVersion.Confidence.broken); tester.controllerTester().computeVersionStatus(); tester.upgrader().maintain(); app.failDeployment(testUsEast3); app.assertNotRunning(testUsEast3); assertEquals(Change.empty(), app.instance().change()); tester.deploymentTrigger().triggerChange(app.instanceId(), Change.of(version0).withPin()); app.runJob(stagingTest).runJob(productionUsEast3); tester.clock().advance(Duration.ofMinutes(1)); app.failDeployment(testUsEast3); tester.clock().advance(Duration.ofMinutes(11)); app.runJob(testUsEast3); assertEquals(Change.empty().withPin(), app.instance().change()); } @Test public void testDeployComplicatedDeploymentSpec() { String complicatedDeploymentSpec 
= "<deployment version='1.0' athenz-domain='domain' athenz-service='service'>\n" + " <parallel>\n" + " <instance id='instance' athenz-service='in-service'>\n" + " <staging />\n" + " <prod>\n" + " <parallel>\n" + " <region active='true'>us-west-1</region>\n" + " <steps>\n" + " <region active='true'>us-east-3</region>\n" + " <delay hours='2' />\n" + " <region active='true'>eu-west-1</region>\n" + " <delay hours='2' />\n" + " </steps>\n" + " <steps>\n" + " <delay hours='3' />\n" + " <region active='true'>aws-us-east-1a</region>\n" + " <parallel>\n" + " <region active='true' athenz-service='no-service'>ap-northeast-1</region>\n" + " <region active='true'>ap-northeast-2</region>\n" + " <test>aws-us-east-1a</test>\n" + " </parallel>\n" + " </steps>\n" + " <delay hours='3' minutes='30' />\n" + " </parallel>\n" + " <parallel>\n" + " <test>ap-northeast-2</test>\n" + " <test>ap-northeast-1</test>\n" + " </parallel>\n" + " <test>us-east-3</test>\n" + " <region active='true'>ap-southeast-1</region>\n" + " </prod>\n" + " <endpoints>\n" + " <endpoint id='foo' container-id='bar'>\n" + " <region>us-east-3</region>\n" + " </endpoint>\n" + " <endpoint id='nalle' container-id='frosk' />\n" + " <endpoint container-id='quux' />\n" + " </endpoints>\n" + " </instance>\n" + " <instance id='other'>\n" + " <upgrade policy='conservative' />\n" + " <test />\n" + " <block-change revision='true' version='false' days='sat' hours='0-23' time-zone='CET' />\n" + " <prod>\n" + " <region active='true'>eu-west-1</region>\n" + " <test>eu-west-1</test>\n" + " </prod>\n" + " <notifications when='failing'>\n" + " <email role='author' />\n" + " <email address='john@dev' when='failing-commit' />\n" + " <email address='jane@dev' />\n" + " </notifications>\n" + " </instance>\n" + " </parallel>\n" + " <instance id='last'>\n" + " <upgrade policy='conservative' />\n" + " <prod>\n" + " <region active='true'>eu-west-1</region>\n" + " </prod>\n" + " </instance>\n" + "</deployment>\n"; ApplicationPackage 
applicationPackage = ApplicationPackageBuilder.fromDeploymentXml(complicatedDeploymentSpec); var app1 = tester.newDeploymentContext("t", "a", "instance").submit(applicationPackage); var app2 = tester.newDeploymentContext("t", "a", "other"); var app3 = tester.newDeploymentContext("t", "a", "last"); tester.triggerJobs(); assertEquals(2, tester.jobs().active().size()); app1.runJob(stagingTest); tester.triggerJobs(); assertEquals(1, tester.jobs().active().size()); app2.runJob(systemTest); app1.runJob(productionUsWest1); tester.triggerJobs(); assertEquals(2, tester.jobs().active().size()); app1.runJob(productionUsEast3); tester.triggerJobs(); assertEquals(1, tester.jobs().active().size()); tester.clock().advance(Duration.ofHours(2)); app1.runJob(productionEuWest1); tester.triggerJobs(); assertEquals(1, tester.jobs().active().size()); app2.assertNotRunning(testEuWest1); app2.runJob(productionEuWest1); tester.triggerJobs(); assertEquals(1, tester.jobs().active().size()); app2.runJob(testEuWest1); tester.triggerJobs(); assertEquals(List.of(), tester.jobs().active()); tester.clock().advance(Duration.ofHours(1)); app1.runJob(productionAwsUsEast1a); tester.triggerJobs(); assertEquals(3, tester.jobs().active().size()); app1.runJob(testAwsUsEast1a); tester.triggerJobs(); assertEquals(2, tester.jobs().active().size()); app1.runJob(productionApNortheast2); tester.triggerJobs(); assertEquals(1, tester.jobs().active().size()); app1.runJob(productionApNortheast1); tester.triggerJobs(); assertEquals(List.of(), tester.jobs().active()); tester.clock().advance(Duration.ofMinutes(30)); tester.triggerJobs(); assertEquals(List.of(), tester.jobs().active()); tester.clock().advance(Duration.ofMinutes(30)); app1.runJob(testApNortheast1); tester.triggerJobs(); assertEquals(1, tester.jobs().active().size()); app1.runJob(testApNortheast2); tester.triggerJobs(); assertEquals(1, tester.jobs().active().size()); app1.runJob(testUsEast3); tester.triggerJobs(); assertEquals(1, 
tester.jobs().active().size()); app1.runJob(productionApSoutheast1); tester.triggerJobs(); assertEquals(1, tester.jobs().active().size()); app3.runJob(productionEuWest1); tester.triggerJobs(); assertEquals(List.of(), tester.jobs().active()); tester.atMondayMorning().clock().advance(Duration.ofDays(5)); Version version = Version.fromString("8.1"); tester.controllerTester().upgradeSystem(version); tester.upgrader().maintain(); assertEquals(Change.of(version), app1.instance().change()); assertEquals(Change.empty(), app2.instance().change()); assertEquals(Change.empty(), app3.instance().change()); app2.failDeployment(systemTest); app1.submit(applicationPackage); assertEquals(Change.of(version).with(app1.application().latestVersion().get()), app1.instance().change()); app2.runJob(systemTest); app1.jobAborted(stagingTest) .runJob(stagingTest) .runJob(productionUsWest1) .runJob(productionUsEast3); app1.runJob(stagingTest); app2.runJob(systemTest); tester.clock().advance(Duration.ofHours(2)); app1.runJob(productionEuWest1); tester.clock().advance(Duration.ofHours(1)); app1.runJob(productionAwsUsEast1a); tester.triggerJobs(); app1.runJob(testAwsUsEast1a); app1.runJob(productionApNortheast2); app1.runJob(productionApNortheast1); tester.clock().advance(Duration.ofHours(1)); app1.runJob(testApNortheast1); app1.runJob(testApNortheast2); app1.runJob(testUsEast3); app1.runJob(productionApSoutheast1); tester.controllerTester().computeVersionStatus(); tester.upgrader().maintain(); tester.outstandingChangeDeployer().run(); tester.triggerJobs(); assertEquals(2, tester.jobs().active().size()); assertEquals(Change.empty(), app1.instance().change()); assertEquals(Change.of(version), app2.instance().change()); assertEquals(Change.empty(), app3.instance().change()); app1.runJob(stagingTest); app2.runJob(systemTest) .runJob(productionEuWest1) .failDeployment(testEuWest1); tester.clock().advance(Duration.ofDays(1)); tester.upgrader().maintain(); tester.outstandingChangeDeployer().run(); 
assertEquals(0, tester.jobs().active().size()); tester.triggerJobs(); assertEquals(1, tester.jobs().active().size()); assertEquals(Change.empty(), app1.instance().change()); assertEquals(Change.of(version).with(app1.application().latestVersion().get()), app2.instance().change()); app2.runJob(productionEuWest1) .runJob(testEuWest1); assertEquals(Change.empty(), app2.instance().change()); assertEquals(Change.empty(), app3.instance().change()); tester.upgrader().maintain(); tester.outstandingChangeDeployer().run(); assertEquals(Change.of(version), app3.instance().change()); tester.deploymentTrigger().cancelChange(app3.instanceId(), ALL); tester.outstandingChangeDeployer().run(); tester.upgrader().maintain(); assertEquals(Change.of(app1.application().latestVersion().get()), app3.instance().change()); app3.runJob(productionEuWest1); tester.upgrader().maintain(); app3.runJob(productionEuWest1); tester.triggerJobs(); assertEquals(List.of(), tester.jobs().active()); assertEquals(Change.empty(), app3.instance().change()); } @Test public void testChangeCompletion() { var app = tester.newDeploymentContext().submit().deploy(); var version = new Version("7.1"); tester.controllerTester().upgradeSystem(version); tester.upgrader().maintain(); app.runJob(systemTest).runJob(stagingTest).runJob(productionUsCentral1); app.submit(); tester.triggerJobs(); tester.outstandingChangeDeployer().run(); assertEquals(Change.of(version), app.instance().change()); app.runJob(productionUsEast3).runJob(productionUsWest1); tester.triggerJobs(); tester.outstandingChangeDeployer().run(); assertEquals(Change.of(app.lastSubmission().get()), app.instance().change()); } @Test public void mixedDirectAndPipelineJobsInProduction() { ApplicationPackage cdPackage = new ApplicationPackageBuilder().region("cd-us-east-1") .region("cd-aws-us-east-1a") .build(); var zones = List.of(ZoneId.from("test.cd-us-west-1"), ZoneId.from("staging.cd-us-west-1"), ZoneId.from("prod.cd-us-east-1"), 
ZoneId.from("prod.cd-aws-us-east-1a")); tester.controllerTester() .setZones(zones, SystemName.cd) .setRoutingMethod(zones, RoutingMethod.shared); tester.controllerTester().upgradeSystem(Version.fromString("6.1")); tester.controllerTester().computeVersionStatus(); var app = tester.newDeploymentContext(); app.runJob(productionCdUsEast1, cdPackage); app.submit(cdPackage); app.runJob(systemTest); tester.controller().applications().deploymentTrigger().forceTrigger(app.instanceId(), productionCdUsEast1, "user", false); app.runJob(productionCdUsEast1) .abortJob(stagingTest) .runJob(stagingTest) .runJob(productionCdAwsUsEast1a); app.runJob(productionCdUsEast1, cdPackage); var version = new Version("7.1"); tester.controllerTester().upgradeSystem(version); tester.upgrader().maintain(); tester.controller().applications().deploymentTrigger().forceTrigger(app.instanceId(), productionCdUsEast1, "user", false); app.runJob(productionCdUsEast1) .abortJob(systemTest) .jobAborted(stagingTest) .runJob(systemTest) .runJob(stagingTest) .runJob(productionCdAwsUsEast1a); app.runJob(productionCdUsEast1, cdPackage); app.submit(cdPackage); app.runJob(systemTest); tester.controller().applications().deploymentTrigger().forceTrigger(app.instanceId(), productionCdUsEast1, "user", false); app.runJob(productionCdUsEast1) .jobAborted(stagingTest) .runJob(stagingTest) .runJob(productionCdAwsUsEast1a); } @Test public void testsInSeparateInstance() { String deploymentSpec = "<deployment version='1.0' athenz-domain='domain' athenz-service='service'>\n" + " <instance id='canary'>\n" + " <upgrade policy='canary' />\n" + " <test />\n" + " <staging />\n" + " </instance>\n" + " <instance id='default'>\n" + " <prod>\n" + " <region active='true'>eu-west-1</region>\n" + " <test>eu-west-1</test>\n" + " </prod>\n" + " </instance>\n" + "</deployment>\n"; ApplicationPackage applicationPackage = ApplicationPackageBuilder.fromDeploymentXml(deploymentSpec); var canary = tester.newDeploymentContext("t", "a", 
"canary").submit(applicationPackage); var conservative = tester.newDeploymentContext("t", "a", "default"); canary.runJob(systemTest) .runJob(stagingTest); conservative.runJob(productionEuWest1) .runJob(testEuWest1); canary.submit(applicationPackage) .runJob(systemTest) .runJob(stagingTest); tester.outstandingChangeDeployer().run(); conservative.runJob(productionEuWest1) .runJob(testEuWest1); tester.controllerTester().upgradeSystem(new Version("7.7.7")); tester.upgrader().maintain(); canary.runJob(systemTest) .runJob(stagingTest); tester.upgrader().maintain(); conservative.runJob(productionEuWest1) .runJob(testEuWest1); } @Test public void testEagerTests() { var app = tester.newDeploymentContext().submit().deploy(); Version version1 = new Version("7.8.9"); ApplicationVersion build1 = app.lastSubmission().get(); tester.controllerTester().upgradeSystem(version1); tester.upgrader().maintain(); app.runJob(stagingTest); app.submit(); ApplicationVersion build2 = app.lastSubmission().get(); assertNotEquals(build1, build2); tester.triggerJobs(); app.assertRunning(stagingTest); assertEquals(version1, app.instanceJobs().get(stagingTest).lastCompleted().get().versions().targetPlatform()); assertEquals(build1, app.instanceJobs().get(stagingTest).lastCompleted().get().versions().targetApplication()); assertEquals(version1, app.instanceJobs().get(stagingTest).lastTriggered().get().versions().sourcePlatform().get()); assertEquals(build1, app.instanceJobs().get(stagingTest).lastTriggered().get().versions().sourceApplication().get()); assertEquals(version1, app.instanceJobs().get(stagingTest).lastTriggered().get().versions().targetPlatform()); assertEquals(build2, app.instanceJobs().get(stagingTest).lastTriggered().get().versions().targetApplication()); app.runJob(systemTest) .runJob(productionUsCentral1) .runJob(productionUsEast3) .runJob(productionUsWest1); tester.outstandingChangeDeployer().run(); assertEquals(RunStatus.running, tester.jobs().last(app.instanceId(), 
stagingTest).get().status()); app.runJob(stagingTest); tester.triggerJobs(); app.assertNotRunning(stagingTest); } @Test public void testTriggeringOfIdleTestJobsWhenFirstDeploymentIsOnNewerVersionThanChange() { ApplicationPackage applicationPackage = new ApplicationPackageBuilder().systemTest() .stagingTest() .region("us-east-3") .region("us-west-1") .build(); var app = tester.newDeploymentContext().submit(applicationPackage).deploy(); var appToAvoidVersionGC = tester.newDeploymentContext("g", "c", "default").submit().deploy(); Version version2 = new Version("7.8.9"); Version version3 = new Version("8.9.10"); tester.controllerTester().upgradeSystem(version2); tester.deploymentTrigger().triggerChange(appToAvoidVersionGC.instanceId(), Change.of(version2)); appToAvoidVersionGC.deployPlatform(version2); tester.controllerTester().upgradeSystem(version3); tester.deploymentTrigger().triggerChange(app.instanceId(), Change.of(version3)); app.runJob(systemTest).runJob(stagingTest); tester.triggerJobs(); tester.upgrader().overrideConfidence(version3, VespaVersion.Confidence.broken); tester.controllerTester().computeVersionStatus(); tester.upgrader().run(); assertEquals(Optional.of(version2), app.instance().change().platform()); app.runJob(systemTest) .runJob(productionUsEast3) .runJob(stagingTest) .runJob(productionUsWest1); assertEquals(version3, app.instanceJobs().get(productionUsEast3).lastSuccess().get().versions().targetPlatform()); assertEquals(version2, app.instanceJobs().get(productionUsWest1).lastSuccess().get().versions().targetPlatform()); assertEquals(Map.of(), app.deploymentStatus().jobsToRun()); assertEquals(Change.empty(), app.instance().change()); assertEquals(List.of(), tester.jobs().active()); } @Test public void testRetriggerQueue() { var app = tester.newDeploymentContext().submit().deploy(); app.submit(); tester.triggerJobs(); tester.deploymentTrigger().reTrigger(app.instanceId(), productionUsEast3); 
tester.deploymentTrigger().reTriggerOrAddToQueue(app.deploymentIdIn(ZoneId.from("prod", "us-east-3")));
        tester.deploymentTrigger().reTriggerOrAddToQueue(app.deploymentIdIn(ZoneId.from("prod", "us-east-3")));

        // Repeated re-trigger requests for the same deployment must collapse into a single queue entry.
        List<RetriggerEntry> retriggerEntries = tester.controller().curator().readRetriggerEntries();
        // Consistency fix: use the statically imported assertEquals, like every other
        // assertion in this file, instead of the qualified Assert.assertEquals.
        assertEquals(1, retriggerEntries.size());
    }

}
/**
 * Tests the deployment trigger: which jobs are triggered, and when, in response to
 * platform upgrades and application changes, using a mocked deployment harness.
 *
 * NOTE(review): the class was declared package-private; JUnit 4 requires test classes
 * to be public, so the {@code public} modifier is added here.
 */
public class DeploymentTriggerTest {

    private final DeploymentTester tester = new DeploymentTester();

    @Test
    public void testTriggerFailing() {
        ApplicationPackage applicationPackage = new ApplicationPackageBuilder()
                .upgradePolicy("default")
                .region("us-west-1")
                .build();
        var app = tester.newDeploymentContext().submit(applicationPackage).deploy();

        Version version = Version.fromString("6.3");
        tester.controllerTester().upgradeSystem(version);
        tester.upgrader().maintain();

        // A failed initial deployment is retried immediately.
        app.failDeployment(stagingTest);
        tester.triggerJobs();
        assertEquals("Retried dead job", 2, tester.jobs().active().size());
        app.assertRunning(stagingTest);
        app.runJob(stagingTest);
        app.assertRunning(systemTest);
        assertEquals(1, tester.jobs().active().size());

        // A timed-out upgrade job is retried on failure.
        app.timeOutUpgrade(systemTest);
        tester.triggerJobs();
        assertEquals("Job is retried on failure", 1, tester.jobs().active().size());
        app.runJob(systemTest);
        tester.triggerJobs();
        app.assertRunning(productionUsWest1);

        // Without a projectId, nothing can be (re-)triggered.
        tester.applications().lockApplicationOrThrow(app.application().id(), locked ->
                tester.applications().store(locked.withProjectId(OptionalLong.empty())));
        app.timeOutConvergence(productionUsWest1);
        tester.triggerJobs();
        assertEquals("Job is not triggered when no projectId is present", 0, tester.jobs().active().size());
    }

    /** With a "leading" upgrade rollout, an application change may start while the upgrade is still deploying. */
    @Test
    public void leadingUpgradeAllowsApplicationChangeWhileUpgrading() {
        var applicationPackage = new ApplicationPackageBuilder().region("us-east-3")
                                                                .upgradeRollout("leading")
                                                                .build();
        var app = tester.newDeploymentContext();
        app.submit(applicationPackage).deploy();

        Change upgrade = Change.of(new Version("7.8.9"));
        tester.controllerTester().upgradeSystem(upgrade.platform().get());
        tester.upgrader().maintain();
        app.runJob(systemTest).runJob(stagingTest);
        tester.triggerJobs();
        app.assertRunning(productionUsEast3);
        assertEquals(upgrade, app.instance().change());

        // The new submission joins the in-flight upgrade instead of waiting for it.
        app.submit(applicationPackage);
        assertEquals(upgrade.with(app.lastSubmission().get()), app.instance().change());
    }

    @Test
    public void abortsJobsOnNewApplicationChange() {
var app = tester.newDeploymentContext(); app.submit() .runJob(systemTest) .runJob(stagingTest); tester.triggerJobs(); RunId id = tester.jobs().last(app.instanceId(), productionUsCentral1).get().id(); assertTrue(tester.jobs().active(id).isPresent()); app.submit(); assertTrue(tester.jobs().active(id).isPresent()); tester.triggerJobs(); tester.runner().run(); assertTrue(tester.jobs().active(id).isPresent()); app.runJob(systemTest).runJob(stagingTest).runJob(stagingTest); tester.triggerJobs(); app.jobAborted(productionUsCentral1); app.runJob(productionUsCentral1).runJob(productionUsWest1).runJob(productionUsEast3); assertEquals(Change.empty(), app.instance().change()); tester.controllerTester().upgradeSystem(new Version("8.9")); tester.upgrader().maintain(); app.runJob(systemTest).runJob(stagingTest); tester.triggerJobs(); app.submit(); app.runJob(systemTest).runJob(stagingTest); tester.triggerJobs(); tester.runner().run(); assertEquals(EnumSet.of(productionUsCentral1), tester.jobs().active().stream() .map(run -> run.id().type()) .collect(Collectors.toCollection(() -> EnumSet.noneOf(JobType.class)))); } @Test public void deploymentSpecWithDelays() { ApplicationPackage applicationPackage = new ApplicationPackageBuilder() .systemTest() .delay(Duration.ofSeconds(30)) .region("us-west-1") .delay(Duration.ofMinutes(2)) .delay(Duration.ofMinutes(2)) .region("us-central-1") .delay(Duration.ofMinutes(10)) .build(); var app = tester.newDeploymentContext().submit(applicationPackage); app.runJob(systemTest); tester.clock().advance(Duration.ofSeconds(15)); app.runJob(stagingTest); tester.triggerJobs(); assertEquals(0, tester.jobs().active().size()); tester.clock().advance(Duration.ofSeconds(15)); tester.triggerJobs(); assertEquals(1, tester.jobs().active().size()); app.assertRunning(productionUsWest1); tester.clock().advance(Duration.ofMinutes(3)); tester.triggerJobs(); assertEquals(1, tester.jobs().active().size()); app.assertRunning(productionUsWest1); 
app.runJob(productionUsWest1); tester.triggerJobs(); assertTrue("No more jobs triggered at this time", tester.jobs().active().isEmpty()); tester.clock().advance(Duration.ofMinutes(3)); tester.triggerJobs(); assertTrue("No more jobs triggered at this time", tester.jobs().active().isEmpty()); tester.clock().advance(Duration.ofMinutes(1)); tester.triggerJobs(); app.runJob(productionUsCentral1); assertTrue("All jobs consumed", tester.jobs().active().isEmpty()); tester.clock().advance(Duration.ofMinutes(10)); tester.triggerJobs(); assertTrue("All jobs consumed", tester.jobs().active().isEmpty()); } @Test public void deploymentSpecWithParallelDeployments() { ApplicationPackage applicationPackage = new ApplicationPackageBuilder() .region("us-central-1") .parallel("us-west-1", "us-east-3") .region("eu-west-1") .build(); var app = tester.newDeploymentContext().submit(applicationPackage); app.runJob(systemTest).runJob(stagingTest); tester.triggerJobs(); assertEquals(1, tester.jobs().active().size()); app.runJob(productionUsCentral1); tester.triggerJobs(); assertEquals(2, tester.jobs().active().size()); app.assertRunning(productionUsEast3); app.assertRunning(productionUsWest1); app.runJob(productionUsWest1); assertEquals(1, tester.jobs().active().size()); app.assertRunning(productionUsEast3); app.runJob(productionUsEast3); tester.triggerJobs(); assertEquals(1, tester.jobs().active().size()); app.runJob(productionEuWest1); assertTrue("All jobs consumed", tester.jobs().active().isEmpty()); } @Test public void testNoOtherChangesDuringSuspension() { ApplicationPackage applicationPackage = new ApplicationPackageBuilder() .region("us-central-1") .parallel("us-west-1", "us-east-3") .build(); var application = tester.newDeploymentContext().submit().deploy(); tester.configServer().setSuspension(application.deploymentIdIn(ZoneId.from("prod", "us-central-1")), true); application.submit() .runJob(systemTest) .runJob(stagingTest) .runJob(productionUsCentral1); tester.triggerJobs(); 
application.assertNotRunning(productionUsEast3); application.assertNotRunning(productionUsWest1); tester.configServer().setSuspension(application.deploymentIdIn(ZoneId.from("prod", "us-central-1")), false); tester.triggerJobs(); application.runJob(productionUsWest1).runJob(productionUsEast3); assertEquals(Change.empty(), application.instance().change()); } @Test public void testBlockRevisionChange() { tester.at(Instant.parse("2017-09-26T17:30:00.00Z")); Version version = Version.fromString("6.2"); tester.controllerTester().upgradeSystem(version); ApplicationPackage applicationPackage = new ApplicationPackageBuilder() .upgradePolicy("canary") .blockChange(true, false, "tue", "18-19", "UTC") .region("us-west-1") .region("us-central-1") .region("us-east-3") .build(); var app = tester.newDeploymentContext().submit(applicationPackage).deploy(); tester.clock().advance(Duration.ofHours(1)); tester.triggerJobs(); assertEquals(0, tester.jobs().active().size()); app.submit(applicationPackage); assertTrue(app.deploymentStatus().outstandingChange(app.instance().name()).hasTargets()); app.runJob(systemTest).runJob(stagingTest); tester.outstandingChangeDeployer().run(); assertTrue(app.deploymentStatus().outstandingChange(app.instance().name()).hasTargets()); tester.triggerJobs(); assertEquals(emptyList(), tester.jobs().active()); tester.clock().advance(Duration.ofHours(2)); tester.outstandingChangeDeployer().run(); assertFalse(app.deploymentStatus().outstandingChange(app.instance().name()).hasTargets()); tester.triggerJobs(); app.assertRunning(productionUsWest1); } @Test public void testCompletionOfPartOfChangeDuringBlockWindow() { tester.at(Instant.parse("2017-09-26T17:30:00.00Z")); ApplicationPackage applicationPackage = new ApplicationPackageBuilder() .blockChange(true, true, "tue", "18", "UTC") .region("us-west-1") .region("us-east-3") .build(); var app = tester.newDeploymentContext().submit(applicationPackage).deploy(); Version v1 = Version.fromString("6.1"); Version v2 = 
Version.fromString("6.2"); tester.controllerTester().upgradeSystem(v2); tester.upgrader().maintain(); app.submit(applicationPackage); app.runJob(stagingTest).runJob(systemTest); tester.clock().advance(Duration.ofHours(1)); tester.outstandingChangeDeployer().run(); app.runJob(productionUsWest1); assertEquals(1, app.instanceJobs().get(productionUsWest1).lastSuccess().get().versions().targetApplication().buildNumber().getAsLong()); assertEquals(2, app.deploymentStatus().outstandingChange(app.instance().name()).application().get().buildNumber().getAsLong()); tester.triggerJobs(); assertEquals(3, tester.jobs().active().size()); app.runJob(productionUsEast3); assertEquals(2, tester.jobs().active().size()); assertEquals(Change.empty(), app.instance().change()); assertTrue(app.deploymentStatus().outstandingChange(app.instance().name()).hasTargets()); app.runJob(stagingTest).runJob(systemTest); tester.clock().advance(Duration.ofHours(1)); tester.outstandingChangeDeployer().run(); assertTrue(app.instance().change().hasTargets()); assertFalse(app.deploymentStatus().outstandingChange(app.instance().name()).hasTargets()); app.runJob(productionUsWest1).runJob(productionUsEast3); assertFalse(app.instance().change().hasTargets()); } @Test public void testJobPause() { ApplicationPackage applicationPackage = new ApplicationPackageBuilder() .region("us-west-1") .region("us-east-3") .build(); var app = tester.newDeploymentContext().submit(applicationPackage).deploy(); tester.controllerTester().upgradeSystem(new Version("9.8.7")); tester.upgrader().maintain(); tester.deploymentTrigger().pauseJob(app.instanceId(), productionUsWest1, tester.clock().instant().plus(Duration.ofSeconds(1))); tester.deploymentTrigger().pauseJob(app.instanceId(), productionUsEast3, tester.clock().instant().plus(Duration.ofSeconds(3))); app.runJob(systemTest).runJob(stagingTest); tester.triggerJobs(); app.assertNotRunning(productionUsWest1); tester.clock().advance(Duration.ofMillis(1500)); tester.triggerJobs(); 
app.assertRunning(productionUsWest1); tester.deploymentTrigger().pauseJob(app.instanceId(), productionUsWest1, tester.clock().instant().plus(Duration.ofSeconds(1))); app.failDeployment(productionUsWest1); tester.triggerJobs(); app.assertNotRunning(productionUsWest1); tester.clock().advance(Duration.ofMillis(1000)); tester.triggerJobs(); app.runJob(productionUsWest1); tester.triggerJobs(); app.assertNotRunning(productionUsEast3); tester.deploymentTrigger().forceTrigger(app.instanceId(), productionUsEast3, "mrTrigger", true); app.assertRunning(productionUsEast3); assertFalse(app.instance().jobPause(productionUsEast3).isPresent()); } @Test public void applicationVersionIsNotDowngraded() { ApplicationPackage applicationPackage = new ApplicationPackageBuilder() .region("us-central-1") .region("eu-west-1") .build(); var app = tester.newDeploymentContext().submit(applicationPackage).deploy(); app.submit(applicationPackage) .runJob(systemTest) .runJob(stagingTest) .timeOutUpgrade(productionUsCentral1); ApplicationVersion appVersion1 = app.lastSubmission().get(); assertEquals(appVersion1, app.deployment(ZoneId.from("prod.us-central-1")).applicationVersion()); tester.deploymentTrigger().cancelChange(app.instanceId(), PLATFORM); assertEquals(Change.of(appVersion1), app.instance().change()); tester.deploymentTrigger().cancelChange(app.instanceId(), ALL); assertEquals(Change.empty(), app.instance().change()); Version version1 = new Version("6.2"); tester.controllerTester().upgradeSystem(version1); tester.upgrader().maintain(); app.runJob(systemTest).runJob(stagingTest).failDeployment(productionUsCentral1); app.runJob(systemTest).runJob(stagingTest); app.runJob(productionUsCentral1).runJob(productionEuWest1); assertEquals(appVersion1, app.deployment(ZoneId.from("prod.us-central-1")).applicationVersion()); } @Test public void downgradingApplicationVersionWorks() { var app = tester.newDeploymentContext().submit().deploy(); ApplicationVersion appVersion0 = 
app.lastSubmission().get(); assertEquals(Optional.of(appVersion0), app.instance().latestDeployed()); app.submit().deploy(); ApplicationVersion appVersion1 = app.lastSubmission().get(); assertEquals(Optional.of(appVersion1), app.instance().latestDeployed()); tester.deploymentTrigger().forceChange(app.instanceId(), Change.of(appVersion0)); assertEquals(Change.of(appVersion0), app.instance().change()); app.runJob(stagingTest) .runJob(productionUsCentral1) .runJob(productionUsEast3) .runJob(productionUsWest1); assertEquals(Change.empty(), app.instance().change()); assertEquals(appVersion0, app.instance().deployments().get(productionUsEast3.zone(tester.controller().system())).applicationVersion()); assertEquals(Optional.of(appVersion0), app.instance().latestDeployed()); } @Test public void settingANoOpChangeIsANoOp() { var app = tester.newDeploymentContext().submit(); assertEquals(Optional.empty(), app.instance().latestDeployed()); app.deploy(); ApplicationVersion appVersion0 = app.lastSubmission().get(); assertEquals(Optional.of(appVersion0), app.instance().latestDeployed()); app.submit().deploy(); ApplicationVersion appVersion1 = app.lastSubmission().get(); assertEquals(Optional.of(appVersion1), app.instance().latestDeployed()); assertEquals(Change.empty(), app.instance().change()); tester.deploymentTrigger().forceChange(app.instanceId(), Change.of(appVersion1)); assertEquals(Change.empty(), app.instance().change()); assertEquals(Optional.of(appVersion1), app.instance().latestDeployed()); } @Test public void stepIsCompletePreciselyWhenItShouldBe() { var app1 = tester.newDeploymentContext("tenant1", "app1", "default"); var app2 = tester.newDeploymentContext("tenant1", "app2", "default"); ApplicationPackage applicationPackage = new ApplicationPackageBuilder() .region("us-central-1") .region("eu-west-1") .build(); Version version0 = Version.fromString("7.0"); tester.controllerTester().upgradeSystem(version0); app1.submit(applicationPackage).deploy(); 
app2.submit(applicationPackage).deploy(); Version version1 = Version.fromString("7.1"); tester.controllerTester().upgradeSystem(version1); tester.upgrader().maintain(); app2.deployPlatform(version1); tester.deploymentTrigger().cancelChange(app1.instanceId(), ALL); Version version2 = Version.fromString("7.2"); tester.controllerTester().upgradeSystem(version2); tester.upgrader().maintain(); tester.triggerJobs(); app1.jobAborted(systemTest).jobAborted(stagingTest); app1.runJob(systemTest).runJob(stagingTest).timeOutConvergence(productionUsCentral1); assertEquals(version2, app1.deployment(productionUsCentral1.zone(main)).version()); Instant triggered = app1.instanceJobs().get(productionUsCentral1).lastTriggered().get().start(); tester.clock().advance(Duration.ofHours(1)); tester.upgrader().overrideConfidence(version2, VespaVersion.Confidence.broken); tester.controllerTester().computeVersionStatus(); tester.upgrader().maintain(); assertEquals("Change becomes latest non-broken version", Change.of(version1), app1.instance().change()); app1.runJob(systemTest).runJob(stagingTest) .failDeployment(productionEuWest1); assertEquals(triggered, app1.instanceJobs().get(productionUsCentral1).lastTriggered().get().start()); ApplicationVersion revision1 = app1.lastSubmission().get(); app1.submit(applicationPackage); ApplicationVersion revision2 = app1.lastSubmission().get(); app1.runJob(systemTest).runJob(stagingTest); assertEquals(Change.of(version1).with(revision2), app1.instance().change()); tester.triggerJobs(); app1.assertRunning(productionUsCentral1); assertEquals(version2, app1.instance().deployments().get(productionUsCentral1.zone(main)).version()); assertEquals(revision1, app1.deployment(productionUsCentral1.zone(main)).applicationVersion()); assertTrue(triggered.isBefore(app1.instanceJobs().get(productionUsCentral1).lastTriggered().get().start())); app1.timeOutUpgrade(productionUsCentral1); assertEquals(version2, 
app1.instance().deployments().get(productionUsCentral1.zone(main)).version()); assertEquals(revision2, app1.deployment(productionUsCentral1.zone(main)).applicationVersion()); tester.clock().advance(Duration.ofHours(2).plus(Duration.ofSeconds(1))); tester.triggerJobs(); app1.assertNotRunning(productionUsCentral1); app1.runJob(systemTest).runJob(stagingTest).runJob(productionEuWest1); assertFalse(app1.instance().change().hasTargets()); assertFalse(app1.instanceJobs().get(productionUsCentral1).isSuccess()); } @Test public void eachParallelDeployTargetIsTested() { ApplicationPackage applicationPackage = new ApplicationPackageBuilder() .parallel("eu-west-1", "us-east-3") .build(); var app = tester.newDeploymentContext().submit(applicationPackage).deploy(); Version v1 = new Version("6.1"); Version v2 = new Version("6.2"); tester.controllerTester().upgradeSystem(v2); tester.upgrader().maintain(); app.runJob(systemTest).runJob(stagingTest); app.timeOutConvergence(productionEuWest1); tester.deploymentTrigger().cancelChange(app.instanceId(), PLATFORM); assertEquals(v2, app.deployment(productionEuWest1.zone(main)).version()); assertEquals(v1, app.deployment(productionUsEast3.zone(main)).version()); app.submit(applicationPackage); tester.triggerJobs(); Version firstTested = app.instanceJobs().get(systemTest).lastTriggered().get().versions().targetPlatform(); assertEquals(firstTested, app.instanceJobs().get(stagingTest).lastTriggered().get().versions().targetPlatform()); app.runJob(systemTest).runJob(stagingTest); tester.triggerJobs(); assertNotEquals(firstTested, app.instanceJobs().get(systemTest).lastTriggered().get().versions().targetPlatform()); assertNotEquals(firstTested, app.instanceJobs().get(stagingTest).lastTriggered().get().versions().targetPlatform()); app.runJob(systemTest).runJob(stagingTest); app.triggerJobs().jobAborted(productionUsEast3); app.failDeployment(productionEuWest1).failDeployment(productionUsEast3) 
.runJob(productionEuWest1).runJob(productionUsEast3); assertFalse(app.instance().change().hasTargets()); assertEquals(2, app.instanceJobs().get(productionEuWest1).lastSuccess().get().versions().targetApplication().buildNumber().getAsLong()); assertEquals(2, app.instanceJobs().get(productionUsEast3).lastSuccess().get().versions().targetApplication().buildNumber().getAsLong()); } @Test public void retriesFailingJobs() { ApplicationPackage applicationPackage = new ApplicationPackageBuilder() .region("us-central-1") .build(); var app = tester.newDeploymentContext().submit(applicationPackage).deploy(); app.submit(applicationPackage).runJob(stagingTest).failDeployment(systemTest); app.failDeployment(systemTest); tester.triggerJobs(); app.assertRunning(systemTest); tester.clock().advance(Duration.ofSeconds(1)); app.failDeployment(systemTest); tester.triggerJobs(); app.assertNotRunning(systemTest); tester.clock().advance(Duration.ofMinutes(10).plus(Duration.ofSeconds(1))); tester.triggerJobs(); app.assertRunning(systemTest); app.failDeployment(systemTest); tester.clock().advance(Duration.ofMinutes(15)); tester.triggerJobs(); app.assertNotRunning(systemTest); tester.clock().advance(Duration.ofSeconds(2)); tester.triggerJobs(); app.assertRunning(systemTest); app.failDeployment(systemTest); tester.triggerJobs(); app.assertNotRunning(systemTest); app.submit(applicationPackage).deploy(); assertTrue("Deployment completed", tester.jobs().active().isEmpty()); } @Test public void testPlatformVersionSelection() { ApplicationPackage applicationPackage = new ApplicationPackageBuilder() .region("us-west-1") .build(); Version version1 = tester.controller().readSystemVersion(); var app1 = tester.newDeploymentContext(); app1.submit(applicationPackage).deploy(); assertEquals("First deployment gets system version", version1, app1.application().oldestDeployedPlatform().get()); assertEquals(version1, tester.configServer().lastPrepareVersion().get()); Version version2 = new 
Version(version1.getMajor(), version1.getMinor() + 1); tester.controllerTester().upgradeSystem(version2); applicationPackage = new ApplicationPackageBuilder() .region("us-west-1") .region("us-east-3") .build(); app1.submit(applicationPackage).deploy(); assertEquals("Application change preserves version, and new region gets oldest version too", version1, app1.application().oldestDeployedPlatform().get()); assertEquals(version1, tester.configServer().lastPrepareVersion().get()); assertFalse("Change deployed", app1.instance().change().hasTargets()); tester.upgrader().maintain(); app1.deployPlatform(version2); assertEquals("Version upgrade changes version", version2, app1.application().oldestDeployedPlatform().get()); assertEquals(version2, tester.configServer().lastPrepareVersion().get()); } @Test public void requeueOutOfCapacityStagingJob() { ApplicationPackage applicationPackage = new ApplicationPackageBuilder() .region("us-east-3") .build(); var app1 = tester.newDeploymentContext("tenant1", "app1", "default").submit(applicationPackage); var app2 = tester.newDeploymentContext("tenant2", "app2", "default").submit(applicationPackage); var app3 = tester.newDeploymentContext("tenant3", "app3", "default").submit(applicationPackage); app2.runJob(systemTest); tester.clock().advance(Duration.ofMinutes(1)); app1.runJob(systemTest); tester.clock().advance(Duration.ofMinutes(1)); app3.runJob(systemTest); tester.triggerJobs(); assertEquals(3, tester.jobs().active().size()); tester.abortAll(); assertEquals(List.of(), tester.jobs().active()); tester.readyJobsTrigger().maintain(); assertEquals(1, tester.jobs().active().size()); tester.readyJobsTrigger().maintain(); assertEquals(2, tester.jobs().active().size()); tester.readyJobsTrigger().maintain(); assertEquals(3, tester.jobs().active().size()); app3.outOfCapacity(stagingTest); app1.abortJob(stagingTest); app2.abortJob(stagingTest); tester.readyJobsTrigger().maintain(); app3.assertRunning(stagingTest); assertEquals(1, 
tester.jobs().active().size()); tester.readyJobsTrigger().maintain(); assertEquals(2, tester.jobs().active().size()); tester.readyJobsTrigger().maintain(); assertEquals(3, tester.jobs().active().size()); app2.deploy(); app3.deploy(); app1.runJob(stagingTest); assertEquals(0, tester.jobs().active().size()); tester.controllerTester().upgradeSystem(new Version("6.2")); tester.upgrader().maintain(); app1.submit(applicationPackage); tester.readyJobsTrigger().run(); app1.assertRunning(systemTest); app1.assertRunning(stagingTest); assertEquals(2, tester.jobs().active().size()); tester.triggerJobs(); app3.outOfCapacity(systemTest); app1.abortJob(systemTest); app1.abortJob(stagingTest); app2.abortJob(systemTest); app2.abortJob(stagingTest); app3.abortJob(stagingTest); assertEquals(0, tester.jobs().active().size()); assertTrue(app1.instance().change().application().isPresent()); assertFalse(app2.instance().change().application().isPresent()); assertFalse(app3.instance().change().application().isPresent()); tester.readyJobsTrigger().maintain(); app1.assertRunning(stagingTest); app3.assertRunning(systemTest); assertEquals(2, tester.jobs().active().size()); tester.readyJobsTrigger().maintain(); app1.assertRunning(systemTest); assertEquals(4, tester.jobs().active().size()); tester.readyJobsTrigger().maintain(); app3.assertRunning(stagingTest); app2.assertRunning(stagingTest); app2.assertRunning(systemTest); assertEquals(6, tester.jobs().active().size()); } @Test public void testUserInstancesNotInDeploymentSpec() { var app = tester.newDeploymentContext(); tester.controller().applications().createInstance(app.application().id().instance("user")); app.submit().deploy(); } @Test @Test public void testMultipleInstances() { ApplicationPackage applicationPackage = new ApplicationPackageBuilder() .instances("instance1,instance2") .region("us-east-3") .build(); var app = tester.newDeploymentContext("tenant1", "application1", "instance1") .submit(applicationPackage) .completeRollout(); 
assertEquals(2, app.application().instances().size()); assertEquals(2, app.application().productionDeployments().values().stream() .mapToInt(Collection::size) .sum()); } @Test public void testDeclaredProductionTests() { ApplicationPackage applicationPackage = new ApplicationPackageBuilder() .region("us-east-3") .delay(Duration.ofMinutes(1)) .test("us-east-3") .region("us-west-1") .region("us-central-1") .test("us-central-1") .test("us-west-1") .build(); var app = tester.newDeploymentContext().submit(applicationPackage); app.runJob(systemTest).runJob(stagingTest).runJob(productionUsEast3); app.assertNotRunning(productionUsWest1); tester.clock().advance(Duration.ofMinutes(1)); app.runJob(testUsEast3) .runJob(productionUsWest1).runJob(productionUsCentral1) .runJob(testUsCentral1).runJob(testUsWest1); assertEquals(Change.empty(), app.instance().change()); Version version0 = app.application().oldestDeployedPlatform().get(); Version version1 = Version.fromString("7.7"); tester.controllerTester().upgradeSystem(version1); tester.upgrader().maintain(); app.runJob(systemTest).runJob(stagingTest).runJob(productionUsEast3); tester.clock().advance(Duration.ofMinutes(1)); app.failDeployment(testUsEast3); tester.triggerJobs(); app.assertRunning(testUsEast3); tester.upgrader().overrideConfidence(version1, VespaVersion.Confidence.broken); tester.controllerTester().computeVersionStatus(); tester.upgrader().maintain(); app.failDeployment(testUsEast3); app.assertNotRunning(testUsEast3); assertEquals(Change.empty(), app.instance().change()); tester.deploymentTrigger().triggerChange(app.instanceId(), Change.of(version0).withPin()); app.runJob(stagingTest).runJob(productionUsEast3); tester.clock().advance(Duration.ofMinutes(1)); app.failDeployment(testUsEast3); tester.clock().advance(Duration.ofMinutes(11)); app.runJob(testUsEast3); assertEquals(Change.empty().withPin(), app.instance().change()); } @Test public void testDeployComplicatedDeploymentSpec() { String complicatedDeploymentSpec 
= "<deployment version='1.0' athenz-domain='domain' athenz-service='service'>\n" + " <parallel>\n" + " <instance id='instance' athenz-service='in-service'>\n" + " <staging />\n" + " <prod>\n" + " <parallel>\n" + " <region active='true'>us-west-1</region>\n" + " <steps>\n" + " <region active='true'>us-east-3</region>\n" + " <delay hours='2' />\n" + " <region active='true'>eu-west-1</region>\n" + " <delay hours='2' />\n" + " </steps>\n" + " <steps>\n" + " <delay hours='3' />\n" + " <region active='true'>aws-us-east-1a</region>\n" + " <parallel>\n" + " <region active='true' athenz-service='no-service'>ap-northeast-1</region>\n" + " <region active='true'>ap-northeast-2</region>\n" + " <test>aws-us-east-1a</test>\n" + " </parallel>\n" + " </steps>\n" + " <delay hours='3' minutes='30' />\n" + " </parallel>\n" + " <parallel>\n" + " <test>ap-northeast-2</test>\n" + " <test>ap-northeast-1</test>\n" + " </parallel>\n" + " <test>us-east-3</test>\n" + " <region active='true'>ap-southeast-1</region>\n" + " </prod>\n" + " <endpoints>\n" + " <endpoint id='foo' container-id='bar'>\n" + " <region>us-east-3</region>\n" + " </endpoint>\n" + " <endpoint id='nalle' container-id='frosk' />\n" + " <endpoint container-id='quux' />\n" + " </endpoints>\n" + " </instance>\n" + " <instance id='other'>\n" + " <upgrade policy='conservative' />\n" + " <test />\n" + " <block-change revision='true' version='false' days='sat' hours='0-23' time-zone='CET' />\n" + " <prod>\n" + " <region active='true'>eu-west-1</region>\n" + " <test>eu-west-1</test>\n" + " </prod>\n" + " <notifications when='failing'>\n" + " <email role='author' />\n" + " <email address='john@dev' when='failing-commit' />\n" + " <email address='jane@dev' />\n" + " </notifications>\n" + " </instance>\n" + " </parallel>\n" + " <instance id='last'>\n" + " <upgrade policy='conservative' />\n" + " <prod>\n" + " <region active='true'>eu-west-1</region>\n" + " </prod>\n" + " </instance>\n" + "</deployment>\n"; ApplicationPackage 
applicationPackage = ApplicationPackageBuilder.fromDeploymentXml(complicatedDeploymentSpec); var app1 = tester.newDeploymentContext("t", "a", "instance").submit(applicationPackage); var app2 = tester.newDeploymentContext("t", "a", "other"); var app3 = tester.newDeploymentContext("t", "a", "last"); tester.triggerJobs(); assertEquals(2, tester.jobs().active().size()); app1.runJob(stagingTest); tester.triggerJobs(); assertEquals(1, tester.jobs().active().size()); app2.runJob(systemTest); app1.runJob(productionUsWest1); tester.triggerJobs(); assertEquals(2, tester.jobs().active().size()); app1.runJob(productionUsEast3); tester.triggerJobs(); assertEquals(1, tester.jobs().active().size()); tester.clock().advance(Duration.ofHours(2)); app1.runJob(productionEuWest1); tester.triggerJobs(); assertEquals(1, tester.jobs().active().size()); app2.assertNotRunning(testEuWest1); app2.runJob(productionEuWest1); tester.triggerJobs(); assertEquals(1, tester.jobs().active().size()); app2.runJob(testEuWest1); tester.triggerJobs(); assertEquals(List.of(), tester.jobs().active()); tester.clock().advance(Duration.ofHours(1)); app1.runJob(productionAwsUsEast1a); tester.triggerJobs(); assertEquals(3, tester.jobs().active().size()); app1.runJob(testAwsUsEast1a); tester.triggerJobs(); assertEquals(2, tester.jobs().active().size()); app1.runJob(productionApNortheast2); tester.triggerJobs(); assertEquals(1, tester.jobs().active().size()); app1.runJob(productionApNortheast1); tester.triggerJobs(); assertEquals(List.of(), tester.jobs().active()); tester.clock().advance(Duration.ofMinutes(30)); tester.triggerJobs(); assertEquals(List.of(), tester.jobs().active()); tester.clock().advance(Duration.ofMinutes(30)); app1.runJob(testApNortheast1); tester.triggerJobs(); assertEquals(1, tester.jobs().active().size()); app1.runJob(testApNortheast2); tester.triggerJobs(); assertEquals(1, tester.jobs().active().size()); app1.runJob(testUsEast3); tester.triggerJobs(); assertEquals(1, 
tester.jobs().active().size()); app1.runJob(productionApSoutheast1); tester.triggerJobs(); assertEquals(1, tester.jobs().active().size()); app3.runJob(productionEuWest1); tester.triggerJobs(); assertEquals(List.of(), tester.jobs().active()); tester.atMondayMorning().clock().advance(Duration.ofDays(5)); Version version = Version.fromString("8.1"); tester.controllerTester().upgradeSystem(version); tester.upgrader().maintain(); assertEquals(Change.of(version), app1.instance().change()); assertEquals(Change.empty(), app2.instance().change()); assertEquals(Change.empty(), app3.instance().change()); app2.failDeployment(systemTest); app1.submit(applicationPackage); assertEquals(Change.of(version).with(app1.application().latestVersion().get()), app1.instance().change()); app2.runJob(systemTest); app1.jobAborted(stagingTest) .runJob(stagingTest) .runJob(productionUsWest1) .runJob(productionUsEast3); app1.runJob(stagingTest); app2.runJob(systemTest); tester.clock().advance(Duration.ofHours(2)); app1.runJob(productionEuWest1); tester.clock().advance(Duration.ofHours(1)); app1.runJob(productionAwsUsEast1a); tester.triggerJobs(); app1.runJob(testAwsUsEast1a); app1.runJob(productionApNortheast2); app1.runJob(productionApNortheast1); tester.clock().advance(Duration.ofHours(1)); app1.runJob(testApNortheast1); app1.runJob(testApNortheast2); app1.runJob(testUsEast3); app1.runJob(productionApSoutheast1); tester.controllerTester().computeVersionStatus(); tester.upgrader().maintain(); tester.outstandingChangeDeployer().run(); tester.triggerJobs(); assertEquals(2, tester.jobs().active().size()); assertEquals(Change.empty(), app1.instance().change()); assertEquals(Change.of(version), app2.instance().change()); assertEquals(Change.empty(), app3.instance().change()); app1.runJob(stagingTest); app2.runJob(systemTest) .runJob(productionEuWest1) .failDeployment(testEuWest1); tester.clock().advance(Duration.ofDays(1)); tester.upgrader().maintain(); tester.outstandingChangeDeployer().run(); 
assertEquals(0, tester.jobs().active().size()); tester.triggerJobs(); assertEquals(1, tester.jobs().active().size()); assertEquals(Change.empty(), app1.instance().change()); assertEquals(Change.of(version).with(app1.application().latestVersion().get()), app2.instance().change()); app2.runJob(productionEuWest1) .runJob(testEuWest1); assertEquals(Change.empty(), app2.instance().change()); assertEquals(Change.empty(), app3.instance().change()); tester.upgrader().maintain(); tester.outstandingChangeDeployer().run(); assertEquals(Change.of(version), app3.instance().change()); tester.deploymentTrigger().cancelChange(app3.instanceId(), ALL); tester.outstandingChangeDeployer().run(); tester.upgrader().maintain(); assertEquals(Change.of(app1.application().latestVersion().get()), app3.instance().change()); app3.runJob(productionEuWest1); tester.upgrader().maintain(); app3.runJob(productionEuWest1); tester.triggerJobs(); assertEquals(List.of(), tester.jobs().active()); assertEquals(Change.empty(), app3.instance().change()); } @Test public void testChangeCompletion() { var app = tester.newDeploymentContext().submit().deploy(); var version = new Version("7.1"); tester.controllerTester().upgradeSystem(version); tester.upgrader().maintain(); app.runJob(systemTest).runJob(stagingTest).runJob(productionUsCentral1); app.submit(); tester.triggerJobs(); tester.outstandingChangeDeployer().run(); assertEquals(Change.of(version), app.instance().change()); app.runJob(productionUsEast3).runJob(productionUsWest1); tester.triggerJobs(); tester.outstandingChangeDeployer().run(); assertEquals(Change.of(app.lastSubmission().get()), app.instance().change()); } @Test public void mixedDirectAndPipelineJobsInProduction() { ApplicationPackage cdPackage = new ApplicationPackageBuilder().region("cd-us-east-1") .region("cd-aws-us-east-1a") .build(); var zones = List.of(ZoneId.from("test.cd-us-west-1"), ZoneId.from("staging.cd-us-west-1"), ZoneId.from("prod.cd-us-east-1"), 
ZoneId.from("prod.cd-aws-us-east-1a")); tester.controllerTester() .setZones(zones, SystemName.cd) .setRoutingMethod(zones, RoutingMethod.shared); tester.controllerTester().upgradeSystem(Version.fromString("6.1")); tester.controllerTester().computeVersionStatus(); var app = tester.newDeploymentContext(); app.runJob(productionCdUsEast1, cdPackage); app.submit(cdPackage); app.runJob(systemTest); tester.controller().applications().deploymentTrigger().forceTrigger(app.instanceId(), productionCdUsEast1, "user", false); app.runJob(productionCdUsEast1) .abortJob(stagingTest) .runJob(stagingTest) .runJob(productionCdAwsUsEast1a); app.runJob(productionCdUsEast1, cdPackage); var version = new Version("7.1"); tester.controllerTester().upgradeSystem(version); tester.upgrader().maintain(); tester.controller().applications().deploymentTrigger().forceTrigger(app.instanceId(), productionCdUsEast1, "user", false); app.runJob(productionCdUsEast1) .abortJob(systemTest) .jobAborted(stagingTest) .runJob(systemTest) .runJob(stagingTest) .runJob(productionCdAwsUsEast1a); app.runJob(productionCdUsEast1, cdPackage); app.submit(cdPackage); app.runJob(systemTest); tester.controller().applications().deploymentTrigger().forceTrigger(app.instanceId(), productionCdUsEast1, "user", false); app.runJob(productionCdUsEast1) .jobAborted(stagingTest) .runJob(stagingTest) .runJob(productionCdAwsUsEast1a); } @Test public void testsInSeparateInstance() { String deploymentSpec = "<deployment version='1.0' athenz-domain='domain' athenz-service='service'>\n" + " <instance id='canary'>\n" + " <upgrade policy='canary' />\n" + " <test />\n" + " <staging />\n" + " </instance>\n" + " <instance id='default'>\n" + " <prod>\n" + " <region active='true'>eu-west-1</region>\n" + " <test>eu-west-1</test>\n" + " </prod>\n" + " </instance>\n" + "</deployment>\n"; ApplicationPackage applicationPackage = ApplicationPackageBuilder.fromDeploymentXml(deploymentSpec); var canary = tester.newDeploymentContext("t", "a", 
"canary").submit(applicationPackage); var conservative = tester.newDeploymentContext("t", "a", "default"); canary.runJob(systemTest) .runJob(stagingTest); conservative.runJob(productionEuWest1) .runJob(testEuWest1); canary.submit(applicationPackage) .runJob(systemTest) .runJob(stagingTest); tester.outstandingChangeDeployer().run(); conservative.runJob(productionEuWest1) .runJob(testEuWest1); tester.controllerTester().upgradeSystem(new Version("7.7.7")); tester.upgrader().maintain(); canary.runJob(systemTest) .runJob(stagingTest); tester.upgrader().maintain(); conservative.runJob(productionEuWest1) .runJob(testEuWest1); } @Test public void testEagerTests() { var app = tester.newDeploymentContext().submit().deploy(); Version version1 = new Version("7.8.9"); ApplicationVersion build1 = app.lastSubmission().get(); tester.controllerTester().upgradeSystem(version1); tester.upgrader().maintain(); app.runJob(stagingTest); app.submit(); ApplicationVersion build2 = app.lastSubmission().get(); assertNotEquals(build1, build2); tester.triggerJobs(); app.assertRunning(stagingTest); assertEquals(version1, app.instanceJobs().get(stagingTest).lastCompleted().get().versions().targetPlatform()); assertEquals(build1, app.instanceJobs().get(stagingTest).lastCompleted().get().versions().targetApplication()); assertEquals(version1, app.instanceJobs().get(stagingTest).lastTriggered().get().versions().sourcePlatform().get()); assertEquals(build1, app.instanceJobs().get(stagingTest).lastTriggered().get().versions().sourceApplication().get()); assertEquals(version1, app.instanceJobs().get(stagingTest).lastTriggered().get().versions().targetPlatform()); assertEquals(build2, app.instanceJobs().get(stagingTest).lastTriggered().get().versions().targetApplication()); app.runJob(systemTest) .runJob(productionUsCentral1) .runJob(productionUsEast3) .runJob(productionUsWest1); tester.outstandingChangeDeployer().run(); assertEquals(RunStatus.running, tester.jobs().last(app.instanceId(), 
stagingTest).get().status()); app.runJob(stagingTest); tester.triggerJobs(); app.assertNotRunning(stagingTest); } @Test public void testTriggeringOfIdleTestJobsWhenFirstDeploymentIsOnNewerVersionThanChange() { ApplicationPackage applicationPackage = new ApplicationPackageBuilder().systemTest() .stagingTest() .region("us-east-3") .region("us-west-1") .build(); var app = tester.newDeploymentContext().submit(applicationPackage).deploy(); var appToAvoidVersionGC = tester.newDeploymentContext("g", "c", "default").submit().deploy(); Version version2 = new Version("7.8.9"); Version version3 = new Version("8.9.10"); tester.controllerTester().upgradeSystem(version2); tester.deploymentTrigger().triggerChange(appToAvoidVersionGC.instanceId(), Change.of(version2)); appToAvoidVersionGC.deployPlatform(version2); tester.controllerTester().upgradeSystem(version3); tester.deploymentTrigger().triggerChange(app.instanceId(), Change.of(version3)); app.runJob(systemTest).runJob(stagingTest); tester.triggerJobs(); tester.upgrader().overrideConfidence(version3, VespaVersion.Confidence.broken); tester.controllerTester().computeVersionStatus(); tester.upgrader().run(); assertEquals(Optional.of(version2), app.instance().change().platform()); app.runJob(systemTest) .runJob(productionUsEast3) .runJob(stagingTest) .runJob(productionUsWest1); assertEquals(version3, app.instanceJobs().get(productionUsEast3).lastSuccess().get().versions().targetPlatform()); assertEquals(version2, app.instanceJobs().get(productionUsWest1).lastSuccess().get().versions().targetPlatform()); assertEquals(Map.of(), app.deploymentStatus().jobsToRun()); assertEquals(Change.empty(), app.instance().change()); assertEquals(List.of(), tester.jobs().active()); } @Test public void testRetriggerQueue() { var app = tester.newDeploymentContext().submit().deploy(); app.submit(); tester.triggerJobs(); tester.deploymentTrigger().reTrigger(app.instanceId(), productionUsEast3); 
tester.deploymentTrigger().reTriggerOrAddToQueue(app.deploymentIdIn(ZoneId.from("prod", "us-east-3"))); tester.deploymentTrigger().reTriggerOrAddToQueue(app.deploymentIdIn(ZoneId.from("prod", "us-east-3"))); List<RetriggerEntry> retriggerEntries = tester.controller().curator().readRetriggerEntries(); Assert.assertEquals(1, retriggerEntries.size()); } }
Horrible keyboardss on the 2018 models :)
// NOTE(review): this method was present twice as byte-identical, back-to-back copies.
// Duplicate method definitions in the same scope are a compile-time error in Java, so
// one copy has been removed; the surviving copy is reformatted and documented below.
// NOTE(review): sibling tests in this file carry an @Test annotation; this method does
// not — confirm whether the annotation was dropped by mistake before relying on it.
public void testMultipleInstancesWithDifferentChanges() {
    // Four instances of one application with different step graphs:
    //  - i1 and i2 deploy us-east-3 in parallel; i1 declares a 6h delay after its deployment
    //  - i3 deploys us-east-3, declares an 18h delay, then a production test there
    //  - i4 runs system and staging tests before deploying us-east-3
    DeploymentContext i1 = tester.newDeploymentContext("t", "a", "i1");
    DeploymentContext i2 = tester.newDeploymentContext("t", "a", "i2");
    DeploymentContext i3 = tester.newDeploymentContext("t", "a", "i3");
    DeploymentContext i4 = tester.newDeploymentContext("t", "a", "i4");
    ApplicationPackage applicationPackage = ApplicationPackageBuilder
            .fromDeploymentXml("<deployment version='1'>\n" +
                               " <parallel>\n" +
                               " <instance id='i1'>\n" +
                               " <prod>\n" +
                               " <region>us-east-3</region>\n" +
                               " <delay hours='6' />\n" +
                               " </prod>\n" +
                               " </instance>\n" +
                               " <instance id='i2'>\n" +
                               " <prod>\n" +
                               " <region>us-east-3</region>\n" +
                               " </prod>\n" +
                               " </instance>\n" +
                               " </parallel>\n" +
                               " <instance id='i3'>\n" +
                               " <prod>\n" +
                               " <region>us-east-3</region>\n" +
                               " <delay hours='18' />\n" +
                               " <test>us-east-3</test>\n" +
                               " </prod>\n" +
                               " </instance>\n" +
                               " <instance id='i4'>\n" +
                               " <test />\n" +
                               " <staging />\n" +
                               " <prod>\n" +
                               " <region>us-east-3</region>\n" +
                               " </prod>\n" +
                               " </instance>\n" +
                               "</deployment>\n");

    // First submission (v0): after the outstanding-change deployer runs, i1 and i2
    // carry v0 as their change, while i3 and i4 have no change yet.
    i1.submit(applicationPackage);
    Optional<ApplicationVersion> v0 = i1.lastSubmission();
    tester.outstandingChangeDeployer().run();
    assertEquals(v0, i1.instance().change().application());
    assertEquals(v0, i2.instance().change().application());
    assertEquals(Optional.empty(), i3.instance().change().application());
    assertEquals(Optional.empty(), i4.instance().change().application());

    // Complete v0 in i1 and i2; i3 and i4 still receive no change at this point.
    i4.runJob(systemTest).runJob(stagingTest);
    i1.runJob(productionUsEast3);
    i2.runJob(productionUsEast3);
    tester.outstandingChangeDeployer().run();
    assertEquals(v0, i1.instance().latestDeployed());
    assertEquals(v0, i2.instance().latestDeployed());
    assertEquals(Optional.empty(), i1.instance().change().application());
    assertEquals(Optional.empty(), i2.instance().change().application());
    assertEquals(Optional.empty(), i3.instance().change().application());
    assertEquals(Optional.empty(), i4.instance().change().application());

    // After 6 hours (i1's declared delay), the deployer hands v0 on to i3.
    tester.clock().advance(Duration.ofHours(6));
    tester.outstandingChangeDeployer().run();
    assertEquals(Optional.empty(), i1.instance().change().application());
    assertEquals(Optional.empty(), i2.instance().change().application());
    assertEquals(v0, i3.instance().change().application());
    assertEquals(Optional.empty(), i4.instance().change().application());
    i3.runJob(productionUsEast3);

    // Second submission (v1) while i3 still holds v0: i1 and i2 deploy v1,
    // i3's change remains v0 and i4 still has none.
    tester.clock().advance(Duration.ofHours(12));
    i1.submit(applicationPackage);
    Optional<ApplicationVersion> v1 = i1.lastSubmission();
    i4.runJob(systemTest).runJob(stagingTest);
    i1.runJob(productionUsEast3);
    i2.runJob(productionUsEast3);
    assertEquals(v1, i1.instance().latestDeployed());
    assertEquals(v1, i2.instance().latestDeployed());
    assertEquals(Optional.empty(), i1.instance().change().application());
    assertEquals(Optional.empty(), i2.instance().change().application());
    assertEquals(v0, i3.instance().change().application());
    assertEquals(Optional.empty(), i4.instance().change().application());

    // Third submission (v2): only i1 deploys it here.
    tester.clock().advance(Duration.ofHours(3));
    i1.submit(applicationPackage);
    Optional<ApplicationVersion> v2 = i1.lastSubmission();
    i4.runJob(systemTest).runJob(stagingTest);
    i1.runJob(productionUsEast3);

    // i3 completes v0 once its production test passes ...
    tester.clock().advance(Duration.ofHours(3));
    assertEquals(v0, i3.instance().change().application());
    i3.runJob(testUsEast3);
    assertEquals(Optional.empty(), i3.instance().change().application());

    // ... and a final deployer run leaves each instance at a different version:
    // i1 settled on v2, i2 now targets v2, i3 targets v1, i4 targets v0.
    tester.outstandingChangeDeployer().run();
    assertEquals(v2, i1.instance().latestDeployed());
    assertEquals(v1, i2.instance().latestDeployed());
    assertEquals(v0, i3.instance().latestDeployed());
    assertEquals(Optional.empty(), i1.instance().change().application());
    assertEquals(v2, i2.instance().change().application());
    assertEquals(v1, i3.instance().change().application());
    assertEquals(v0, i4.instance().change().application());
}
class DeploymentTriggerTest { private final DeploymentTester tester = new DeploymentTester(); @Test public void testTriggerFailing() { ApplicationPackage applicationPackage = new ApplicationPackageBuilder() .upgradePolicy("default") .region("us-west-1") .build(); var app = tester.newDeploymentContext().submit(applicationPackage).deploy(); Version version = Version.fromString("6.3"); tester.controllerTester().upgradeSystem(version); tester.upgrader().maintain(); app.failDeployment(stagingTest); tester.triggerJobs(); assertEquals("Retried dead job", 2, tester.jobs().active().size()); app.assertRunning(stagingTest); app.runJob(stagingTest); app.assertRunning(systemTest); assertEquals(1, tester.jobs().active().size()); app.timeOutUpgrade(systemTest); tester.triggerJobs(); assertEquals("Job is retried on failure", 1, tester.jobs().active().size()); app.runJob(systemTest); tester.triggerJobs(); app.assertRunning(productionUsWest1); tester.applications().lockApplicationOrThrow(app.application().id(), locked -> tester.applications().store(locked.withProjectId(OptionalLong.empty()))); app.timeOutConvergence(productionUsWest1); tester.triggerJobs(); assertEquals("Job is not triggered when no projectId is present", 0, tester.jobs().active().size()); } @Test public void leadingUpgradeAllowsApplicationChangeWhileUpgrading() { var applicationPackage = new ApplicationPackageBuilder().region("us-east-3") .upgradeRollout("leading") .build(); var app = tester.newDeploymentContext(); app.submit(applicationPackage).deploy(); Change upgrade = Change.of(new Version("7.8.9")); tester.controllerTester().upgradeSystem(upgrade.platform().get()); tester.upgrader().maintain(); app.runJob(systemTest).runJob(stagingTest); tester.triggerJobs(); app.assertRunning(productionUsEast3); assertEquals(upgrade, app.instance().change()); app.submit(applicationPackage); assertEquals(upgrade.with(app.lastSubmission().get()), app.instance().change()); } @Test public void abortsJobsOnNewApplicationChange() { 
var app = tester.newDeploymentContext(); app.submit() .runJob(systemTest) .runJob(stagingTest); tester.triggerJobs(); RunId id = tester.jobs().last(app.instanceId(), productionUsCentral1).get().id(); assertTrue(tester.jobs().active(id).isPresent()); app.submit(); assertTrue(tester.jobs().active(id).isPresent()); tester.triggerJobs(); tester.runner().run(); assertTrue(tester.jobs().active(id).isPresent()); app.runJob(systemTest).runJob(stagingTest).runJob(stagingTest); tester.triggerJobs(); app.jobAborted(productionUsCentral1); app.runJob(productionUsCentral1).runJob(productionUsWest1).runJob(productionUsEast3); assertEquals(Change.empty(), app.instance().change()); tester.controllerTester().upgradeSystem(new Version("8.9")); tester.upgrader().maintain(); app.runJob(systemTest).runJob(stagingTest); tester.triggerJobs(); app.submit(); app.runJob(systemTest).runJob(stagingTest); tester.triggerJobs(); tester.runner().run(); assertEquals(EnumSet.of(productionUsCentral1), tester.jobs().active().stream() .map(run -> run.id().type()) .collect(Collectors.toCollection(() -> EnumSet.noneOf(JobType.class)))); } @Test public void deploymentSpecWithDelays() { ApplicationPackage applicationPackage = new ApplicationPackageBuilder() .systemTest() .delay(Duration.ofSeconds(30)) .region("us-west-1") .delay(Duration.ofMinutes(2)) .delay(Duration.ofMinutes(2)) .region("us-central-1") .delay(Duration.ofMinutes(10)) .build(); var app = tester.newDeploymentContext().submit(applicationPackage); app.runJob(systemTest); tester.clock().advance(Duration.ofSeconds(15)); app.runJob(stagingTest); tester.triggerJobs(); assertEquals(0, tester.jobs().active().size()); tester.clock().advance(Duration.ofSeconds(15)); tester.triggerJobs(); assertEquals(1, tester.jobs().active().size()); app.assertRunning(productionUsWest1); tester.clock().advance(Duration.ofMinutes(3)); tester.triggerJobs(); assertEquals(1, tester.jobs().active().size()); app.assertRunning(productionUsWest1); 
app.runJob(productionUsWest1); tester.triggerJobs(); assertTrue("No more jobs triggered at this time", tester.jobs().active().isEmpty()); tester.clock().advance(Duration.ofMinutes(3)); tester.triggerJobs(); assertTrue("No more jobs triggered at this time", tester.jobs().active().isEmpty()); tester.clock().advance(Duration.ofMinutes(1)); tester.triggerJobs(); app.runJob(productionUsCentral1); assertTrue("All jobs consumed", tester.jobs().active().isEmpty()); tester.clock().advance(Duration.ofMinutes(10)); tester.triggerJobs(); assertTrue("All jobs consumed", tester.jobs().active().isEmpty()); } @Test public void deploymentSpecWithParallelDeployments() { ApplicationPackage applicationPackage = new ApplicationPackageBuilder() .region("us-central-1") .parallel("us-west-1", "us-east-3") .region("eu-west-1") .build(); var app = tester.newDeploymentContext().submit(applicationPackage); app.runJob(systemTest).runJob(stagingTest); tester.triggerJobs(); assertEquals(1, tester.jobs().active().size()); app.runJob(productionUsCentral1); tester.triggerJobs(); assertEquals(2, tester.jobs().active().size()); app.assertRunning(productionUsEast3); app.assertRunning(productionUsWest1); app.runJob(productionUsWest1); assertEquals(1, tester.jobs().active().size()); app.assertRunning(productionUsEast3); app.runJob(productionUsEast3); tester.triggerJobs(); assertEquals(1, tester.jobs().active().size()); app.runJob(productionEuWest1); assertTrue("All jobs consumed", tester.jobs().active().isEmpty()); } @Test public void testNoOtherChangesDuringSuspension() { ApplicationPackage applicationPackage = new ApplicationPackageBuilder() .region("us-central-1") .parallel("us-west-1", "us-east-3") .build(); var application = tester.newDeploymentContext().submit().deploy(); tester.configServer().setSuspension(application.deploymentIdIn(ZoneId.from("prod", "us-central-1")), true); application.submit() .runJob(systemTest) .runJob(stagingTest) .runJob(productionUsCentral1); tester.triggerJobs(); 
application.assertNotRunning(productionUsEast3); application.assertNotRunning(productionUsWest1); tester.configServer().setSuspension(application.deploymentIdIn(ZoneId.from("prod", "us-central-1")), false); tester.triggerJobs(); application.runJob(productionUsWest1).runJob(productionUsEast3); assertEquals(Change.empty(), application.instance().change()); } @Test public void testBlockRevisionChange() { tester.at(Instant.parse("2017-09-26T17:30:00.00Z")); Version version = Version.fromString("6.2"); tester.controllerTester().upgradeSystem(version); ApplicationPackage applicationPackage = new ApplicationPackageBuilder() .upgradePolicy("canary") .blockChange(true, false, "tue", "18-19", "UTC") .region("us-west-1") .region("us-central-1") .region("us-east-3") .build(); var app = tester.newDeploymentContext().submit(applicationPackage).deploy(); tester.clock().advance(Duration.ofHours(1)); tester.triggerJobs(); assertEquals(0, tester.jobs().active().size()); app.submit(applicationPackage); assertTrue(app.deploymentStatus().outstandingChange(app.instance().name()).hasTargets()); app.runJob(systemTest).runJob(stagingTest); tester.outstandingChangeDeployer().run(); assertTrue(app.deploymentStatus().outstandingChange(app.instance().name()).hasTargets()); tester.triggerJobs(); assertEquals(emptyList(), tester.jobs().active()); tester.clock().advance(Duration.ofHours(2)); tester.outstandingChangeDeployer().run(); assertFalse(app.deploymentStatus().outstandingChange(app.instance().name()).hasTargets()); tester.triggerJobs(); app.assertRunning(productionUsWest1); } @Test public void testCompletionOfPartOfChangeDuringBlockWindow() { tester.at(Instant.parse("2017-09-26T17:30:00.00Z")); ApplicationPackage applicationPackage = new ApplicationPackageBuilder() .blockChange(true, true, "tue", "18", "UTC") .region("us-west-1") .region("us-east-3") .build(); var app = tester.newDeploymentContext().submit(applicationPackage).deploy(); Version v1 = Version.fromString("6.1"); Version v2 = 
Version.fromString("6.2"); tester.controllerTester().upgradeSystem(v2); tester.upgrader().maintain(); app.submit(applicationPackage); app.runJob(stagingTest).runJob(systemTest); tester.clock().advance(Duration.ofHours(1)); tester.outstandingChangeDeployer().run(); app.runJob(productionUsWest1); assertEquals(1, app.instanceJobs().get(productionUsWest1).lastSuccess().get().versions().targetApplication().buildNumber().getAsLong()); assertEquals(2, app.deploymentStatus().outstandingChange(app.instance().name()).application().get().buildNumber().getAsLong()); tester.triggerJobs(); assertEquals(3, tester.jobs().active().size()); app.runJob(productionUsEast3); assertEquals(2, tester.jobs().active().size()); assertEquals(Change.empty(), app.instance().change()); assertTrue(app.deploymentStatus().outstandingChange(app.instance().name()).hasTargets()); app.runJob(stagingTest).runJob(systemTest); tester.clock().advance(Duration.ofHours(1)); tester.outstandingChangeDeployer().run(); assertTrue(app.instance().change().hasTargets()); assertFalse(app.deploymentStatus().outstandingChange(app.instance().name()).hasTargets()); app.runJob(productionUsWest1).runJob(productionUsEast3); assertFalse(app.instance().change().hasTargets()); } @Test public void testJobPause() { ApplicationPackage applicationPackage = new ApplicationPackageBuilder() .region("us-west-1") .region("us-east-3") .build(); var app = tester.newDeploymentContext().submit(applicationPackage).deploy(); tester.controllerTester().upgradeSystem(new Version("9.8.7")); tester.upgrader().maintain(); tester.deploymentTrigger().pauseJob(app.instanceId(), productionUsWest1, tester.clock().instant().plus(Duration.ofSeconds(1))); tester.deploymentTrigger().pauseJob(app.instanceId(), productionUsEast3, tester.clock().instant().plus(Duration.ofSeconds(3))); app.runJob(systemTest).runJob(stagingTest); tester.triggerJobs(); app.assertNotRunning(productionUsWest1); tester.clock().advance(Duration.ofMillis(1500)); tester.triggerJobs(); 
app.assertRunning(productionUsWest1); tester.deploymentTrigger().pauseJob(app.instanceId(), productionUsWest1, tester.clock().instant().plus(Duration.ofSeconds(1))); app.failDeployment(productionUsWest1); tester.triggerJobs(); app.assertNotRunning(productionUsWest1); tester.clock().advance(Duration.ofMillis(1000)); tester.triggerJobs(); app.runJob(productionUsWest1); tester.triggerJobs(); app.assertNotRunning(productionUsEast3); tester.deploymentTrigger().forceTrigger(app.instanceId(), productionUsEast3, "mrTrigger", true); app.assertRunning(productionUsEast3); assertFalse(app.instance().jobPause(productionUsEast3).isPresent()); } @Test public void applicationVersionIsNotDowngraded() { ApplicationPackage applicationPackage = new ApplicationPackageBuilder() .region("us-central-1") .region("eu-west-1") .build(); var app = tester.newDeploymentContext().submit(applicationPackage).deploy(); app.submit(applicationPackage) .runJob(systemTest) .runJob(stagingTest) .timeOutUpgrade(productionUsCentral1); ApplicationVersion appVersion1 = app.lastSubmission().get(); assertEquals(appVersion1, app.deployment(ZoneId.from("prod.us-central-1")).applicationVersion()); tester.deploymentTrigger().cancelChange(app.instanceId(), PLATFORM); assertEquals(Change.of(appVersion1), app.instance().change()); tester.deploymentTrigger().cancelChange(app.instanceId(), ALL); assertEquals(Change.empty(), app.instance().change()); Version version1 = new Version("6.2"); tester.controllerTester().upgradeSystem(version1); tester.upgrader().maintain(); app.runJob(systemTest).runJob(stagingTest).failDeployment(productionUsCentral1); app.runJob(systemTest).runJob(stagingTest); app.runJob(productionUsCentral1).runJob(productionEuWest1); assertEquals(appVersion1, app.deployment(ZoneId.from("prod.us-central-1")).applicationVersion()); } @Test public void downgradingApplicationVersionWorks() { var app = tester.newDeploymentContext().submit().deploy(); ApplicationVersion appVersion0 = 
app.lastSubmission().get(); assertEquals(Optional.of(appVersion0), app.instance().latestDeployed()); app.submit().deploy(); ApplicationVersion appVersion1 = app.lastSubmission().get(); assertEquals(Optional.of(appVersion1), app.instance().latestDeployed()); tester.deploymentTrigger().forceChange(app.instanceId(), Change.of(appVersion0)); assertEquals(Change.of(appVersion0), app.instance().change()); app.runJob(stagingTest) .runJob(productionUsCentral1) .runJob(productionUsEast3) .runJob(productionUsWest1); assertEquals(Change.empty(), app.instance().change()); assertEquals(appVersion0, app.instance().deployments().get(productionUsEast3.zone(tester.controller().system())).applicationVersion()); assertEquals(Optional.of(appVersion0), app.instance().latestDeployed()); } @Test public void settingANoOpChangeIsANoOp() { var app = tester.newDeploymentContext().submit(); assertEquals(Optional.empty(), app.instance().latestDeployed()); app.deploy(); ApplicationVersion appVersion0 = app.lastSubmission().get(); assertEquals(Optional.of(appVersion0), app.instance().latestDeployed()); app.submit().deploy(); ApplicationVersion appVersion1 = app.lastSubmission().get(); assertEquals(Optional.of(appVersion1), app.instance().latestDeployed()); assertEquals(Change.empty(), app.instance().change()); tester.deploymentTrigger().forceChange(app.instanceId(), Change.of(appVersion1)); assertEquals(Change.empty(), app.instance().change()); assertEquals(Optional.of(appVersion1), app.instance().latestDeployed()); } @Test public void stepIsCompletePreciselyWhenItShouldBe() { var app1 = tester.newDeploymentContext("tenant1", "app1", "default"); var app2 = tester.newDeploymentContext("tenant1", "app2", "default"); ApplicationPackage applicationPackage = new ApplicationPackageBuilder() .region("us-central-1") .region("eu-west-1") .build(); Version version0 = Version.fromString("7.0"); tester.controllerTester().upgradeSystem(version0); app1.submit(applicationPackage).deploy(); 
app2.submit(applicationPackage).deploy(); Version version1 = Version.fromString("7.1"); tester.controllerTester().upgradeSystem(version1); tester.upgrader().maintain(); app2.deployPlatform(version1); tester.deploymentTrigger().cancelChange(app1.instanceId(), ALL); Version version2 = Version.fromString("7.2"); tester.controllerTester().upgradeSystem(version2); tester.upgrader().maintain(); tester.triggerJobs(); app1.jobAborted(systemTest).jobAborted(stagingTest); app1.runJob(systemTest).runJob(stagingTest).timeOutConvergence(productionUsCentral1); assertEquals(version2, app1.deployment(productionUsCentral1.zone(main)).version()); Instant triggered = app1.instanceJobs().get(productionUsCentral1).lastTriggered().get().start(); tester.clock().advance(Duration.ofHours(1)); tester.upgrader().overrideConfidence(version2, VespaVersion.Confidence.broken); tester.controllerTester().computeVersionStatus(); tester.upgrader().maintain(); assertEquals("Change becomes latest non-broken version", Change.of(version1), app1.instance().change()); app1.runJob(systemTest).runJob(stagingTest) .failDeployment(productionEuWest1); assertEquals(triggered, app1.instanceJobs().get(productionUsCentral1).lastTriggered().get().start()); ApplicationVersion revision1 = app1.lastSubmission().get(); app1.submit(applicationPackage); ApplicationVersion revision2 = app1.lastSubmission().get(); app1.runJob(systemTest).runJob(stagingTest); assertEquals(Change.of(version1).with(revision2), app1.instance().change()); tester.triggerJobs(); app1.assertRunning(productionUsCentral1); assertEquals(version2, app1.instance().deployments().get(productionUsCentral1.zone(main)).version()); assertEquals(revision1, app1.deployment(productionUsCentral1.zone(main)).applicationVersion()); assertTrue(triggered.isBefore(app1.instanceJobs().get(productionUsCentral1).lastTriggered().get().start())); app1.timeOutUpgrade(productionUsCentral1); assertEquals(version2, 
app1.instance().deployments().get(productionUsCentral1.zone(main)).version()); assertEquals(revision2, app1.deployment(productionUsCentral1.zone(main)).applicationVersion()); tester.clock().advance(Duration.ofHours(2).plus(Duration.ofSeconds(1))); tester.triggerJobs(); app1.assertNotRunning(productionUsCentral1); app1.runJob(systemTest).runJob(stagingTest).runJob(productionEuWest1); assertFalse(app1.instance().change().hasTargets()); assertFalse(app1.instanceJobs().get(productionUsCentral1).isSuccess()); } @Test public void eachParallelDeployTargetIsTested() { ApplicationPackage applicationPackage = new ApplicationPackageBuilder() .parallel("eu-west-1", "us-east-3") .build(); var app = tester.newDeploymentContext().submit(applicationPackage).deploy(); Version v1 = new Version("6.1"); Version v2 = new Version("6.2"); tester.controllerTester().upgradeSystem(v2); tester.upgrader().maintain(); app.runJob(systemTest).runJob(stagingTest); app.timeOutConvergence(productionEuWest1); tester.deploymentTrigger().cancelChange(app.instanceId(), PLATFORM); assertEquals(v2, app.deployment(productionEuWest1.zone(main)).version()); assertEquals(v1, app.deployment(productionUsEast3.zone(main)).version()); app.submit(applicationPackage); tester.triggerJobs(); Version firstTested = app.instanceJobs().get(systemTest).lastTriggered().get().versions().targetPlatform(); assertEquals(firstTested, app.instanceJobs().get(stagingTest).lastTriggered().get().versions().targetPlatform()); app.runJob(systemTest).runJob(stagingTest); tester.triggerJobs(); assertNotEquals(firstTested, app.instanceJobs().get(systemTest).lastTriggered().get().versions().targetPlatform()); assertNotEquals(firstTested, app.instanceJobs().get(stagingTest).lastTriggered().get().versions().targetPlatform()); app.runJob(systemTest).runJob(stagingTest); app.triggerJobs().jobAborted(productionUsEast3); app.failDeployment(productionEuWest1).failDeployment(productionUsEast3) 
.runJob(productionEuWest1).runJob(productionUsEast3); assertFalse(app.instance().change().hasTargets()); assertEquals(2, app.instanceJobs().get(productionEuWest1).lastSuccess().get().versions().targetApplication().buildNumber().getAsLong()); assertEquals(2, app.instanceJobs().get(productionUsEast3).lastSuccess().get().versions().targetApplication().buildNumber().getAsLong()); } @Test public void retriesFailingJobs() { ApplicationPackage applicationPackage = new ApplicationPackageBuilder() .region("us-central-1") .build(); var app = tester.newDeploymentContext().submit(applicationPackage).deploy(); app.submit(applicationPackage).runJob(stagingTest).failDeployment(systemTest); app.failDeployment(systemTest); tester.triggerJobs(); app.assertRunning(systemTest); tester.clock().advance(Duration.ofSeconds(1)); app.failDeployment(systemTest); tester.triggerJobs(); app.assertNotRunning(systemTest); tester.clock().advance(Duration.ofMinutes(10).plus(Duration.ofSeconds(1))); tester.triggerJobs(); app.assertRunning(systemTest); app.failDeployment(systemTest); tester.clock().advance(Duration.ofMinutes(15)); tester.triggerJobs(); app.assertNotRunning(systemTest); tester.clock().advance(Duration.ofSeconds(2)); tester.triggerJobs(); app.assertRunning(systemTest); app.failDeployment(systemTest); tester.triggerJobs(); app.assertNotRunning(systemTest); app.submit(applicationPackage).deploy(); assertTrue("Deployment completed", tester.jobs().active().isEmpty()); } @Test public void testPlatformVersionSelection() { ApplicationPackage applicationPackage = new ApplicationPackageBuilder() .region("us-west-1") .build(); Version version1 = tester.controller().readSystemVersion(); var app1 = tester.newDeploymentContext(); app1.submit(applicationPackage).deploy(); assertEquals("First deployment gets system version", version1, app1.application().oldestDeployedPlatform().get()); assertEquals(version1, tester.configServer().lastPrepareVersion().get()); Version version2 = new 
Version(version1.getMajor(), version1.getMinor() + 1); tester.controllerTester().upgradeSystem(version2); applicationPackage = new ApplicationPackageBuilder() .region("us-west-1") .region("us-east-3") .build(); app1.submit(applicationPackage).deploy(); assertEquals("Application change preserves version, and new region gets oldest version too", version1, app1.application().oldestDeployedPlatform().get()); assertEquals(version1, tester.configServer().lastPrepareVersion().get()); assertFalse("Change deployed", app1.instance().change().hasTargets()); tester.upgrader().maintain(); app1.deployPlatform(version2); assertEquals("Version upgrade changes version", version2, app1.application().oldestDeployedPlatform().get()); assertEquals(version2, tester.configServer().lastPrepareVersion().get()); } @Test public void requeueOutOfCapacityStagingJob() { ApplicationPackage applicationPackage = new ApplicationPackageBuilder() .region("us-east-3") .build(); var app1 = tester.newDeploymentContext("tenant1", "app1", "default").submit(applicationPackage); var app2 = tester.newDeploymentContext("tenant2", "app2", "default").submit(applicationPackage); var app3 = tester.newDeploymentContext("tenant3", "app3", "default").submit(applicationPackage); app2.runJob(systemTest); tester.clock().advance(Duration.ofMinutes(1)); app1.runJob(systemTest); tester.clock().advance(Duration.ofMinutes(1)); app3.runJob(systemTest); tester.triggerJobs(); assertEquals(3, tester.jobs().active().size()); tester.abortAll(); assertEquals(List.of(), tester.jobs().active()); tester.readyJobsTrigger().maintain(); assertEquals(1, tester.jobs().active().size()); tester.readyJobsTrigger().maintain(); assertEquals(2, tester.jobs().active().size()); tester.readyJobsTrigger().maintain(); assertEquals(3, tester.jobs().active().size()); app3.outOfCapacity(stagingTest); app1.abortJob(stagingTest); app2.abortJob(stagingTest); tester.readyJobsTrigger().maintain(); app3.assertRunning(stagingTest); assertEquals(1, 
tester.jobs().active().size()); tester.readyJobsTrigger().maintain(); assertEquals(2, tester.jobs().active().size()); tester.readyJobsTrigger().maintain(); assertEquals(3, tester.jobs().active().size()); app2.deploy(); app3.deploy(); app1.runJob(stagingTest); assertEquals(0, tester.jobs().active().size()); tester.controllerTester().upgradeSystem(new Version("6.2")); tester.upgrader().maintain(); app1.submit(applicationPackage); tester.readyJobsTrigger().run(); app1.assertRunning(systemTest); app1.assertRunning(stagingTest); assertEquals(2, tester.jobs().active().size()); tester.triggerJobs(); app3.outOfCapacity(systemTest); app1.abortJob(systemTest); app1.abortJob(stagingTest); app2.abortJob(systemTest); app2.abortJob(stagingTest); app3.abortJob(stagingTest); assertEquals(0, tester.jobs().active().size()); assertTrue(app1.instance().change().application().isPresent()); assertFalse(app2.instance().change().application().isPresent()); assertFalse(app3.instance().change().application().isPresent()); tester.readyJobsTrigger().maintain(); app1.assertRunning(stagingTest); app3.assertRunning(systemTest); assertEquals(2, tester.jobs().active().size()); tester.readyJobsTrigger().maintain(); app1.assertRunning(systemTest); assertEquals(4, tester.jobs().active().size()); tester.readyJobsTrigger().maintain(); app3.assertRunning(stagingTest); app2.assertRunning(stagingTest); app2.assertRunning(systemTest); assertEquals(6, tester.jobs().active().size()); } @Test public void testUserInstancesNotInDeploymentSpec() { var app = tester.newDeploymentContext(); tester.controller().applications().createInstance(app.application().id().instance("user")); app.submit().deploy(); } @Test @Test public void testMultipleInstances() { ApplicationPackage applicationPackage = new ApplicationPackageBuilder() .instances("instance1,instance2") .region("us-east-3") .build(); var app = tester.newDeploymentContext("tenant1", "application1", "instance1") .submit(applicationPackage) .completeRollout(); 
assertEquals(2, app.application().instances().size()); assertEquals(2, app.application().productionDeployments().values().stream() .mapToInt(Collection::size) .sum()); } @Test public void testDeclaredProductionTests() { ApplicationPackage applicationPackage = new ApplicationPackageBuilder() .region("us-east-3") .delay(Duration.ofMinutes(1)) .test("us-east-3") .region("us-west-1") .region("us-central-1") .test("us-central-1") .test("us-west-1") .build(); var app = tester.newDeploymentContext().submit(applicationPackage); app.runJob(systemTest).runJob(stagingTest).runJob(productionUsEast3); app.assertNotRunning(productionUsWest1); tester.clock().advance(Duration.ofMinutes(1)); app.runJob(testUsEast3) .runJob(productionUsWest1).runJob(productionUsCentral1) .runJob(testUsCentral1).runJob(testUsWest1); assertEquals(Change.empty(), app.instance().change()); Version version0 = app.application().oldestDeployedPlatform().get(); Version version1 = Version.fromString("7.7"); tester.controllerTester().upgradeSystem(version1); tester.upgrader().maintain(); app.runJob(systemTest).runJob(stagingTest).runJob(productionUsEast3); tester.clock().advance(Duration.ofMinutes(1)); app.failDeployment(testUsEast3); tester.triggerJobs(); app.assertRunning(testUsEast3); tester.upgrader().overrideConfidence(version1, VespaVersion.Confidence.broken); tester.controllerTester().computeVersionStatus(); tester.upgrader().maintain(); app.failDeployment(testUsEast3); app.assertNotRunning(testUsEast3); assertEquals(Change.empty(), app.instance().change()); tester.deploymentTrigger().triggerChange(app.instanceId(), Change.of(version0).withPin()); app.runJob(stagingTest).runJob(productionUsEast3); tester.clock().advance(Duration.ofMinutes(1)); app.failDeployment(testUsEast3); tester.clock().advance(Duration.ofMinutes(11)); app.runJob(testUsEast3); assertEquals(Change.empty().withPin(), app.instance().change()); } @Test public void testDeployComplicatedDeploymentSpec() { String complicatedDeploymentSpec 
// testDeployComplicatedDeploymentSpec (continuation): XML deployment spec declaring three
// instances — 'instance' (nested parallel/steps prod ordering, delays, declared prod tests,
// endpoints), 'other' (conservative upgrades, block-change window, notifications) and 'last'.
= "<deployment version='1.0' athenz-domain='domain' athenz-service='service'>\n" + " <parallel>\n" + " <instance id='instance' athenz-service='in-service'>\n" + " <staging />\n" + " <prod>\n" + " <parallel>\n" + " <region active='true'>us-west-1</region>\n" + " <steps>\n" + " <region active='true'>us-east-3</region>\n" + " <delay hours='2' />\n" + " <region active='true'>eu-west-1</region>\n" + " <delay hours='2' />\n" + " </steps>\n" + " <steps>\n" + " <delay hours='3' />\n" + " <region active='true'>aws-us-east-1a</region>\n" + " <parallel>\n" + " <region active='true' athenz-service='no-service'>ap-northeast-1</region>\n" + " <region active='true'>ap-northeast-2</region>\n" + " <test>aws-us-east-1a</test>\n" + " </parallel>\n" + " </steps>\n" + " <delay hours='3' minutes='30' />\n" + " </parallel>\n" + " <parallel>\n" + " <test>ap-northeast-2</test>\n" + " <test>ap-northeast-1</test>\n" + " </parallel>\n" + " <test>us-east-3</test>\n" + " <region active='true'>ap-southeast-1</region>\n" + " </prod>\n" + " <endpoints>\n" + " <endpoint id='foo' container-id='bar'>\n" + " <region>us-east-3</region>\n" + " </endpoint>\n" + " <endpoint id='nalle' container-id='frosk' />\n" + " <endpoint container-id='quux' />\n" + " </endpoints>\n" + " </instance>\n" + " <instance id='other'>\n" + " <upgrade policy='conservative' />\n" + " <test />\n" + " <block-change revision='true' version='false' days='sat' hours='0-23' time-zone='CET' />\n" + " <prod>\n" + " <region active='true'>eu-west-1</region>\n" + " <test>eu-west-1</test>\n" + " </prod>\n" + " <notifications when='failing'>\n" + " <email role='author' />\n" + " <email address='john@dev' when='failing-commit' />\n" + " <email address='jane@dev' />\n" + " </notifications>\n" + " </instance>\n" + " </parallel>\n" + " <instance id='last'>\n" + " <upgrade policy='conservative' />\n" + " <prod>\n" + " <region active='true'>eu-west-1</region>\n" + " </prod>\n" + " </instance>\n" + "</deployment>\n"; ApplicationPackage
// Drives all three instances through staging/system tests, the parallel prod regions and the
// declared delays, asserting the number of concurrently active jobs after each trigger pass.
applicationPackage = ApplicationPackageBuilder.fromDeploymentXml(complicatedDeploymentSpec); var app1 = tester.newDeploymentContext("t", "a", "instance").submit(applicationPackage); var app2 = tester.newDeploymentContext("t", "a", "other"); var app3 = tester.newDeploymentContext("t", "a", "last"); tester.triggerJobs(); assertEquals(2, tester.jobs().active().size()); app1.runJob(stagingTest); tester.triggerJobs(); assertEquals(1, tester.jobs().active().size()); app2.runJob(systemTest); app1.runJob(productionUsWest1); tester.triggerJobs(); assertEquals(2, tester.jobs().active().size()); app1.runJob(productionUsEast3); tester.triggerJobs(); assertEquals(1, tester.jobs().active().size()); tester.clock().advance(Duration.ofHours(2)); app1.runJob(productionEuWest1); tester.triggerJobs(); assertEquals(1, tester.jobs().active().size()); app2.assertNotRunning(testEuWest1); app2.runJob(productionEuWest1); tester.triggerJobs(); assertEquals(1, tester.jobs().active().size()); app2.runJob(testEuWest1); tester.triggerJobs(); assertEquals(List.of(), tester.jobs().active()); tester.clock().advance(Duration.ofHours(1)); app1.runJob(productionAwsUsEast1a); tester.triggerJobs(); assertEquals(3, tester.jobs().active().size()); app1.runJob(testAwsUsEast1a); tester.triggerJobs(); assertEquals(2, tester.jobs().active().size()); app1.runJob(productionApNortheast2); tester.triggerJobs(); assertEquals(1, tester.jobs().active().size()); app1.runJob(productionApNortheast1); tester.triggerJobs(); assertEquals(List.of(), tester.jobs().active()); tester.clock().advance(Duration.ofMinutes(30)); tester.triggerJobs(); assertEquals(List.of(), tester.jobs().active()); tester.clock().advance(Duration.ofMinutes(30)); app1.runJob(testApNortheast1); tester.triggerJobs(); assertEquals(1, tester.jobs().active().size()); app1.runJob(testApNortheast2); tester.triggerJobs(); assertEquals(1, tester.jobs().active().size()); app1.runJob(testUsEast3); tester.triggerJobs(); assertEquals(1,
// Completes the first rollout, then upgrades the system to 8.1 outside the 'other' instance's
// block window, re-submits a new application revision, and replays the whole job graph with
// combined platform + application change, checking which instance carries which change.
tester.jobs().active().size()); app1.runJob(productionApSoutheast1); tester.triggerJobs(); assertEquals(1, tester.jobs().active().size()); app3.runJob(productionEuWest1); tester.triggerJobs(); assertEquals(List.of(), tester.jobs().active()); tester.atMondayMorning().clock().advance(Duration.ofDays(5)); Version version = Version.fromString("8.1"); tester.controllerTester().upgradeSystem(version); tester.upgrader().maintain(); assertEquals(Change.of(version), app1.instance().change()); assertEquals(Change.empty(), app2.instance().change()); assertEquals(Change.empty(), app3.instance().change()); app2.failDeployment(systemTest); app1.submit(applicationPackage); assertEquals(Change.of(version).with(app1.application().latestVersion().get()), app1.instance().change()); app2.runJob(systemTest); app1.jobAborted(stagingTest) .runJob(stagingTest) .runJob(productionUsWest1) .runJob(productionUsEast3); app1.runJob(stagingTest); app2.runJob(systemTest); tester.clock().advance(Duration.ofHours(2)); app1.runJob(productionEuWest1); tester.clock().advance(Duration.ofHours(1)); app1.runJob(productionAwsUsEast1a); tester.triggerJobs(); app1.runJob(testAwsUsEast1a); app1.runJob(productionApNortheast2); app1.runJob(productionApNortheast1); tester.clock().advance(Duration.ofHours(1)); app1.runJob(testApNortheast1); app1.runJob(testApNortheast2); app1.runJob(testUsEast3); app1.runJob(productionApSoutheast1); tester.controllerTester().computeVersionStatus(); tester.upgrader().maintain(); tester.outstandingChangeDeployer().run(); tester.triggerJobs(); assertEquals(2, tester.jobs().active().size()); assertEquals(Change.empty(), app1.instance().change()); assertEquals(Change.of(version), app2.instance().change()); assertEquals(Change.empty(), app3.instance().change()); app1.runJob(stagingTest); app2.runJob(systemTest) .runJob(productionEuWest1) .failDeployment(testEuWest1); tester.clock().advance(Duration.ofDays(1)); tester.upgrader().maintain(); tester.outstandingChangeDeployer().run();
// End of testDeployComplicatedDeploymentSpec: app2 and app3 finish their rollouts; a cancelled
// platform change on app3 is replaced by the outstanding application change, which then
// completes. testChangeCompletion: a platform upgrade is only marked complete once the last
// production job runs on it, after which the outstanding application change takes over.
// mixedDirectAndPipelineJobsInProduction (start): CD-system package and zone setup.
assertEquals(0, tester.jobs().active().size()); tester.triggerJobs(); assertEquals(1, tester.jobs().active().size()); assertEquals(Change.empty(), app1.instance().change()); assertEquals(Change.of(version).with(app1.application().latestVersion().get()), app2.instance().change()); app2.runJob(productionEuWest1) .runJob(testEuWest1); assertEquals(Change.empty(), app2.instance().change()); assertEquals(Change.empty(), app3.instance().change()); tester.upgrader().maintain(); tester.outstandingChangeDeployer().run(); assertEquals(Change.of(version), app3.instance().change()); tester.deploymentTrigger().cancelChange(app3.instanceId(), ALL); tester.outstandingChangeDeployer().run(); tester.upgrader().maintain(); assertEquals(Change.of(app1.application().latestVersion().get()), app3.instance().change()); app3.runJob(productionEuWest1); tester.upgrader().maintain(); app3.runJob(productionEuWest1); tester.triggerJobs(); assertEquals(List.of(), tester.jobs().active()); assertEquals(Change.empty(), app3.instance().change()); } @Test public void testChangeCompletion() { var app = tester.newDeploymentContext().submit().deploy(); var version = new Version("7.1"); tester.controllerTester().upgradeSystem(version); tester.upgrader().maintain(); app.runJob(systemTest).runJob(stagingTest).runJob(productionUsCentral1); app.submit(); tester.triggerJobs(); tester.outstandingChangeDeployer().run(); assertEquals(Change.of(version), app.instance().change()); app.runJob(productionUsEast3).runJob(productionUsWest1); tester.triggerJobs(); tester.outstandingChangeDeployer().run(); assertEquals(Change.of(app.lastSubmission().get()), app.instance().change()); } @Test public void mixedDirectAndPipelineJobsInProduction() { ApplicationPackage cdPackage = new ApplicationPackageBuilder().region("cd-us-east-1") .region("cd-aws-us-east-1a") .build(); var zones = List.of(ZoneId.from("test.cd-us-west-1"), ZoneId.from("staging.cd-us-west-1"), ZoneId.from("prod.cd-us-east-1"),
// Mixes operator-forced direct deployments to prod with normal pipeline runs in the CD system,
// across a system upgrade, verifying aborted/re-run jobs at each force-trigger.
// testsInSeparateInstance (start): spec where a 'canary' instance owns test and staging
// while 'default' holds the production deployments.
ZoneId.from("prod.cd-aws-us-east-1a")); tester.controllerTester() .setZones(zones, SystemName.cd) .setRoutingMethod(zones, RoutingMethod.shared); tester.controllerTester().upgradeSystem(Version.fromString("6.1")); tester.controllerTester().computeVersionStatus(); var app = tester.newDeploymentContext(); app.runJob(productionCdUsEast1, cdPackage); app.submit(cdPackage); app.runJob(systemTest); tester.controller().applications().deploymentTrigger().forceTrigger(app.instanceId(), productionCdUsEast1, "user", false); app.runJob(productionCdUsEast1) .abortJob(stagingTest) .runJob(stagingTest) .runJob(productionCdAwsUsEast1a); app.runJob(productionCdUsEast1, cdPackage); var version = new Version("7.1"); tester.controllerTester().upgradeSystem(version); tester.upgrader().maintain(); tester.controller().applications().deploymentTrigger().forceTrigger(app.instanceId(), productionCdUsEast1, "user", false); app.runJob(productionCdUsEast1) .abortJob(systemTest) .jobAborted(stagingTest) .runJob(systemTest) .runJob(stagingTest) .runJob(productionCdAwsUsEast1a); app.runJob(productionCdUsEast1, cdPackage); app.submit(cdPackage); app.runJob(systemTest); tester.controller().applications().deploymentTrigger().forceTrigger(app.instanceId(), productionCdUsEast1, "user", false); app.runJob(productionCdUsEast1) .jobAborted(stagingTest) .runJob(stagingTest) .runJob(productionCdAwsUsEast1a); } @Test public void testsInSeparateInstance() { String deploymentSpec = "<deployment version='1.0' athenz-domain='domain' athenz-service='service'>\n" + " <instance id='canary'>\n" + " <upgrade policy='canary' />\n" + " <test />\n" + " <staging />\n" + " </instance>\n" + " <instance id='default'>\n" + " <prod>\n" + " <region active='true'>eu-west-1</region>\n" + " <test>eu-west-1</test>\n" + " </prod>\n" + " </instance>\n" + "</deployment>\n"; ApplicationPackage applicationPackage = ApplicationPackageBuilder.fromDeploymentXml(deploymentSpec); var canary = tester.newDeploymentContext("t", "a",
// testsInSeparateInstance (end): canary runs test/staging for each revision and upgrade,
// after which the conservative 'default' instance deploys and tests in production.
// testEagerTests: when a newer submission arrives while staging is running, the running
// staging job keeps its original target (build1) but the next triggered run retargets
// build2 with build1 as its source.
"canary").submit(applicationPackage); var conservative = tester.newDeploymentContext("t", "a", "default"); canary.runJob(systemTest) .runJob(stagingTest); conservative.runJob(productionEuWest1) .runJob(testEuWest1); canary.submit(applicationPackage) .runJob(systemTest) .runJob(stagingTest); tester.outstandingChangeDeployer().run(); conservative.runJob(productionEuWest1) .runJob(testEuWest1); tester.controllerTester().upgradeSystem(new Version("7.7.7")); tester.upgrader().maintain(); canary.runJob(systemTest) .runJob(stagingTest); tester.upgrader().maintain(); conservative.runJob(productionEuWest1) .runJob(testEuWest1); } @Test public void testEagerTests() { var app = tester.newDeploymentContext().submit().deploy(); Version version1 = new Version("7.8.9"); ApplicationVersion build1 = app.lastSubmission().get(); tester.controllerTester().upgradeSystem(version1); tester.upgrader().maintain(); app.runJob(stagingTest); app.submit(); ApplicationVersion build2 = app.lastSubmission().get(); assertNotEquals(build1, build2); tester.triggerJobs(); app.assertRunning(stagingTest); assertEquals(version1, app.instanceJobs().get(stagingTest).lastCompleted().get().versions().targetPlatform()); assertEquals(build1, app.instanceJobs().get(stagingTest).lastCompleted().get().versions().targetApplication()); assertEquals(version1, app.instanceJobs().get(stagingTest).lastTriggered().get().versions().sourcePlatform().get()); assertEquals(build1, app.instanceJobs().get(stagingTest).lastTriggered().get().versions().sourceApplication().get()); assertEquals(version1, app.instanceJobs().get(stagingTest).lastTriggered().get().versions().targetPlatform()); assertEquals(build2, app.instanceJobs().get(stagingTest).lastTriggered().get().versions().targetApplication()); app.runJob(systemTest) .runJob(productionUsCentral1) .runJob(productionUsEast3) .runJob(productionUsWest1); tester.outstandingChangeDeployer().run(); assertEquals(RunStatus.running, tester.jobs().last(app.instanceId(),
// testTriggeringOfIdleTestJobsWhenFirstDeploymentIsOnNewerVersionThanChange: after version3 is
// marked broken, the change falls back to version2; us-east-3 keeps its version3 deployment
// while us-west-1 completes on version2, leaving no jobs to run.
// testRetriggerQueue (start): re-trigger a running prod job, then queue further re-triggers.
stagingTest).get().status()); app.runJob(stagingTest); tester.triggerJobs(); app.assertNotRunning(stagingTest); } @Test public void testTriggeringOfIdleTestJobsWhenFirstDeploymentIsOnNewerVersionThanChange() { ApplicationPackage applicationPackage = new ApplicationPackageBuilder().systemTest() .stagingTest() .region("us-east-3") .region("us-west-1") .build(); var app = tester.newDeploymentContext().submit(applicationPackage).deploy(); var appToAvoidVersionGC = tester.newDeploymentContext("g", "c", "default").submit().deploy(); Version version2 = new Version("7.8.9"); Version version3 = new Version("8.9.10"); tester.controllerTester().upgradeSystem(version2); tester.deploymentTrigger().triggerChange(appToAvoidVersionGC.instanceId(), Change.of(version2)); appToAvoidVersionGC.deployPlatform(version2); tester.controllerTester().upgradeSystem(version3); tester.deploymentTrigger().triggerChange(app.instanceId(), Change.of(version3)); app.runJob(systemTest).runJob(stagingTest); tester.triggerJobs(); tester.upgrader().overrideConfidence(version3, VespaVersion.Confidence.broken); tester.controllerTester().computeVersionStatus(); tester.upgrader().run(); assertEquals(Optional.of(version2), app.instance().change().platform()); app.runJob(systemTest) .runJob(productionUsEast3) .runJob(stagingTest) .runJob(productionUsWest1); assertEquals(version3, app.instanceJobs().get(productionUsEast3).lastSuccess().get().versions().targetPlatform()); assertEquals(version2, app.instanceJobs().get(productionUsWest1).lastSuccess().get().versions().targetPlatform()); assertEquals(Map.of(), app.deploymentStatus().jobsToRun()); assertEquals(Change.empty(), app.instance().change()); assertEquals(List.of(), tester.jobs().active()); } @Test public void testRetriggerQueue() { var app = tester.newDeploymentContext().submit().deploy(); app.submit(); tester.triggerJobs(); tester.deploymentTrigger().reTrigger(app.instanceId(), productionUsEast3);
// Two queued re-triggers for the same deployment collapse to a single persisted entry.
tester.deploymentTrigger().reTriggerOrAddToQueue(app.deploymentIdIn(ZoneId.from("prod", "us-east-3"))); tester.deploymentTrigger().reTriggerOrAddToQueue(app.deploymentIdIn(ZoneId.from("prod", "us-east-3"))); List<RetriggerEntry> retriggerEntries = tester.controller().curator().readRetriggerEntries(); Assert.assertEquals(1, retriggerEntries.size()); } }
/**
 * Tests of the deployment trigger: which jobs are (re)triggered, aborted, paused, delayed
 * and queued as applications submit new revisions and the system upgrades.
 * Uses a {@code DeploymentTester} with a manually advanced clock.
 */
class DeploymentTriggerTest { private final DeploymentTester tester = new DeploymentTester(); @Test public void testTriggerFailing() { ApplicationPackage applicationPackage = new ApplicationPackageBuilder() .upgradePolicy("default") .region("us-west-1") .build(); var app = tester.newDeploymentContext().submit(applicationPackage).deploy(); Version version = Version.fromString("6.3"); tester.controllerTester().upgradeSystem(version); tester.upgrader().maintain(); app.failDeployment(stagingTest); tester.triggerJobs(); assertEquals("Retried dead job", 2, tester.jobs().active().size()); app.assertRunning(stagingTest); app.runJob(stagingTest); app.assertRunning(systemTest); assertEquals(1, tester.jobs().active().size()); app.timeOutUpgrade(systemTest); tester.triggerJobs(); assertEquals("Job is retried on failure", 1, tester.jobs().active().size()); app.runJob(systemTest); tester.triggerJobs(); app.assertRunning(productionUsWest1); tester.applications().lockApplicationOrThrow(app.application().id(), locked -> tester.applications().store(locked.withProjectId(OptionalLong.empty()))); app.timeOutConvergence(productionUsWest1); tester.triggerJobs(); assertEquals("Job is not triggered when no projectId is present", 0, tester.jobs().active().size()); } @Test public void leadingUpgradeAllowsApplicationChangeWhileUpgrading() { var applicationPackage = new ApplicationPackageBuilder().region("us-east-3") .upgradeRollout("leading") .build(); var app = tester.newDeploymentContext(); app.submit(applicationPackage).deploy(); Change upgrade = Change.of(new Version("7.8.9")); tester.controllerTester().upgradeSystem(upgrade.platform().get()); tester.upgrader().maintain(); app.runJob(systemTest).runJob(stagingTest); tester.triggerJobs(); app.assertRunning(productionUsEast3); assertEquals(upgrade, app.instance().change()); app.submit(applicationPackage); assertEquals(upgrade.with(app.lastSubmission().get()), app.instance().change()); } @Test public void abortsJobsOnNewApplicationChange() {
// abortsJobsOnNewApplicationChange: a running production job is aborted (after the currently
// running test jobs complete) when a newer application change arrives; only on a later
// submission during an upgrade does the prod job show as aborted in the active set.
// deploymentSpecWithDelays (start): spec with delays between system test and prod regions.
var app = tester.newDeploymentContext(); app.submit() .runJob(systemTest) .runJob(stagingTest); tester.triggerJobs(); RunId id = tester.jobs().last(app.instanceId(), productionUsCentral1).get().id(); assertTrue(tester.jobs().active(id).isPresent()); app.submit(); assertTrue(tester.jobs().active(id).isPresent()); tester.triggerJobs(); tester.runner().run(); assertTrue(tester.jobs().active(id).isPresent()); app.runJob(systemTest).runJob(stagingTest).runJob(stagingTest); tester.triggerJobs(); app.jobAborted(productionUsCentral1); app.runJob(productionUsCentral1).runJob(productionUsWest1).runJob(productionUsEast3); assertEquals(Change.empty(), app.instance().change()); tester.controllerTester().upgradeSystem(new Version("8.9")); tester.upgrader().maintain(); app.runJob(systemTest).runJob(stagingTest); tester.triggerJobs(); app.submit(); app.runJob(systemTest).runJob(stagingTest); tester.triggerJobs(); tester.runner().run(); assertEquals(EnumSet.of(productionUsCentral1), tester.jobs().active().stream() .map(run -> run.id().type()) .collect(Collectors.toCollection(() -> EnumSet.noneOf(JobType.class)))); } @Test public void deploymentSpecWithDelays() { ApplicationPackage applicationPackage = new ApplicationPackageBuilder() .systemTest() .delay(Duration.ofSeconds(30)) .region("us-west-1") .delay(Duration.ofMinutes(2)) .delay(Duration.ofMinutes(2)) .region("us-central-1") .delay(Duration.ofMinutes(10)) .build(); var app = tester.newDeploymentContext().submit(applicationPackage); app.runJob(systemTest); tester.clock().advance(Duration.ofSeconds(15)); app.runJob(stagingTest); tester.triggerJobs(); assertEquals(0, tester.jobs().active().size()); tester.clock().advance(Duration.ofSeconds(15)); tester.triggerJobs(); assertEquals(1, tester.jobs().active().size()); app.assertRunning(productionUsWest1); tester.clock().advance(Duration.ofMinutes(3)); tester.triggerJobs(); assertEquals(1, tester.jobs().active().size()); app.assertRunning(productionUsWest1);
// deploymentSpecWithDelays (end): consecutive delays add up (2m + 2m) before us-central-1
// becomes eligible; the trailing 10-minute delay triggers nothing further.
// deploymentSpecWithParallelDeployments: us-west-1 and us-east-3 run concurrently between the
// serial us-central-1 and eu-west-1 steps.
// testNoOtherChangesDuringSuspension (start): suspend us-central-1 after its deployment.
app.runJob(productionUsWest1); tester.triggerJobs(); assertTrue("No more jobs triggered at this time", tester.jobs().active().isEmpty()); tester.clock().advance(Duration.ofMinutes(3)); tester.triggerJobs(); assertTrue("No more jobs triggered at this time", tester.jobs().active().isEmpty()); tester.clock().advance(Duration.ofMinutes(1)); tester.triggerJobs(); app.runJob(productionUsCentral1); assertTrue("All jobs consumed", tester.jobs().active().isEmpty()); tester.clock().advance(Duration.ofMinutes(10)); tester.triggerJobs(); assertTrue("All jobs consumed", tester.jobs().active().isEmpty()); } @Test public void deploymentSpecWithParallelDeployments() { ApplicationPackage applicationPackage = new ApplicationPackageBuilder() .region("us-central-1") .parallel("us-west-1", "us-east-3") .region("eu-west-1") .build(); var app = tester.newDeploymentContext().submit(applicationPackage); app.runJob(systemTest).runJob(stagingTest); tester.triggerJobs(); assertEquals(1, tester.jobs().active().size()); app.runJob(productionUsCentral1); tester.triggerJobs(); assertEquals(2, tester.jobs().active().size()); app.assertRunning(productionUsEast3); app.assertRunning(productionUsWest1); app.runJob(productionUsWest1); assertEquals(1, tester.jobs().active().size()); app.assertRunning(productionUsEast3); app.runJob(productionUsEast3); tester.triggerJobs(); assertEquals(1, tester.jobs().active().size()); app.runJob(productionEuWest1); assertTrue("All jobs consumed", tester.jobs().active().isEmpty()); } @Test public void testNoOtherChangesDuringSuspension() { ApplicationPackage applicationPackage = new ApplicationPackageBuilder() .region("us-central-1") .parallel("us-west-1", "us-east-3") .build(); var application = tester.newDeploymentContext().submit().deploy(); tester.configServer().setSuspension(application.deploymentIdIn(ZoneId.from("prod", "us-central-1")), true); application.submit() .runJob(systemTest) .runJob(stagingTest) .runJob(productionUsCentral1); tester.triggerJobs();
// testNoOtherChangesDuringSuspension (end): downstream regions are held while us-central-1 is
// suspended and resume once the suspension is lifted.
// testBlockRevisionChange: a revision submitted inside a block-change window stays outstanding
// until the window passes, then deploys.
// testCompletionOfPartOfChangeDuringBlockWindow (start): block window for both kinds of change.
application.assertNotRunning(productionUsEast3); application.assertNotRunning(productionUsWest1); tester.configServer().setSuspension(application.deploymentIdIn(ZoneId.from("prod", "us-central-1")), false); tester.triggerJobs(); application.runJob(productionUsWest1).runJob(productionUsEast3); assertEquals(Change.empty(), application.instance().change()); } @Test public void testBlockRevisionChange() { tester.at(Instant.parse("2017-09-26T17:30:00.00Z")); Version version = Version.fromString("6.2"); tester.controllerTester().upgradeSystem(version); ApplicationPackage applicationPackage = new ApplicationPackageBuilder() .upgradePolicy("canary") .blockChange(true, false, "tue", "18-19", "UTC") .region("us-west-1") .region("us-central-1") .region("us-east-3") .build(); var app = tester.newDeploymentContext().submit(applicationPackage).deploy(); tester.clock().advance(Duration.ofHours(1)); tester.triggerJobs(); assertEquals(0, tester.jobs().active().size()); app.submit(applicationPackage); assertTrue(app.deploymentStatus().outstandingChange(app.instance().name()).hasTargets()); app.runJob(systemTest).runJob(stagingTest); tester.outstandingChangeDeployer().run(); assertTrue(app.deploymentStatus().outstandingChange(app.instance().name()).hasTargets()); tester.triggerJobs(); assertEquals(emptyList(), tester.jobs().active()); tester.clock().advance(Duration.ofHours(2)); tester.outstandingChangeDeployer().run(); assertFalse(app.deploymentStatus().outstandingChange(app.instance().name()).hasTargets()); tester.triggerJobs(); app.assertRunning(productionUsWest1); } @Test public void testCompletionOfPartOfChangeDuringBlockWindow() { tester.at(Instant.parse("2017-09-26T17:30:00.00Z")); ApplicationPackage applicationPackage = new ApplicationPackageBuilder() .blockChange(true, true, "tue", "18", "UTC") .region("us-west-1") .region("us-east-3") .build(); var app = tester.newDeploymentContext().submit(applicationPackage).deploy(); Version v1 = Version.fromString("6.1"); Version v2 =
// testCompletionOfPartOfChangeDuringBlockWindow (end): a change partially completed before the
// window opens may finish the regions it already started, while the next revision stays
// outstanding until the window closes.
// testJobPause (start): pause prod jobs with different expiries.
Version.fromString("6.2"); tester.controllerTester().upgradeSystem(v2); tester.upgrader().maintain(); app.submit(applicationPackage); app.runJob(stagingTest).runJob(systemTest); tester.clock().advance(Duration.ofHours(1)); tester.outstandingChangeDeployer().run(); app.runJob(productionUsWest1); assertEquals(1, app.instanceJobs().get(productionUsWest1).lastSuccess().get().versions().targetApplication().buildNumber().getAsLong()); assertEquals(2, app.deploymentStatus().outstandingChange(app.instance().name()).application().get().buildNumber().getAsLong()); tester.triggerJobs(); assertEquals(3, tester.jobs().active().size()); app.runJob(productionUsEast3); assertEquals(2, tester.jobs().active().size()); assertEquals(Change.empty(), app.instance().change()); assertTrue(app.deploymentStatus().outstandingChange(app.instance().name()).hasTargets()); app.runJob(stagingTest).runJob(systemTest); tester.clock().advance(Duration.ofHours(1)); tester.outstandingChangeDeployer().run(); assertTrue(app.instance().change().hasTargets()); assertFalse(app.deploymentStatus().outstandingChange(app.instance().name()).hasTargets()); app.runJob(productionUsWest1).runJob(productionUsEast3); assertFalse(app.instance().change().hasTargets()); } @Test public void testJobPause() { ApplicationPackage applicationPackage = new ApplicationPackageBuilder() .region("us-west-1") .region("us-east-3") .build(); var app = tester.newDeploymentContext().submit(applicationPackage).deploy(); tester.controllerTester().upgradeSystem(new Version("9.8.7")); tester.upgrader().maintain(); tester.deploymentTrigger().pauseJob(app.instanceId(), productionUsWest1, tester.clock().instant().plus(Duration.ofSeconds(1))); tester.deploymentTrigger().pauseJob(app.instanceId(), productionUsEast3, tester.clock().instant().plus(Duration.ofSeconds(3))); app.runJob(systemTest).runJob(stagingTest); tester.triggerJobs(); app.assertNotRunning(productionUsWest1); tester.clock().advance(Duration.ofMillis(1500)); tester.triggerJobs();
// testJobPause (end): a paused job is not retried even after failure until its pause expires;
// force-triggering a paused job runs it and clears the pause.
// applicationVersionIsNotDowngraded: cancelling the application change does not roll the
// already-deployed newer application version back in us-central-1.
app.assertRunning(productionUsWest1); tester.deploymentTrigger().pauseJob(app.instanceId(), productionUsWest1, tester.clock().instant().plus(Duration.ofSeconds(1))); app.failDeployment(productionUsWest1); tester.triggerJobs(); app.assertNotRunning(productionUsWest1); tester.clock().advance(Duration.ofMillis(1000)); tester.triggerJobs(); app.runJob(productionUsWest1); tester.triggerJobs(); app.assertNotRunning(productionUsEast3); tester.deploymentTrigger().forceTrigger(app.instanceId(), productionUsEast3, "mrTrigger", true); app.assertRunning(productionUsEast3); assertFalse(app.instance().jobPause(productionUsEast3).isPresent()); } @Test public void applicationVersionIsNotDowngraded() { ApplicationPackage applicationPackage = new ApplicationPackageBuilder() .region("us-central-1") .region("eu-west-1") .build(); var app = tester.newDeploymentContext().submit(applicationPackage).deploy(); app.submit(applicationPackage) .runJob(systemTest) .runJob(stagingTest) .timeOutUpgrade(productionUsCentral1); ApplicationVersion appVersion1 = app.lastSubmission().get(); assertEquals(appVersion1, app.deployment(ZoneId.from("prod.us-central-1")).applicationVersion()); tester.deploymentTrigger().cancelChange(app.instanceId(), PLATFORM); assertEquals(Change.of(appVersion1), app.instance().change()); tester.deploymentTrigger().cancelChange(app.instanceId(), ALL); assertEquals(Change.empty(), app.instance().change()); Version version1 = new Version("6.2"); tester.controllerTester().upgradeSystem(version1); tester.upgrader().maintain(); app.runJob(systemTest).runJob(stagingTest).failDeployment(productionUsCentral1); app.runJob(systemTest).runJob(stagingTest); app.runJob(productionUsCentral1).runJob(productionEuWest1); assertEquals(appVersion1, app.deployment(ZoneId.from("prod.us-central-1")).applicationVersion()); } @Test public void downgradingApplicationVersionWorks() { var app = tester.newDeploymentContext().submit().deploy(); ApplicationVersion appVersion0 =
// downgradingApplicationVersionWorks: an explicitly forced change back to an older application
// version deploys it everywhere and updates latestDeployed accordingly.
// settingANoOpChangeIsANoOp: forcing a change to the already-deployed version is ignored.
// stepIsCompletePreciselyWhenItShouldBe (start): two apps on version0.
app.lastSubmission().get(); assertEquals(Optional.of(appVersion0), app.instance().latestDeployed()); app.submit().deploy(); ApplicationVersion appVersion1 = app.lastSubmission().get(); assertEquals(Optional.of(appVersion1), app.instance().latestDeployed()); tester.deploymentTrigger().forceChange(app.instanceId(), Change.of(appVersion0)); assertEquals(Change.of(appVersion0), app.instance().change()); app.runJob(stagingTest) .runJob(productionUsCentral1) .runJob(productionUsEast3) .runJob(productionUsWest1); assertEquals(Change.empty(), app.instance().change()); assertEquals(appVersion0, app.instance().deployments().get(productionUsEast3.zone(tester.controller().system())).applicationVersion()); assertEquals(Optional.of(appVersion0), app.instance().latestDeployed()); } @Test public void settingANoOpChangeIsANoOp() { var app = tester.newDeploymentContext().submit(); assertEquals(Optional.empty(), app.instance().latestDeployed()); app.deploy(); ApplicationVersion appVersion0 = app.lastSubmission().get(); assertEquals(Optional.of(appVersion0), app.instance().latestDeployed()); app.submit().deploy(); ApplicationVersion appVersion1 = app.lastSubmission().get(); assertEquals(Optional.of(appVersion1), app.instance().latestDeployed()); assertEquals(Change.empty(), app.instance().change()); tester.deploymentTrigger().forceChange(app.instanceId(), Change.of(appVersion1)); assertEquals(Change.empty(), app.instance().change()); assertEquals(Optional.of(appVersion1), app.instance().latestDeployed()); } @Test public void stepIsCompletePreciselyWhenItShouldBe() { var app1 = tester.newDeploymentContext("tenant1", "app1", "default"); var app2 = tester.newDeploymentContext("tenant1", "app2", "default"); ApplicationPackage applicationPackage = new ApplicationPackageBuilder() .region("us-central-1") .region("eu-west-1") .build(); Version version0 = Version.fromString("7.0"); tester.controllerTester().upgradeSystem(version0); app1.submit(applicationPackage).deploy();
// stepIsCompletePreciselyWhenItShouldBe: version2 is deployed to us-central-1 but convergence
// times out, so the step is not complete; when version2 is marked broken the change falls back
// to version1 without re-triggering the already-deployed region until a new revision arrives.
app2.submit(applicationPackage).deploy(); Version version1 = Version.fromString("7.1"); tester.controllerTester().upgradeSystem(version1); tester.upgrader().maintain(); app2.deployPlatform(version1); tester.deploymentTrigger().cancelChange(app1.instanceId(), ALL); Version version2 = Version.fromString("7.2"); tester.controllerTester().upgradeSystem(version2); tester.upgrader().maintain(); tester.triggerJobs(); app1.jobAborted(systemTest).jobAborted(stagingTest); app1.runJob(systemTest).runJob(stagingTest).timeOutConvergence(productionUsCentral1); assertEquals(version2, app1.deployment(productionUsCentral1.zone(main)).version()); Instant triggered = app1.instanceJobs().get(productionUsCentral1).lastTriggered().get().start(); tester.clock().advance(Duration.ofHours(1)); tester.upgrader().overrideConfidence(version2, VespaVersion.Confidence.broken); tester.controllerTester().computeVersionStatus(); tester.upgrader().maintain(); assertEquals("Change becomes latest non-broken version", Change.of(version1), app1.instance().change()); app1.runJob(systemTest).runJob(stagingTest) .failDeployment(productionEuWest1); assertEquals(triggered, app1.instanceJobs().get(productionUsCentral1).lastTriggered().get().start()); ApplicationVersion revision1 = app1.lastSubmission().get(); app1.submit(applicationPackage); ApplicationVersion revision2 = app1.lastSubmission().get(); app1.runJob(systemTest).runJob(stagingTest); assertEquals(Change.of(version1).with(revision2), app1.instance().change()); tester.triggerJobs(); app1.assertRunning(productionUsCentral1); assertEquals(version2, app1.instance().deployments().get(productionUsCentral1.zone(main)).version()); assertEquals(revision1, app1.deployment(productionUsCentral1.zone(main)).applicationVersion()); assertTrue(triggered.isBefore(app1.instanceJobs().get(productionUsCentral1).lastTriggered().get().start())); app1.timeOutUpgrade(productionUsCentral1); assertEquals(version2,
// stepIsCompletePreciselyWhenItShouldBe (end): the us-central-1 step counts as complete for the
// version1 change once enough time passes, even though its run never succeeded.
// eachParallelDeployTargetIsTested (start): parallel eu-west-1/us-east-3 deployments.
app1.instance().deployments().get(productionUsCentral1.zone(main)).version()); assertEquals(revision2, app1.deployment(productionUsCentral1.zone(main)).applicationVersion()); tester.clock().advance(Duration.ofHours(2).plus(Duration.ofSeconds(1))); tester.triggerJobs(); app1.assertNotRunning(productionUsCentral1); app1.runJob(systemTest).runJob(stagingTest).runJob(productionEuWest1); assertFalse(app1.instance().change().hasTargets()); assertFalse(app1.instanceJobs().get(productionUsCentral1).isSuccess()); } @Test public void eachParallelDeployTargetIsTested() { ApplicationPackage applicationPackage = new ApplicationPackageBuilder() .parallel("eu-west-1", "us-east-3") .build(); var app = tester.newDeploymentContext().submit(applicationPackage).deploy(); Version v1 = new Version("6.1"); Version v2 = new Version("6.2"); tester.controllerTester().upgradeSystem(v2); tester.upgrader().maintain(); app.runJob(systemTest).runJob(stagingTest); app.timeOutConvergence(productionEuWest1); tester.deploymentTrigger().cancelChange(app.instanceId(), PLATFORM); assertEquals(v2, app.deployment(productionEuWest1.zone(main)).version()); assertEquals(v1, app.deployment(productionUsEast3.zone(main)).version()); app.submit(applicationPackage); tester.triggerJobs(); Version firstTested = app.instanceJobs().get(systemTest).lastTriggered().get().versions().targetPlatform(); assertEquals(firstTested, app.instanceJobs().get(stagingTest).lastTriggered().get().versions().targetPlatform()); app.runJob(systemTest).runJob(stagingTest); tester.triggerJobs(); assertNotEquals(firstTested, app.instanceJobs().get(systemTest).lastTriggered().get().versions().targetPlatform()); assertNotEquals(firstTested, app.instanceJobs().get(stagingTest).lastTriggered().get().versions().targetPlatform()); app.runJob(systemTest).runJob(stagingTest); app.triggerJobs().jobAborted(productionUsEast3); app.failDeployment(productionEuWest1).failDeployment(productionUsEast3)
// eachParallelDeployTargetIsTested (end): both parallel regions end on build 2.
// retriesFailingJobs: a failing system test is retried immediately twice, then backs off
// (10-minute, then 15-minute windows) until a new submission succeeds.
// testPlatformVersionSelection (start): first deployment gets the current system version.
.runJob(productionEuWest1).runJob(productionUsEast3); assertFalse(app.instance().change().hasTargets()); assertEquals(2, app.instanceJobs().get(productionEuWest1).lastSuccess().get().versions().targetApplication().buildNumber().getAsLong()); assertEquals(2, app.instanceJobs().get(productionUsEast3).lastSuccess().get().versions().targetApplication().buildNumber().getAsLong()); } @Test public void retriesFailingJobs() { ApplicationPackage applicationPackage = new ApplicationPackageBuilder() .region("us-central-1") .build(); var app = tester.newDeploymentContext().submit(applicationPackage).deploy(); app.submit(applicationPackage).runJob(stagingTest).failDeployment(systemTest); app.failDeployment(systemTest); tester.triggerJobs(); app.assertRunning(systemTest); tester.clock().advance(Duration.ofSeconds(1)); app.failDeployment(systemTest); tester.triggerJobs(); app.assertNotRunning(systemTest); tester.clock().advance(Duration.ofMinutes(10).plus(Duration.ofSeconds(1))); tester.triggerJobs(); app.assertRunning(systemTest); app.failDeployment(systemTest); tester.clock().advance(Duration.ofMinutes(15)); tester.triggerJobs(); app.assertNotRunning(systemTest); tester.clock().advance(Duration.ofSeconds(2)); tester.triggerJobs(); app.assertRunning(systemTest); app.failDeployment(systemTest); tester.triggerJobs(); app.assertNotRunning(systemTest); app.submit(applicationPackage).deploy(); assertTrue("Deployment completed", tester.jobs().active().isEmpty()); } @Test public void testPlatformVersionSelection() { ApplicationPackage applicationPackage = new ApplicationPackageBuilder() .region("us-west-1") .build(); Version version1 = tester.controller().readSystemVersion(); var app1 = tester.newDeploymentContext(); app1.submit(applicationPackage).deploy(); assertEquals("First deployment gets system version", version1, app1.application().oldestDeployedPlatform().get()); assertEquals(version1, tester.configServer().lastPrepareVersion().get()); Version version2 = new
// testPlatformVersionSelection (end): an application change (including a new region) keeps the
// oldest deployed platform; only an explicit upgrade moves the platform to version2.
// requeueOutOfCapacityStagingJob (start): three apps compete for the single staging zone.
Version(version1.getMajor(), version1.getMinor() + 1); tester.controllerTester().upgradeSystem(version2); applicationPackage = new ApplicationPackageBuilder() .region("us-west-1") .region("us-east-3") .build(); app1.submit(applicationPackage).deploy(); assertEquals("Application change preserves version, and new region gets oldest version too", version1, app1.application().oldestDeployedPlatform().get()); assertEquals(version1, tester.configServer().lastPrepareVersion().get()); assertFalse("Change deployed", app1.instance().change().hasTargets()); tester.upgrader().maintain(); app1.deployPlatform(version2); assertEquals("Version upgrade changes version", version2, app1.application().oldestDeployedPlatform().get()); assertEquals(version2, tester.configServer().lastPrepareVersion().get()); } @Test public void requeueOutOfCapacityStagingJob() { ApplicationPackage applicationPackage = new ApplicationPackageBuilder() .region("us-east-3") .build(); var app1 = tester.newDeploymentContext("tenant1", "app1", "default").submit(applicationPackage); var app2 = tester.newDeploymentContext("tenant2", "app2", "default").submit(applicationPackage); var app3 = tester.newDeploymentContext("tenant3", "app3", "default").submit(applicationPackage); app2.runJob(systemTest); tester.clock().advance(Duration.ofMinutes(1)); app1.runJob(systemTest); tester.clock().advance(Duration.ofMinutes(1)); app3.runJob(systemTest); tester.triggerJobs(); assertEquals(3, tester.jobs().active().size()); tester.abortAll(); assertEquals(List.of(), tester.jobs().active()); tester.readyJobsTrigger().maintain(); assertEquals(1, tester.jobs().active().size()); tester.readyJobsTrigger().maintain(); assertEquals(2, tester.jobs().active().size()); tester.readyJobsTrigger().maintain(); assertEquals(3, tester.jobs().active().size()); app3.outOfCapacity(stagingTest); app1.abortJob(stagingTest); app2.abortJob(stagingTest); tester.readyJobsTrigger().maintain(); app3.assertRunning(stagingTest); assertEquals(1,
tester.jobs().active().size()); tester.readyJobsTrigger().maintain(); assertEquals(2, tester.jobs().active().size()); tester.readyJobsTrigger().maintain(); assertEquals(3, tester.jobs().active().size()); app2.deploy(); app3.deploy(); app1.runJob(stagingTest); assertEquals(0, tester.jobs().active().size()); tester.controllerTester().upgradeSystem(new Version("6.2")); tester.upgrader().maintain(); app1.submit(applicationPackage); tester.readyJobsTrigger().run(); app1.assertRunning(systemTest); app1.assertRunning(stagingTest); assertEquals(2, tester.jobs().active().size()); tester.triggerJobs(); app3.outOfCapacity(systemTest); app1.abortJob(systemTest); app1.abortJob(stagingTest); app2.abortJob(systemTest); app2.abortJob(stagingTest); app3.abortJob(stagingTest); assertEquals(0, tester.jobs().active().size()); assertTrue(app1.instance().change().application().isPresent()); assertFalse(app2.instance().change().application().isPresent()); assertFalse(app3.instance().change().application().isPresent()); tester.readyJobsTrigger().maintain(); app1.assertRunning(stagingTest); app3.assertRunning(systemTest); assertEquals(2, tester.jobs().active().size()); tester.readyJobsTrigger().maintain(); app1.assertRunning(systemTest); assertEquals(4, tester.jobs().active().size()); tester.readyJobsTrigger().maintain(); app3.assertRunning(stagingTest); app2.assertRunning(stagingTest); app2.assertRunning(systemTest); assertEquals(6, tester.jobs().active().size()); } @Test public void testUserInstancesNotInDeploymentSpec() { var app = tester.newDeploymentContext(); tester.controller().applications().createInstance(app.application().id().instance("user")); app.submit().deploy(); } @Test @Test public void testMultipleInstances() { ApplicationPackage applicationPackage = new ApplicationPackageBuilder() .instances("instance1,instance2") .region("us-east-3") .build(); var app = tester.newDeploymentContext("tenant1", "application1", "instance1") .submit(applicationPackage) .completeRollout(); 
assertEquals(2, app.application().instances().size()); assertEquals(2, app.application().productionDeployments().values().stream() .mapToInt(Collection::size) .sum()); } @Test public void testDeclaredProductionTests() { ApplicationPackage applicationPackage = new ApplicationPackageBuilder() .region("us-east-3") .delay(Duration.ofMinutes(1)) .test("us-east-3") .region("us-west-1") .region("us-central-1") .test("us-central-1") .test("us-west-1") .build(); var app = tester.newDeploymentContext().submit(applicationPackage); app.runJob(systemTest).runJob(stagingTest).runJob(productionUsEast3); app.assertNotRunning(productionUsWest1); tester.clock().advance(Duration.ofMinutes(1)); app.runJob(testUsEast3) .runJob(productionUsWest1).runJob(productionUsCentral1) .runJob(testUsCentral1).runJob(testUsWest1); assertEquals(Change.empty(), app.instance().change()); Version version0 = app.application().oldestDeployedPlatform().get(); Version version1 = Version.fromString("7.7"); tester.controllerTester().upgradeSystem(version1); tester.upgrader().maintain(); app.runJob(systemTest).runJob(stagingTest).runJob(productionUsEast3); tester.clock().advance(Duration.ofMinutes(1)); app.failDeployment(testUsEast3); tester.triggerJobs(); app.assertRunning(testUsEast3); tester.upgrader().overrideConfidence(version1, VespaVersion.Confidence.broken); tester.controllerTester().computeVersionStatus(); tester.upgrader().maintain(); app.failDeployment(testUsEast3); app.assertNotRunning(testUsEast3); assertEquals(Change.empty(), app.instance().change()); tester.deploymentTrigger().triggerChange(app.instanceId(), Change.of(version0).withPin()); app.runJob(stagingTest).runJob(productionUsEast3); tester.clock().advance(Duration.ofMinutes(1)); app.failDeployment(testUsEast3); tester.clock().advance(Duration.ofMinutes(11)); app.runJob(testUsEast3); assertEquals(Change.empty().withPin(), app.instance().change()); } @Test public void testDeployComplicatedDeploymentSpec() { String complicatedDeploymentSpec 
= "<deployment version='1.0' athenz-domain='domain' athenz-service='service'>\n" + " <parallel>\n" + " <instance id='instance' athenz-service='in-service'>\n" + " <staging />\n" + " <prod>\n" + " <parallel>\n" + " <region active='true'>us-west-1</region>\n" + " <steps>\n" + " <region active='true'>us-east-3</region>\n" + " <delay hours='2' />\n" + " <region active='true'>eu-west-1</region>\n" + " <delay hours='2' />\n" + " </steps>\n" + " <steps>\n" + " <delay hours='3' />\n" + " <region active='true'>aws-us-east-1a</region>\n" + " <parallel>\n" + " <region active='true' athenz-service='no-service'>ap-northeast-1</region>\n" + " <region active='true'>ap-northeast-2</region>\n" + " <test>aws-us-east-1a</test>\n" + " </parallel>\n" + " </steps>\n" + " <delay hours='3' minutes='30' />\n" + " </parallel>\n" + " <parallel>\n" + " <test>ap-northeast-2</test>\n" + " <test>ap-northeast-1</test>\n" + " </parallel>\n" + " <test>us-east-3</test>\n" + " <region active='true'>ap-southeast-1</region>\n" + " </prod>\n" + " <endpoints>\n" + " <endpoint id='foo' container-id='bar'>\n" + " <region>us-east-3</region>\n" + " </endpoint>\n" + " <endpoint id='nalle' container-id='frosk' />\n" + " <endpoint container-id='quux' />\n" + " </endpoints>\n" + " </instance>\n" + " <instance id='other'>\n" + " <upgrade policy='conservative' />\n" + " <test />\n" + " <block-change revision='true' version='false' days='sat' hours='0-23' time-zone='CET' />\n" + " <prod>\n" + " <region active='true'>eu-west-1</region>\n" + " <test>eu-west-1</test>\n" + " </prod>\n" + " <notifications when='failing'>\n" + " <email role='author' />\n" + " <email address='john@dev' when='failing-commit' />\n" + " <email address='jane@dev' />\n" + " </notifications>\n" + " </instance>\n" + " </parallel>\n" + " <instance id='last'>\n" + " <upgrade policy='conservative' />\n" + " <prod>\n" + " <region active='true'>eu-west-1</region>\n" + " </prod>\n" + " </instance>\n" + "</deployment>\n"; ApplicationPackage 
applicationPackage = ApplicationPackageBuilder.fromDeploymentXml(complicatedDeploymentSpec); var app1 = tester.newDeploymentContext("t", "a", "instance").submit(applicationPackage); var app2 = tester.newDeploymentContext("t", "a", "other"); var app3 = tester.newDeploymentContext("t", "a", "last"); tester.triggerJobs(); assertEquals(2, tester.jobs().active().size()); app1.runJob(stagingTest); tester.triggerJobs(); assertEquals(1, tester.jobs().active().size()); app2.runJob(systemTest); app1.runJob(productionUsWest1); tester.triggerJobs(); assertEquals(2, tester.jobs().active().size()); app1.runJob(productionUsEast3); tester.triggerJobs(); assertEquals(1, tester.jobs().active().size()); tester.clock().advance(Duration.ofHours(2)); app1.runJob(productionEuWest1); tester.triggerJobs(); assertEquals(1, tester.jobs().active().size()); app2.assertNotRunning(testEuWest1); app2.runJob(productionEuWest1); tester.triggerJobs(); assertEquals(1, tester.jobs().active().size()); app2.runJob(testEuWest1); tester.triggerJobs(); assertEquals(List.of(), tester.jobs().active()); tester.clock().advance(Duration.ofHours(1)); app1.runJob(productionAwsUsEast1a); tester.triggerJobs(); assertEquals(3, tester.jobs().active().size()); app1.runJob(testAwsUsEast1a); tester.triggerJobs(); assertEquals(2, tester.jobs().active().size()); app1.runJob(productionApNortheast2); tester.triggerJobs(); assertEquals(1, tester.jobs().active().size()); app1.runJob(productionApNortheast1); tester.triggerJobs(); assertEquals(List.of(), tester.jobs().active()); tester.clock().advance(Duration.ofMinutes(30)); tester.triggerJobs(); assertEquals(List.of(), tester.jobs().active()); tester.clock().advance(Duration.ofMinutes(30)); app1.runJob(testApNortheast1); tester.triggerJobs(); assertEquals(1, tester.jobs().active().size()); app1.runJob(testApNortheast2); tester.triggerJobs(); assertEquals(1, tester.jobs().active().size()); app1.runJob(testUsEast3); tester.triggerJobs(); assertEquals(1, 
tester.jobs().active().size()); app1.runJob(productionApSoutheast1); tester.triggerJobs(); assertEquals(1, tester.jobs().active().size()); app3.runJob(productionEuWest1); tester.triggerJobs(); assertEquals(List.of(), tester.jobs().active()); tester.atMondayMorning().clock().advance(Duration.ofDays(5)); Version version = Version.fromString("8.1"); tester.controllerTester().upgradeSystem(version); tester.upgrader().maintain(); assertEquals(Change.of(version), app1.instance().change()); assertEquals(Change.empty(), app2.instance().change()); assertEquals(Change.empty(), app3.instance().change()); app2.failDeployment(systemTest); app1.submit(applicationPackage); assertEquals(Change.of(version).with(app1.application().latestVersion().get()), app1.instance().change()); app2.runJob(systemTest); app1.jobAborted(stagingTest) .runJob(stagingTest) .runJob(productionUsWest1) .runJob(productionUsEast3); app1.runJob(stagingTest); app2.runJob(systemTest); tester.clock().advance(Duration.ofHours(2)); app1.runJob(productionEuWest1); tester.clock().advance(Duration.ofHours(1)); app1.runJob(productionAwsUsEast1a); tester.triggerJobs(); app1.runJob(testAwsUsEast1a); app1.runJob(productionApNortheast2); app1.runJob(productionApNortheast1); tester.clock().advance(Duration.ofHours(1)); app1.runJob(testApNortheast1); app1.runJob(testApNortheast2); app1.runJob(testUsEast3); app1.runJob(productionApSoutheast1); tester.controllerTester().computeVersionStatus(); tester.upgrader().maintain(); tester.outstandingChangeDeployer().run(); tester.triggerJobs(); assertEquals(2, tester.jobs().active().size()); assertEquals(Change.empty(), app1.instance().change()); assertEquals(Change.of(version), app2.instance().change()); assertEquals(Change.empty(), app3.instance().change()); app1.runJob(stagingTest); app2.runJob(systemTest) .runJob(productionEuWest1) .failDeployment(testEuWest1); tester.clock().advance(Duration.ofDays(1)); tester.upgrader().maintain(); tester.outstandingChangeDeployer().run(); 
assertEquals(0, tester.jobs().active().size()); tester.triggerJobs(); assertEquals(1, tester.jobs().active().size()); assertEquals(Change.empty(), app1.instance().change()); assertEquals(Change.of(version).with(app1.application().latestVersion().get()), app2.instance().change()); app2.runJob(productionEuWest1) .runJob(testEuWest1); assertEquals(Change.empty(), app2.instance().change()); assertEquals(Change.empty(), app3.instance().change()); tester.upgrader().maintain(); tester.outstandingChangeDeployer().run(); assertEquals(Change.of(version), app3.instance().change()); tester.deploymentTrigger().cancelChange(app3.instanceId(), ALL); tester.outstandingChangeDeployer().run(); tester.upgrader().maintain(); assertEquals(Change.of(app1.application().latestVersion().get()), app3.instance().change()); app3.runJob(productionEuWest1); tester.upgrader().maintain(); app3.runJob(productionEuWest1); tester.triggerJobs(); assertEquals(List.of(), tester.jobs().active()); assertEquals(Change.empty(), app3.instance().change()); } @Test public void testChangeCompletion() { var app = tester.newDeploymentContext().submit().deploy(); var version = new Version("7.1"); tester.controllerTester().upgradeSystem(version); tester.upgrader().maintain(); app.runJob(systemTest).runJob(stagingTest).runJob(productionUsCentral1); app.submit(); tester.triggerJobs(); tester.outstandingChangeDeployer().run(); assertEquals(Change.of(version), app.instance().change()); app.runJob(productionUsEast3).runJob(productionUsWest1); tester.triggerJobs(); tester.outstandingChangeDeployer().run(); assertEquals(Change.of(app.lastSubmission().get()), app.instance().change()); } @Test public void mixedDirectAndPipelineJobsInProduction() { ApplicationPackage cdPackage = new ApplicationPackageBuilder().region("cd-us-east-1") .region("cd-aws-us-east-1a") .build(); var zones = List.of(ZoneId.from("test.cd-us-west-1"), ZoneId.from("staging.cd-us-west-1"), ZoneId.from("prod.cd-us-east-1"), 
ZoneId.from("prod.cd-aws-us-east-1a")); tester.controllerTester() .setZones(zones, SystemName.cd) .setRoutingMethod(zones, RoutingMethod.shared); tester.controllerTester().upgradeSystem(Version.fromString("6.1")); tester.controllerTester().computeVersionStatus(); var app = tester.newDeploymentContext(); app.runJob(productionCdUsEast1, cdPackage); app.submit(cdPackage); app.runJob(systemTest); tester.controller().applications().deploymentTrigger().forceTrigger(app.instanceId(), productionCdUsEast1, "user", false); app.runJob(productionCdUsEast1) .abortJob(stagingTest) .runJob(stagingTest) .runJob(productionCdAwsUsEast1a); app.runJob(productionCdUsEast1, cdPackage); var version = new Version("7.1"); tester.controllerTester().upgradeSystem(version); tester.upgrader().maintain(); tester.controller().applications().deploymentTrigger().forceTrigger(app.instanceId(), productionCdUsEast1, "user", false); app.runJob(productionCdUsEast1) .abortJob(systemTest) .jobAborted(stagingTest) .runJob(systemTest) .runJob(stagingTest) .runJob(productionCdAwsUsEast1a); app.runJob(productionCdUsEast1, cdPackage); app.submit(cdPackage); app.runJob(systemTest); tester.controller().applications().deploymentTrigger().forceTrigger(app.instanceId(), productionCdUsEast1, "user", false); app.runJob(productionCdUsEast1) .jobAborted(stagingTest) .runJob(stagingTest) .runJob(productionCdAwsUsEast1a); } @Test public void testsInSeparateInstance() { String deploymentSpec = "<deployment version='1.0' athenz-domain='domain' athenz-service='service'>\n" + " <instance id='canary'>\n" + " <upgrade policy='canary' />\n" + " <test />\n" + " <staging />\n" + " </instance>\n" + " <instance id='default'>\n" + " <prod>\n" + " <region active='true'>eu-west-1</region>\n" + " <test>eu-west-1</test>\n" + " </prod>\n" + " </instance>\n" + "</deployment>\n"; ApplicationPackage applicationPackage = ApplicationPackageBuilder.fromDeploymentXml(deploymentSpec); var canary = tester.newDeploymentContext("t", "a", 
"canary").submit(applicationPackage); var conservative = tester.newDeploymentContext("t", "a", "default"); canary.runJob(systemTest) .runJob(stagingTest); conservative.runJob(productionEuWest1) .runJob(testEuWest1); canary.submit(applicationPackage) .runJob(systemTest) .runJob(stagingTest); tester.outstandingChangeDeployer().run(); conservative.runJob(productionEuWest1) .runJob(testEuWest1); tester.controllerTester().upgradeSystem(new Version("7.7.7")); tester.upgrader().maintain(); canary.runJob(systemTest) .runJob(stagingTest); tester.upgrader().maintain(); conservative.runJob(productionEuWest1) .runJob(testEuWest1); } @Test public void testEagerTests() { var app = tester.newDeploymentContext().submit().deploy(); Version version1 = new Version("7.8.9"); ApplicationVersion build1 = app.lastSubmission().get(); tester.controllerTester().upgradeSystem(version1); tester.upgrader().maintain(); app.runJob(stagingTest); app.submit(); ApplicationVersion build2 = app.lastSubmission().get(); assertNotEquals(build1, build2); tester.triggerJobs(); app.assertRunning(stagingTest); assertEquals(version1, app.instanceJobs().get(stagingTest).lastCompleted().get().versions().targetPlatform()); assertEquals(build1, app.instanceJobs().get(stagingTest).lastCompleted().get().versions().targetApplication()); assertEquals(version1, app.instanceJobs().get(stagingTest).lastTriggered().get().versions().sourcePlatform().get()); assertEquals(build1, app.instanceJobs().get(stagingTest).lastTriggered().get().versions().sourceApplication().get()); assertEquals(version1, app.instanceJobs().get(stagingTest).lastTriggered().get().versions().targetPlatform()); assertEquals(build2, app.instanceJobs().get(stagingTest).lastTriggered().get().versions().targetApplication()); app.runJob(systemTest) .runJob(productionUsCentral1) .runJob(productionUsEast3) .runJob(productionUsWest1); tester.outstandingChangeDeployer().run(); assertEquals(RunStatus.running, tester.jobs().last(app.instanceId(), 
stagingTest).get().status()); app.runJob(stagingTest); tester.triggerJobs(); app.assertNotRunning(stagingTest); } @Test public void testTriggeringOfIdleTestJobsWhenFirstDeploymentIsOnNewerVersionThanChange() { ApplicationPackage applicationPackage = new ApplicationPackageBuilder().systemTest() .stagingTest() .region("us-east-3") .region("us-west-1") .build(); var app = tester.newDeploymentContext().submit(applicationPackage).deploy(); var appToAvoidVersionGC = tester.newDeploymentContext("g", "c", "default").submit().deploy(); Version version2 = new Version("7.8.9"); Version version3 = new Version("8.9.10"); tester.controllerTester().upgradeSystem(version2); tester.deploymentTrigger().triggerChange(appToAvoidVersionGC.instanceId(), Change.of(version2)); appToAvoidVersionGC.deployPlatform(version2); tester.controllerTester().upgradeSystem(version3); tester.deploymentTrigger().triggerChange(app.instanceId(), Change.of(version3)); app.runJob(systemTest).runJob(stagingTest); tester.triggerJobs(); tester.upgrader().overrideConfidence(version3, VespaVersion.Confidence.broken); tester.controllerTester().computeVersionStatus(); tester.upgrader().run(); assertEquals(Optional.of(version2), app.instance().change().platform()); app.runJob(systemTest) .runJob(productionUsEast3) .runJob(stagingTest) .runJob(productionUsWest1); assertEquals(version3, app.instanceJobs().get(productionUsEast3).lastSuccess().get().versions().targetPlatform()); assertEquals(version2, app.instanceJobs().get(productionUsWest1).lastSuccess().get().versions().targetPlatform()); assertEquals(Map.of(), app.deploymentStatus().jobsToRun()); assertEquals(Change.empty(), app.instance().change()); assertEquals(List.of(), tester.jobs().active()); } @Test public void testRetriggerQueue() { var app = tester.newDeploymentContext().submit().deploy(); app.submit(); tester.triggerJobs(); tester.deploymentTrigger().reTrigger(app.instanceId(), productionUsEast3); 
tester.deploymentTrigger().reTriggerOrAddToQueue(app.deploymentIdIn(ZoneId.from("prod", "us-east-3"))); tester.deploymentTrigger().reTriggerOrAddToQueue(app.deploymentIdIn(ZoneId.from("prod", "us-east-3"))); List<RetriggerEntry> retriggerEntries = tester.controller().curator().readRetriggerEntries(); Assert.assertEquals(1, retriggerEntries.size()); } }
Since you already created an object for this, you could delegate this to it.
public void init() { super.init(); fieldConsumerSettings.debugRendering = false; fieldConsumerSettings.jsonDeepMaps = false; fieldConsumerSettings.jsonWsets = false; fieldConsumerSettings.jsonMapsAll = false; fieldConsumerSettings.jsonWsetsAll = false; fieldConsumerSettings.tensorShortForm = false; setGenerator(null, fieldConsumerSettings); renderedChildren = null; timeSource = System::currentTimeMillis; stream = null; }
fieldConsumerSettings.debugRendering = false;
public void init() { super.init(); fieldConsumerSettings.init(); setGenerator(null, fieldConsumerSettings); renderedChildren = null; timeSource = System::currentTimeMillis; stream = null; }
class FieldConsumerSettings { boolean debugRendering = false; boolean jsonDeepMaps = false; boolean jsonWsets = false; boolean jsonMapsAll = false; boolean jsonWsetsAll = false; boolean tensorShortForm = false; boolean convertDeep() { return (jsonDeepMaps || jsonWsets); } }
class {@link FieldConsumer}
Perhaps the whole result could be kept here, so there could be a separate method to get the cleanup task? In my view, it's a bit odd that waitForNextGen returns a cleanup task.
public Runnable waitForNextComponentGeneration(Injector discInjector, boolean isInitializing) { Container.ComponentGraphResult result = container.waitForNextComponentGeneration( this.currentGraph, createFallbackInjector(vespaContainer, discInjector), isInitializing); this.currentGraph = result.newGraph(); return result.oldComponentsCleanupTask(); }
this.currentGraph = result.newGraph();
public Runnable waitForNextComponentGeneration(Injector discInjector, boolean isInitializing) { Container.ComponentGraphResult result = container.waitForNextComponentGeneration( this.currentGraph, createFallbackInjector(vespaContainer, discInjector), isInitializing); this.currentGraph = result.newGraph(); return result.oldComponentsCleanupTask(); }
class ContainerAndDiOsgi extends OsgiImpl implements OsgiWrapper { private final OsgiFramework osgiFramework; private final ApplicationBundleLoader applicationBundleLoader; private final PlatformBundleLoader platformBundleLoader; public ContainerAndDiOsgi(OsgiFramework osgiFramework, FileAcquirer fileAcquirer) { super(osgiFramework); this.osgiFramework = osgiFramework; applicationBundleLoader = new ApplicationBundleLoader(this, new FileAcquirerBundleInstaller(fileAcquirer)); platformBundleLoader = new PlatformBundleLoader(this); } @Override public void installPlatformBundles(Collection<String> bundlePaths) { if (osgiFramework.isFelixFramework()) { log.fine("Installing platform bundles."); platformBundleLoader.useBundles(new ArrayList<>(bundlePaths)); } } @Override public Set<Bundle> useApplicationBundles(Collection<FileReference> bundles) { log.info("Installing bundles from the latest application"); return applicationBundleLoader.useBundles(new ArrayList<>(bundles)); } }
class ContainerAndDiOsgi extends OsgiImpl implements OsgiWrapper { private final OsgiFramework osgiFramework; private final ApplicationBundleLoader applicationBundleLoader; private final PlatformBundleLoader platformBundleLoader; public ContainerAndDiOsgi(OsgiFramework osgiFramework, FileAcquirer fileAcquirer) { super(osgiFramework); this.osgiFramework = osgiFramework; applicationBundleLoader = new ApplicationBundleLoader(this, new FileAcquirerBundleInstaller(fileAcquirer)); platformBundleLoader = new PlatformBundleLoader(this); } @Override public void installPlatformBundles(Collection<String> bundlePaths) { if (osgiFramework.isFelixFramework()) { log.fine("Installing platform bundles."); platformBundleLoader.useBundles(new ArrayList<>(bundlePaths)); } } @Override public Set<Bundle> useApplicationBundles(Collection<FileReference> bundles) { log.info("Installing bundles from the latest application"); return applicationBundleLoader.useBundles(new ArrayList<>(bundles)); } }
The cleanup tasks may not necessarily be executed in chronological order. Only the caller, `ConfiguredApplication`, knows when the previous components can be safely deconstructed. `HandlersConfigurerDi` could store the cleanup tasks, indexed by config generation.
public Runnable waitForNextComponentGeneration(Injector discInjector, boolean isInitializing) { Container.ComponentGraphResult result = container.waitForNextComponentGeneration( this.currentGraph, createFallbackInjector(vespaContainer, discInjector), isInitializing); this.currentGraph = result.newGraph(); return result.oldComponentsCleanupTask(); }
this.currentGraph = result.newGraph();
public Runnable waitForNextComponentGeneration(Injector discInjector, boolean isInitializing) { Container.ComponentGraphResult result = container.waitForNextComponentGeneration( this.currentGraph, createFallbackInjector(vespaContainer, discInjector), isInitializing); this.currentGraph = result.newGraph(); return result.oldComponentsCleanupTask(); }
class ContainerAndDiOsgi extends OsgiImpl implements OsgiWrapper { private final OsgiFramework osgiFramework; private final ApplicationBundleLoader applicationBundleLoader; private final PlatformBundleLoader platformBundleLoader; public ContainerAndDiOsgi(OsgiFramework osgiFramework, FileAcquirer fileAcquirer) { super(osgiFramework); this.osgiFramework = osgiFramework; applicationBundleLoader = new ApplicationBundleLoader(this, new FileAcquirerBundleInstaller(fileAcquirer)); platformBundleLoader = new PlatformBundleLoader(this); } @Override public void installPlatformBundles(Collection<String> bundlePaths) { if (osgiFramework.isFelixFramework()) { log.fine("Installing platform bundles."); platformBundleLoader.useBundles(new ArrayList<>(bundlePaths)); } } @Override public Set<Bundle> useApplicationBundles(Collection<FileReference> bundles) { log.info("Installing bundles from the latest application"); return applicationBundleLoader.useBundles(new ArrayList<>(bundles)); } }
class ContainerAndDiOsgi extends OsgiImpl implements OsgiWrapper { private final OsgiFramework osgiFramework; private final ApplicationBundleLoader applicationBundleLoader; private final PlatformBundleLoader platformBundleLoader; public ContainerAndDiOsgi(OsgiFramework osgiFramework, FileAcquirer fileAcquirer) { super(osgiFramework); this.osgiFramework = osgiFramework; applicationBundleLoader = new ApplicationBundleLoader(this, new FileAcquirerBundleInstaller(fileAcquirer)); platformBundleLoader = new PlatformBundleLoader(this); } @Override public void installPlatformBundles(Collection<String> bundlePaths) { if (osgiFramework.isFelixFramework()) { log.fine("Installing platform bundles."); platformBundleLoader.useBundles(new ArrayList<>(bundlePaths)); } } @Override public Set<Bundle> useApplicationBundles(Collection<FileReference> bundles) { log.info("Installing bundles from the latest application"); return applicationBundleLoader.useBundles(new ArrayList<>(bundles)); } }
The cheapest API would be to get all HostInfos for an application in the Orchestrator, which has the same performance per application as getNodeStatus() has per node. If there are any performance issues, we could expose the former in the Orchestrator API.
public boolean canAllocateTenantNodeTo(Node host, boolean dynamicProvisioning) { if ( ! host.type().canRun(NodeType.tenant)) return false; if (host.status().wantToRetire()) return false; if (host.allocation().map(alloc -> alloc.membership().retired()).orElse(false)) return false; if (suspended(host)) return false; if (dynamicProvisioning) return EnumSet.of(Node.State.active, Node.State.ready, Node.State.provisioned).contains(host.state()); else return host.state() == Node.State.active; }
if (suspended(host)) return false;
public boolean canAllocateTenantNodeTo(Node host, boolean dynamicProvisioning) { if ( ! host.type().canRun(NodeType.tenant)) return false; if (host.status().wantToRetire()) return false; if (host.allocation().map(alloc -> alloc.membership().retired()).orElse(false)) return false; if (suspended(host)) return false; if (dynamicProvisioning) return EnumSet.of(Node.State.active, Node.State.ready, Node.State.provisioned).contains(host.state()); else return host.state() == Node.State.active; }
/**
 * The node-repository's view of all nodes, backed by a Curator database.
 *
 * Provides reading, adding and state transitions of nodes, with the locking needed to do so
 * safely: mutations of allocated nodes take the owning application's lock, mutations of
 * unallocated nodes take the unallocated-nodes lock.
 */
class Nodes {

    private static final Logger log = Logger.getLogger(Nodes.class.getName());

    private final CuratorDatabaseClient db;
    private final Zone zone;
    private final Clock clock;
    private final Orchestrator orchestrator;

    public Nodes(CuratorDatabaseClient db, Zone zone, Clock clock, Orchestrator orchestrator) {
        this.zone = zone;
        this.clock = clock;
        this.db = db;
        this.orchestrator = orchestrator;
    }

    /** Read and write all nodes to make sure they are stored in the latest version of the serialized format */
    public void rewrite() {
        Instant start = clock.instant();
        int nodesWritten = 0;
        for (Node.State state : Node.State.values()) {
            List<Node> nodes = db.readNodes(state);
            // Writing back unchanged nodes re-serializes them in the current format
            db.writeTo(state, nodes, Agent.system, Optional.empty());
            nodesWritten += nodes.size();
        }
        Instant end = clock.instant();
        log.log(Level.INFO, String.format("Rewrote %d nodes in %s", nodesWritten, Duration.between(start, end)));
    }

    /**
     * Finds and returns the node with the hostname in any of the given states, or empty if not found
     *
     * @param hostname the full host name of the node
     * @param inState the states the node may be in. If no states are given, it will be returned from any state
     * @return the node, or empty if it was not found in any of the given states
     */
    public Optional<Node> node(String hostname, Node.State... inState) {
        return db.readNode(hostname, inState);
    }

    /**
     * Returns a list of nodes in this repository in any of the given states
     *
     * @param inState the states to return nodes from. If no states are given, all nodes of the given type are returned
     */
    public NodeList list(Node.State... inState) {
        return NodeList.copyOf(db.readNodes(inState));
    }

    /** Returns a locked list of all nodes in this repository */
    public LockedNodeList list(Mutex lock) {
        return new LockedNodeList(list().asList(), lock);
    }

    /**
     * Returns whether the zone managed by this node repository seems to be working.
     * If too many nodes are not responding, there is probably some zone-wide issue
     * and we should probably refrain from making changes to it.
     */
    public boolean isWorking() {
        NodeList activeNodes = list(Node.State.active);
        // Too few nodes to draw any conclusion from the down ratio
        if (activeNodes.size() <= 5) return true;
        NodeList downNodes = activeNodes.down();
        // Zone is considered working when at most 20% of active nodes are down
        return ! ( (double)downNodes.size() / (double)activeNodes.size() > 0.2 );
    }

    /** Adds a list of newly created reserved nodes to the node repository */
    public List<Node> addReservedNodes(LockedNodeList nodes) {
        for (Node node : nodes) {
            if ( node.flavor().getType() != Flavor.Type.DOCKER_CONTAINER)
                illegal("Cannot add " + node + ": This is not a child node");
            if (node.allocation().isEmpty())
                illegal("Cannot add " + node + ": Child nodes need to be allocated");
            Optional<Node> existing = node(node.hostname());
            if (existing.isPresent())
                illegal("Cannot add " + node + ": A node with this name already exists (" +
                        existing.get() + ", " + existing.get().history() + "). Node to be added: " +
                        node + ", " + node.history());
        }
        return db.addNodesInState(nodes.asList(), Node.State.reserved, Agent.system);
    }

    /**
     * Adds a list of (newly created) nodes to the node repository as provisioned nodes.
     * If any of the nodes already exists in the deprovisioned state, the new node will be merged
     * with the history of that node.
     */
    public List<Node> addNodes(List<Node> nodes, Agent agent) {
        try (Mutex lock = lockUnallocated()) {
            List<Node> nodesToAdd =  new ArrayList<>();
            List<Node> nodesToRemove = new ArrayList<>();
            for (int i = 0; i < nodes.size(); i++) {
                var node = nodes.get(i);

                // Check for duplicates earlier in the argument list
                for (int j = 0; j < i; j++) {
                    if (node.equals(nodes.get(j)))
                        illegal("Cannot add nodes: " + node + " is duplicated in the argument list");
                }

                Optional<Node> existing = node(node.hostname());
                if (existing.isPresent()) {
                    // Only a deprovisioned node may be replaced; carry selected state over to the new node
                    if (existing.get().state() != Node.State.deprovisioned)
                        illegal("Cannot add " + node + ": A node with this name already exists");
                    node = node.with(existing.get().history());
                    node = node.with(existing.get().reports());
                    node = node.with(node.status().withFailCount(existing.get().status().failCount()));
                    if (existing.get().status().firmwareVerifiedAt().isPresent())
                        node = node.with(node.status().withFirmwareVerifiedAt(existing.get().status().firmwareVerifiedAt().get()));
                    // Preserve rebuild intent (and its retire flag) across re-provisioning
                    boolean rebuilding = existing.get().status().wantToRebuild();
                    if (rebuilding) {
                        node = node.with(node.status().withWantToRetire(existing.get().status().wantToRetire(),
                                                                        false,
                                                                        rebuilding));
                    }
                    nodesToRemove.add(existing.get());
                }

                nodesToAdd.add(node);
            }
            // Add new nodes and remove replaced deprovisioned nodes atomically
            NestedTransaction transaction = new NestedTransaction();
            List<Node> resultingNodes = db.addNodesInState(IP.Config.verify(nodesToAdd, list(lock)),
                                                           Node.State.provisioned,
                                                           agent,
                                                           transaction);
            db.removeNodes(nodesToRemove, transaction);
            transaction.commit();
            return resultingNodes;
        }
    }

    /** Sets a list of nodes ready and returns the nodes in the ready state */
    public List<Node> setReady(List<Node> nodes, Agent agent, String reason) {
        try (Mutex lock = lockUnallocated()) {
            List<Node> nodesWithResetFields = nodes.stream()
                    .map(node -> {
                        if (node.state() != Node.State.provisioned && node.state() != Node.State.dirty)
                            illegal("Can not set " + node + " ready. It is not provisioned or dirty.");
                        // Clear any retire/deprovision/rebuild intent before reuse
                        return node.withWantToRetire(false, false, false, Agent.system, clock.instant());
                    })
                    .collect(Collectors.toList());
            return db.writeTo(Node.State.ready, nodesWithResetFields, agent, Optional.of(reason));
        }
    }

    public Node setReady(String hostname, Agent agent, String reason) {
        Node nodeToReady = requireNode(hostname);
        if (nodeToReady.state() == Node.State.ready) return nodeToReady;
        return setReady(List.of(nodeToReady), agent, reason).get(0);
    }

    /** Reserve nodes. This method does <b>not</b> lock the node repository */
    public List<Node> reserve(List<Node> nodes) {
        return db.writeTo(Node.State.reserved, nodes, Agent.application, Optional.empty());
    }

    /** Activate nodes. This method does <b>not</b> lock the node repository */
    public List<Node> activate(List<Node> nodes, NestedTransaction transaction) {
        return db.writeTo(Node.State.active, nodes, Agent.application, Optional.empty(), transaction);
    }

    /**
     * Sets a list of nodes to have their allocation removable (active to inactive) in the node repository.
     *
     * @param application the application the nodes belong to
     * @param nodes the nodes to make removable. These nodes MUST be in the active state.
     */
    public void setRemovable(ApplicationId application, List<Node> nodes) {
        try (Mutex lock = lock(application)) {
            List<Node> removableNodes = nodes.stream()
                    .map(node -> node.with(node.allocation().get().removable(true)))
                    .collect(Collectors.toList());
            write(removableNodes, lock);
        }
    }

    /**
     * Deactivates these nodes in a transaction and returns the nodes in the new state which will hold if the
     * transaction commits.
     */
    public List<Node> deactivate(List<Node> nodes, ApplicationTransaction transaction) {
        // Outside production (or in CD systems) deactivated nodes are recycled immediately
        if ( ! zone.environment().isProduction() || zone.system().isCd())
            return deallocate(nodes, Agent.application, "Deactivated by application", transaction.nested());

        // In production, stateless nodes are recycled while stateful nodes are kept inactive
        var stateless = NodeList.copyOf(nodes).stateless();
        var stateful  = NodeList.copyOf(nodes).stateful();
        List<Node> written = new ArrayList<>();
        written.addAll(deallocate(stateless.asList(), Agent.application, "Deactivated by application", transaction.nested()));
        written.addAll(db.writeTo(Node.State.inactive, stateful.asList(), Agent.application, Optional.empty(), transaction.nested()));
        return written;
    }

    /**
     * Fails these nodes in a transaction and returns the nodes in the new state which will hold if the
     * transaction commits.
     */
    public List<Node> fail(List<Node> nodes, ApplicationTransaction transaction) {
        return fail(nodes, Agent.application, "Failed by application", transaction.nested());
    }

    public List<Node> fail(List<Node> nodes, Agent agent, String reason) {
        NestedTransaction transaction = new NestedTransaction();
        nodes = fail(nodes, agent, reason, transaction);
        transaction.commit();
        return nodes;
    }

    private List<Node> fail(List<Node> nodes, Agent agent, String reason, NestedTransaction transaction) {
        // Clear wantToFail since the nodes are now actually moved to failed
        nodes = nodes.stream()
                     .map(n -> n.withWantToFail(false, agent, clock.instant()))
                     .collect(Collectors.toList());
        return db.writeTo(Node.State.failed, nodes, agent, Optional.of(reason), transaction);
    }

    /** Move nodes to the dirty state */
    public List<Node> deallocate(List<Node> nodes, Agent agent, String reason) {
        return performOn(NodeList.copyOf(nodes), (node, lock) -> deallocate(node, agent, reason));
    }

    public List<Node> deallocateRecursively(String hostname, Agent agent, String reason) {
        Node nodeToDirty = node(hostname).orElseThrow(() ->
                new IllegalArgumentException("Could not deallocate " + hostname + ": Node not found"));

        // For hosts, include all children which are not already dirty
        List<Node> nodesToDirty =
                (nodeToDirty.type().isHost() ?
                        Stream.concat(list().childrenOf(hostname).asList().stream(), Stream.of(nodeToDirty)) :
                        Stream.of(nodeToDirty))
                .filter(node -> node.state() != Node.State.dirty)
                .collect(Collectors.toList());

        List<String> hostnamesNotAllowedToDirty = nodesToDirty.stream()
                                                              .filter(node -> node.state() != Node.State.provisioned)
                                                              .filter(node -> node.state() != Node.State.failed)
                                                              .filter(node -> node.state() != Node.State.parked)
                                                              .filter(node -> node.state() != Node.State.breakfixed)
                                                              .map(Node::hostname)
                                                              .collect(Collectors.toList());
        if ( ! hostnamesNotAllowedToDirty.isEmpty())
            illegal("Could not deallocate " + nodeToDirty + ": " +
                    hostnamesNotAllowedToDirty + " are not in states [provisioned, failed, parked, breakfixed]");

        return nodesToDirty.stream().map(node -> deallocate(node, agent, reason)).collect(Collectors.toList());
    }

    /**
     * Set a node dirty or parked, allowed if it is in the provisioned, inactive, failed or parked state.
     * Use this to clean newly provisioned nodes or to recycle failed nodes which have been repaired or put on hold.
     */
    public Node deallocate(Node node, Agent agent, String reason) {
        NestedTransaction transaction = new NestedTransaction();
        Node deallocated = deallocate(node, agent, reason, transaction);
        transaction.commit();
        return deallocated;
    }

    public List<Node> deallocate(List<Node> nodes, Agent agent, String reason, NestedTransaction transaction) {
        return nodes.stream().map(node -> deallocate(node, agent, reason, transaction)).collect(Collectors.toList());
    }

    public Node deallocate(Node node, Agent agent, String reason, NestedTransaction transaction) {
        // Nodes with pending deprovision/rebuild/operator-retire go to parked instead of dirty
        if (parkOnDeallocationOf(node, agent)) {
            return park(node.hostname(), false, agent, reason, transaction);
        } else {
            return db.writeTo(Node.State.dirty, List.of(node), agent, Optional.of(reason), transaction).get(0);
        }
    }

    /**
     * Fails this node and returns it in its new state.
     *
     * @return the node in its new state
     * @throws NoSuchNodeException if the node is not found
     */
    public Node fail(String hostname, Agent agent, String reason) {
        return fail(hostname, true, agent, reason);
    }

    public Node fail(String hostname, boolean keepAllocation, Agent agent, String reason) {
        return move(hostname, Node.State.failed, agent, keepAllocation, Optional.of(reason));
    }

    /**
     * Fails all the nodes that are children of hostname before finally failing the hostname itself.
     * Non-active nodes are failed immediately, while active nodes are marked as wantToFail.
     * The host is failed if it has no active nodes and marked wantToFail if it has.
     *
     * @return all the nodes that were changed by this request
     */
    public List<Node> failOrMarkRecursively(String hostname, Agent agent, String reason) {
        NodeList children = list().childrenOf(hostname);
        List<Node> changed = performOn(children, (node, lock) -> failOrMark(node, agent, reason, lock));

        if (children.state(Node.State.active).isEmpty())
            changed.add(move(hostname, Node.State.failed, agent, true, Optional.of(reason)));
        else
            changed.addAll(performOn(NodeList.of(node(hostname).orElseThrow()),
                                     (node, lock) -> failOrMark(node, agent, reason, lock)));

        return changed;
    }

    private Node failOrMark(Node node, Agent agent, String reason, Mutex lock) {
        if (node.state() == Node.State.active) {
            // Active nodes are only marked; actual failing happens later when it is safe
            node = node.withWantToFail(true, agent, clock.instant());
            write(node, lock);
            return node;
        } else {
            return move(node.hostname(), Node.State.failed, agent, true, Optional.of(reason));
        }
    }

    /**
     * Parks this node and returns it in its new state.
     *
     * @return the node in its new state
     * @throws NoSuchNodeException if the node is not found
     */
    public Node park(String hostname, boolean keepAllocation, Agent agent, String reason) {
        NestedTransaction transaction = new NestedTransaction();
        Node parked = park(hostname, keepAllocation, agent, reason, transaction);
        transaction.commit();
        return parked;
    }

    private Node park(String hostname, boolean keepAllocation, Agent agent, String reason, NestedTransaction transaction) {
        return move(hostname, Node.State.parked, agent, keepAllocation, Optional.of(reason), transaction);
    }

    /**
     * Parks all the nodes that are children of hostname before finally parking the hostname itself.
     *
     * @return List of all the parked nodes in their new state
     */
    public List<Node> parkRecursively(String hostname, Agent agent, String reason) {
        return moveRecursively(hostname, Node.State.parked, agent, Optional.of(reason));
    }

    /**
     * Moves a previously failed or parked node back to the active state.
     *
     * @return the node in its new state
     * @throws NoSuchNodeException if the node is not found
     */
    public Node reactivate(String hostname, Agent agent, String reason) {
        return move(hostname, Node.State.active, agent, true, Optional.of(reason));
    }

    /** Moves a host to breakfixed state, removing any children. */
    public List<Node> breakfixRecursively(String hostname, Agent agent, String reason) {
        Node node = requireNode(hostname);
        try (Mutex lock = lockUnallocated()) {
            requireBreakfixable(node);
            NestedTransaction transaction = new NestedTransaction();
            List<Node> removed = removeChildren(node, false, transaction);
            removed.add(move(node.hostname(), Node.State.breakfixed, agent, true, Optional.of(reason), transaction));
            transaction.commit();
            return removed;
        }
    }

    private List<Node> moveRecursively(String hostname, Node.State toState, Agent agent, Optional<String> reason) {
        NestedTransaction transaction = new NestedTransaction();
        // Children are moved before the host itself
        List<Node> moved = list().childrenOf(hostname).asList().stream()
                                 .map(child -> move(child.hostname(), toState, agent, true, reason, transaction))
                                 .collect(Collectors.toList());
        moved.add(move(hostname, toState, agent, true, reason, transaction));
        transaction.commit();
        return moved;
    }

    /** Move a node to given state */
    private Node move(String hostname, Node.State toState, Agent agent, boolean keepAllocation, Optional<String> reason) {
        NestedTransaction transaction = new NestedTransaction();
        Node moved = move(hostname, toState, agent, keepAllocation, reason, transaction);
        transaction.commit();
        return moved;
    }

    /** Move a node to given state as part of a transaction */
    private Node move(String hostname, Node.State toState, Agent agent, boolean keepAllocation,
                      Optional<String> reason, NestedTransaction transaction) {
        // A fresh copy of the node is read under lock to avoid overwriting concurrent changes
        try (NodeMutex lock = lockAndGetRequired(hostname)) {
            Node node = lock.node();
            if (toState == Node.State.active) {
                if (node.allocation().isEmpty()) illegal("Could not set " + node + " active: It has no allocation");
                if (!keepAllocation) illegal("Could not set " + node + " active: Requested to discard allocation");
                // Guard against two active nodes with the same cluster and index for the same owner
                for (Node currentActive : list(Node.State.active).owner(node.allocation().get().owner())) {
                    if (node.allocation().get().membership().cluster().equals(currentActive.allocation().get().membership().cluster())
                        && node.allocation().get().membership().index() == currentActive.allocation().get().membership().index())
                        illegal("Could not set " + node + " active: Same cluster and index as " + currentActive);
                }
            }
            if (!keepAllocation && node.allocation().isPresent()) {
                node = node.withoutAllocation();
            }
            if (toState == Node.State.deprovisioned) {
                // Deprovisioned nodes give up their IP addresses
                node = node.with(IP.Config.EMPTY);
            }
            return db.writeTo(toState, List.of(node), agent, reason, transaction).get(0);
        }
    }

    /**
     * This method is used by the REST API to handle readying nodes for new allocations. For Linux
     * containers this will remove the node from node repository, otherwise the node will be moved to state ready.
     */
    public Node markNodeAvailableForNewAllocation(String hostname, Agent agent, String reason) {
        Node node = requireNode(hostname);
        if (node.flavor().getType() == Flavor.Type.DOCKER_CONTAINER && node.type() == NodeType.tenant) {
            if (node.state() != Node.State.dirty)
                illegal("Cannot make " + node + " available for new allocation as it is not in state [dirty]");
            return removeRecursively(node, true).get(0);
        }

        if (node.state() == Node.State.ready) return node;

        // Refuse to ready a node whose parent host has hard failures
        Node parentHost = node.parentHostname().flatMap(this::node).orElse(node);
        List<String> failureReasons = NodeFailer.reasonsToFailHost(parentHost);
        if ( ! failureReasons.isEmpty())
            illegal(node + " cannot be readied because it has hard failures: " + failureReasons);

        return setReady(List.of(node), agent, reason).get(0);
    }

    /**
     * Removes all the nodes that are children of hostname before finally removing the hostname itself.
     *
     * @return a List of all the nodes that have been removed or (for hosts) deprovisioned
     */
    public List<Node> removeRecursively(String hostname) {
        Node node = requireNode(hostname);
        return removeRecursively(node, false);
    }

    public List<Node> removeRecursively(Node node, boolean force) {
        try (Mutex lock = lockUnallocated()) {
            requireRemovable(node, false, force);
            NestedTransaction transaction = new NestedTransaction();
            final List<Node> removed;
            if (!node.type().isHost()) {
                removed = List.of(node);
                db.removeNodes(removed, transaction);
            } else {
                removed = removeChildren(node, force, transaction);
                if (zone.getCloud().dynamicProvisioning()) {
                    // Dynamically provisioned hosts disappear completely
                    db.removeNodes(List.of(node), transaction);
                } else {
                    // Statically provisioned hosts are kept as deprovisioned for their history
                    move(node.hostname(), Node.State.deprovisioned, Agent.system, false, Optional.empty(), transaction);
                }
                removed.add(node);
            }
            transaction.commit();
            return removed;
        }
    }

    /** Forgets a deprovisioned node. This removes all traces of the node in the node repository. */
    public void forget(Node node) {
        if (node.state() != Node.State.deprovisioned)
            throw new IllegalArgumentException(node + " must be deprovisioned before it can be forgotten");
        if (node.status().wantToRebuild())
            throw new IllegalArgumentException(node + " is rebuilding and cannot be forgotten");
        NestedTransaction transaction = new NestedTransaction();
        db.removeNodes(List.of(node), transaction);
        transaction.commit();
    }

    private List<Node> removeChildren(Node node, boolean force, NestedTransaction transaction) {
        List<Node> children = list().childrenOf(node).asList();
        children.forEach(child -> requireRemovable(child, true, force));
        db.removeNodes(children, transaction);
        return new ArrayList<>(children);
    }

    /**
     * Throws if the given node cannot be removed. Removal is allowed if:
     *  - Tenant node:
     *    - non-recursively: node is unallocated
     *    - recursively: node is unallocated or node is in failed|parked
     *  - Host node: iff in state provisioned|failed|parked
     *  - Child node:
     *    - non-recursively: node in state ready
     *    - recursively: child is in state provisioned|failed|parked|dirty|ready
     */
    private void requireRemovable(Node node, boolean removingRecursively, boolean force) {
        if (force) return;

        if (node.type() == NodeType.tenant && node.allocation().isPresent()) {
            EnumSet<Node.State> removableStates = EnumSet.of(Node.State.failed, Node.State.parked);
            if (!removingRecursively || !removableStates.contains(node.state()))
                illegal(node + " is currently allocated and cannot be removed while in " + node.state());
        }

        final Set<Node.State> removableStates;
        if (node.type().isHost()) {
            removableStates = EnumSet.of(Node.State.provisioned, Node.State.failed, Node.State.parked);
        } else {
            removableStates = removingRecursively
                    ? EnumSet.of(Node.State.provisioned, Node.State.failed, Node.State.parked, Node.State.dirty, Node.State.ready)
                    : EnumSet.of(Node.State.ready);
        }
        if (!removableStates.contains(node.state()))
            illegal(node + " can not be removed while in " + node.state());
    }

    /**
     * Throws if given node cannot be breakfixed.
     * Breakfix is allowed if the following is true:
     *  - Node is tenant host
     *  - Node is in zone without dynamic provisioning
     *  - Node is in parked or failed state
     */
    private void requireBreakfixable(Node node) {
        if (zone.getCloud().dynamicProvisioning()) {
            illegal("Can not breakfix in zone: " + zone);
        }
        if (node.type() != NodeType.host) {
            illegal(node + " can not be breakfixed as it is not a tenant host");
        }
        Set<Node.State> legalStates = EnumSet.of(Node.State.failed, Node.State.parked);
        if (! legalStates.contains(node.state())) {
            illegal(node + " can not be removed as it is not in the states " + legalStates);
        }
    }

    /**
     * Increases the restart generation of the active nodes matching given filter.
     *
     * @return the nodes in their new state
     */
    public List<Node> restartActive(Predicate<Node> filter) {
        return restart(NodeFilter.in(Set.of(Node.State.active)).and(filter));
    }

    /**
     * Increases the restart generation of the any nodes matching given filter.
     *
     * @return the nodes in their new state
     */
    public List<Node> restart(Predicate<Node> filter) {
        return performOn(filter, (node, lock) -> write(node.withRestart(node.allocation().get().restartGeneration().withIncreasedWanted()),
                                                       lock));
    }

    /**
     * Increases the reboot generation of the nodes matching the filter.
     *
     * @return the nodes in their new state
     */
    public List<Node> reboot(Predicate<Node> filter) {
        return performOn(filter, (node, lock) -> write(node.withReboot(node.status().reboot().withIncreasedWanted()), lock));
    }

    /**
     * Set target OS version of all nodes matching given filter.
     *
     * @return the nodes in their new state
     */
    public List<Node> upgradeOs(Predicate<Node> filter, Optional<Version> version) {
        return performOn(filter, (node, lock) -> {
            var newStatus = node.status().withOsVersion(node.status().osVersion().withWanted(version));
            return write(node.with(newStatus), lock);
        });
    }

    /** Retire nodes matching given filter */
    public List<Node> retire(Predicate<Node> filter, Agent agent, Instant instant) {
        return performOn(filter, (node, lock) -> write(node.withWantToRetire(true, agent, instant), lock));
    }

    /** Retire and deprovision given host and all of its children */
    public List<Node> deprovision(String hostname, Agent agent, Instant instant) {
        return decommission(hostname, DecommissionOperation.deprovision, agent, instant);
    }

    /** Retire and rebuild given host and all of its children */
    public List<Node> rebuild(String hostname, Agent agent, Instant instant) {
        return decommission(hostname, DecommissionOperation.rebuild, agent, instant);
    }

    private List<Node> decommission(String hostname, DecommissionOperation op, Agent agent, Instant instant) {
        Optional<NodeMutex> nodeMutex = lockAndGet(hostname);
        if (nodeMutex.isEmpty()) return List.of();
        Node host = nodeMutex.get().node();
        if (!host.type().isHost()) throw new IllegalArgumentException("Cannot " + op + " non-host " + host);

        List<Node> result;
        boolean wantToDeprovision = op == DecommissionOperation.deprovision;
        boolean wantToRebuild = op == DecommissionOperation.rebuild;
        try (NodeMutex lock = nodeMutex.get(); Mutex allocationLock = lockUnallocated()) {
            // Re-read the host under both locks before mutating it and its children
            host = lock.node();
            result = performOn(list(allocationLock).childrenOf(host), (node, nodeLock) -> {
                Node newNode = node.withWantToRetire(true, wantToDeprovision, wantToRebuild, agent, instant);
                return write(newNode, nodeLock);
            });
            Node newHost = host.withWantToRetire(true, wantToDeprovision, wantToRebuild, agent, instant);
            result.add(write(newHost, lock));
        }
        return result;
    }

    /**
     * Writes this node after it has changed some internal state but NOT changed its state field.
     * This does NOT lock the node repository implicitly, but callers are expected to already hold the lock.
     *
     * @param lock already acquired lock
     * @return the written node for convenience
     */
    public Node write(Node node, Mutex lock) { return write(List.of(node), lock).get(0); }

    /**
     * Writes these nodes after they have changed some internal state but NOT changed their state field.
     * This does NOT lock the node repository implicitly, but callers are expected to already hold the lock.
     *
     * @param lock already acquired lock
     * @return the written nodes for convenience
     */
    public List<Node> write(List<Node> nodes, @SuppressWarnings("unused") Mutex lock) {
        return db.writeTo(nodes, Agent.system, Optional.empty());
    }

    private List<Node> performOn(Predicate<Node> filter, BiFunction<Node, Mutex, Node> action) {
        return performOn(list().matching(filter), action);
    }

    /**
     * Performs an operation requiring locking on all nodes matching some filter.
     *
     * @param action the action to perform
     * @return the set of nodes on which the action was performed, as they became as a result of the operation
     */
    private List<Node> performOn(NodeList nodes, BiFunction<Node, Mutex, Node> action) {
        List<Node> unallocatedNodes = new ArrayList<>();
        ListMap<ApplicationId, Node> allocatedNodes = new ListMap<>();

        // Group nodes by the lock that guards them
        for (Node node : nodes) {
            if (node.allocation().isPresent())
                allocatedNodes.put(node.allocation().get().owner(), node);
            else
                unallocatedNodes.add(node);
        }

        // Perform operation while holding appropriate lock
        List<Node> resultingNodes = new ArrayList<>();
        try (Mutex lock = lockUnallocated()) {
            for (Node node : unallocatedNodes) {
                // Re-read the node under lock; it may have changed or disappeared since the listing
                Optional<Node> currentNode = db.readNode(node.hostname());
                if (currentNode.isEmpty()) continue;
                resultingNodes.add(action.apply(currentNode.get(), lock));
            }
        }
        for (Map.Entry<ApplicationId, List<Node>> applicationNodes : allocatedNodes.entrySet()) {
            try (Mutex lock = lock(applicationNodes.getKey())) {
                for (Node node : applicationNodes.getValue()) {
                    Optional<Node> currentNode = db.readNode(node.hostname());
                    if (currentNode.isEmpty()) continue;
                    resultingNodes.add(action.apply(currentNode.get(), lock));
                }
            }
        }
        return resultingNodes;
    }

    public boolean canAllocateTenantNodeTo(Node host) {
        return canAllocateTenantNodeTo(host, zone.getCloud().dynamicProvisioning());
    }

    /** Returns whether the orchestrator reports the given node as suspended; unknown hosts count as not suspended */
    public boolean suspended(Node node) {
        try {
            return orchestrator.getNodeStatus(new HostName(node.hostname())).isSuspended();
        } catch (HostNameNotFoundException e) {
            // Treat it as not suspended
            return false;
        }
    }

    /** Create a lock which provides exclusive rights to making changes to the given application */
    public Mutex lock(ApplicationId application) { return db.lock(application); }

    /** Create a lock with a timeout which provides exclusive rights to making changes to the given application */
    public Mutex lock(ApplicationId application, Duration timeout) { return db.lock(application, timeout); }

    /** Create a lock which provides exclusive rights to modifying unallocated nodes */
    public Mutex lockUnallocated() { return db.lockInactive(); }

    /** Returns the unallocated/application lock, and the node acquired under that lock. */
    public Optional<NodeMutex> lockAndGet(Node node) {
        Node staleNode = node;
        final int maxRetries = 4;
        for (int i = 0; i < maxRetries; ++i) {
            // The node may move between the unallocated and an application lock between reads,
            // so retry until the owner observed under the lock matches the lock we took
            Mutex lockToClose = lock(staleNode);
            try {
                Optional<Node> freshNode = node(staleNode.hostname(), staleNode.state());
                if (freshNode.isEmpty()) {
                    freshNode = node(staleNode.hostname());
                    if (freshNode.isEmpty()) {
                        return Optional.empty();
                    }
                }

                if (Objects.equals(freshNode.get().allocation().map(Allocation::owner),
                                   staleNode.allocation().map(Allocation::owner))) {
                    NodeMutex nodeMutex = new NodeMutex(freshNode.get(), lockToClose);
                    // Ownership of the lock transfers to the returned NodeMutex
                    lockToClose = null;
                    return Optional.of(nodeMutex);
                }

                // Owner changed; retry with the fresh node (and thus the correct lock)
                staleNode = freshNode.get();
            } finally {
                if (lockToClose != null) lockToClose.close();
            }
        }

        throw new IllegalStateException("Giving up (after " + maxRetries + " attempts) " +
                                        "fetching an up to date node under lock: " + node.hostname());
    }

    /** Returns the unallocated/application lock, and the node acquired under that lock. */
    public Optional<NodeMutex> lockAndGet(String hostname) { return node(hostname).flatMap(this::lockAndGet); }

    /** Returns the unallocated/application lock, and the node acquired under that lock. */
    public NodeMutex lockAndGetRequired(Node node) {
        return lockAndGet(node).orElseThrow(() -> new NoSuchNodeException("No node with hostname '" + node.hostname() + "'"));
    }

    /** Returns the unallocated/application lock, and the node acquired under that lock. */
    public NodeMutex lockAndGetRequired(String hostname) {
        return lockAndGet(hostname).orElseThrow(() -> new NoSuchNodeException("No node with hostname '" + hostname + "'"));
    }

    private Mutex lock(Node node) {
        return node.allocation().isPresent() ? lock(node.allocation().get().owner()) : lockUnallocated();
    }

    private Node requireNode(String hostname) {
        return node(hostname).orElseThrow(() -> new NoSuchNodeException("No node with hostname '" + hostname + "'"));
    }

    private void illegal(String message) {
        throw new IllegalArgumentException(message);
    }

    /** Returns whether node should be parked when deallocated by given agent */
    private static boolean parkOnDeallocationOf(Node node, Agent agent) {
        if (node.state() == Node.State.parked) return false;
        if (agent == Agent.operator) return false;
        if (!node.type().isHost() && node.status().wantToDeprovision()) return false;
        boolean retirementRequestedByOperator = node.status().wantToRetire() &&
                                                node.history().event(History.Event.Type.wantToRetire)
                                                    .map(History.Event::agent)
                                                    .map(a -> a == Agent.operator)
                                                    .orElse(false);
        return node.status().wantToDeprovision() ||
               node.status().wantToRebuild() ||
               retirementRequestedByOperator;
    }

    /** The different ways a host can be decommissioned */
    private enum DecommissionOperation {
        deprovision,
        rebuild,
    }

}
/** The collection of nodes in the node repository, and the state transitions performed on them. */
class Nodes {

    private static final Logger log = Logger.getLogger(Nodes.class.getName());

    private final CuratorDatabaseClient db;
    private final Zone zone;
    private final Clock clock;
    private final Orchestrator orchestrator;

    public Nodes(CuratorDatabaseClient db, Zone zone, Clock clock, Orchestrator orchestrator) {
        this.zone = zone;
        this.clock = clock;
        this.db = db;
        this.orchestrator = orchestrator;
    }

    /** Read and write all nodes to make sure they are stored in the latest version of the serialized format */
    public void rewrite() {
        Instant start = clock.instant();
        int nodesWritten = 0;
        for (Node.State state : Node.State.values()) {
            List<Node> nodes = db.readNodes(state);
            db.writeTo(state, nodes, Agent.system, Optional.empty());
            nodesWritten += nodes.size();
        }
        Instant end = clock.instant();
        log.log(Level.INFO, String.format("Rewrote %d nodes in %s", nodesWritten, Duration.between(start, end)));
    }

    /**
     * Finds and returns the node with the hostname in any of the given states, or empty if not found
     *
     * @param hostname the full host name of the node
     * @param inState the states the node may be in. If no states are given, it will be returned from any state
     * @return the node, or empty if it was not found in any of the given states
     */
    public Optional<Node> node(String hostname, Node.State... inState) {
        return db.readNode(hostname, inState);
    }

    /**
     * Returns a list of nodes in this repository in any of the given states
     *
     * @param inState the states to return nodes from. If no states are given, all nodes of the given type are returned
     */
    public NodeList list(Node.State... inState) {
        return NodeList.copyOf(db.readNodes(inState));
    }

    /** Returns a locked list of all nodes in this repository */
    public LockedNodeList list(Mutex lock) {
        return new LockedNodeList(list().asList(), lock);
    }

    /**
     * Returns whether the zone managed by this node repository seems to be working.
     * If too many nodes are not responding, there is probably some zone-wide issue
     * and we should probably refrain from making changes to it.
     */
    public boolean isWorking() {
        NodeList activeNodes = list(Node.State.active);
        if (activeNodes.size() <= 5) return true; // Too few active nodes to draw a conclusion from
        NodeList downNodes = activeNodes.down();
        // Working iff at most 20% of the active nodes are down
        return ! ( (double)downNodes.size() / (double)activeNodes.size() > 0.2 );
    }

    /** Adds a list of newly created reserved nodes to the node repository */
    public List<Node> addReservedNodes(LockedNodeList nodes) {
        for (Node node : nodes) {
            if ( node.flavor().getType() != Flavor.Type.DOCKER_CONTAINER)
                illegal("Cannot add " + node + ": This is not a child node");
            if (node.allocation().isEmpty())
                illegal("Cannot add " + node + ": Child nodes need to be allocated");
            Optional<Node> existing = node(node.hostname());
            if (existing.isPresent())
                illegal("Cannot add " + node + ": A node with this name already exists (" +
                        existing.get() + ", " + existing.get().history() + "). Node to be added: " +
                        node + ", " + node.history());
        }
        return db.addNodesInState(nodes.asList(), Node.State.reserved, Agent.system);
    }

    /**
     * Adds a list of (newly created) nodes to the node repository as provisioned nodes.
     * If any of the nodes already exists in the deprovisioned state, the new node will be merged
     * with the history of that node.
     */
    public List<Node> addNodes(List<Node> nodes, Agent agent) {
        try (Mutex lock = lockUnallocated()) {
            List<Node> nodesToAdd = new ArrayList<>();
            List<Node> nodesToRemove = new ArrayList<>();
            for (int i = 0; i < nodes.size(); i++) {
                var node = nodes.get(i);

                // Check for duplicates in the argument list itself
                for (int j = 0; j < i; j++) {
                    if (node.equals(nodes.get(j)))
                        illegal("Cannot add nodes: " + node + " is duplicated in the argument list");
                }

                Optional<Node> existing = node(node.hostname());
                if (existing.isPresent()) {
                    if (existing.get().state() != Node.State.deprovisioned)
                        illegal("Cannot add " + node + ": A node with this name already exists");
                    // Carry history, reports, fail count and firmware check over from the deprovisioned node
                    node = node.with(existing.get().history());
                    node = node.with(existing.get().reports());
                    node = node.with(node.status().withFailCount(existing.get().status().failCount()));
                    if (existing.get().status().firmwareVerifiedAt().isPresent())
                        node = node.with(node.status().withFirmwareVerifiedAt(existing.get().status().firmwareVerifiedAt().get()));
                    // Keep retire flags if the old node was being rebuilt
                    boolean rebuilding = existing.get().status().wantToRebuild();
                    if (rebuilding) {
                        node = node.with(node.status().withWantToRetire(existing.get().status().wantToRetire(),
                                                                       false,
                                                                       rebuilding));
                    }
                    nodesToRemove.add(existing.get());
                }
                nodesToAdd.add(node);
            }
            // Add and remove atomically in one transaction
            NestedTransaction transaction = new NestedTransaction();
            List<Node> resultingNodes = db.addNodesInState(IP.Config.verify(nodesToAdd, list(lock)),
                                                           Node.State.provisioned,
                                                           agent,
                                                           transaction);
            db.removeNodes(nodesToRemove, transaction);
            transaction.commit();
            return resultingNodes;
        }
    }

    /** Sets a list of nodes ready and returns the nodes in the ready state */
    public List<Node> setReady(List<Node> nodes, Agent agent, String reason) {
        try (Mutex lock = lockUnallocated()) {
            List<Node> nodesWithResetFields = nodes.stream()
                    .map(node -> {
                        if (node.state() != Node.State.provisioned && node.state() != Node.State.dirty)
                            illegal("Can not set " + node + " ready. It is not provisioned or dirty.");
                        return node.withWantToRetire(false, false, false, Agent.system, clock.instant());
                    })
                    .collect(Collectors.toList());
            return db.writeTo(Node.State.ready, nodesWithResetFields, agent, Optional.of(reason));
        }
    }

    public Node setReady(String hostname, Agent agent, String reason) {
        Node nodeToReady = requireNode(hostname);
        if (nodeToReady.state() == Node.State.ready) return nodeToReady; // Already ready: no-op
        return setReady(List.of(nodeToReady), agent, reason).get(0);
    }

    /** Reserve nodes. This method does <b>not</b> lock the node repository */
    public List<Node> reserve(List<Node> nodes) {
        return db.writeTo(Node.State.reserved, nodes, Agent.application, Optional.empty());
    }

    /** Activate nodes. This method does <b>not</b> lock the node repository */
    public List<Node> activate(List<Node> nodes, NestedTransaction transaction) {
        return db.writeTo(Node.State.active, nodes, Agent.application, Optional.empty(), transaction);
    }

    /**
     * Sets a list of nodes to have their allocation removable (active to inactive) in the node repository.
     *
     * @param application the application the nodes belong to
     * @param nodes the nodes to make removable. These nodes MUST be in the active state.
     */
    public void setRemovable(ApplicationId application, List<Node> nodes) {
        try (Mutex lock = lock(application)) {
            List<Node> removableNodes = nodes.stream()
                    .map(node -> node.with(node.allocation().get().removable(true)))
                    .collect(Collectors.toList());
            write(removableNodes, lock);
        }
    }

    /**
     * Deactivates these nodes in a transaction and returns the nodes in the new state which will hold if the
     * transaction commits.
     */
    public List<Node> deactivate(List<Node> nodes, ApplicationTransaction transaction) {
        // Outside production (or in CD systems) nodes are deallocated rather than kept inactive
        if ( ! zone.environment().isProduction() || zone.system().isCd())
            return deallocate(nodes, Agent.application, "Deactivated by application", transaction.nested());

        var stateless = NodeList.copyOf(nodes).stateless();
        var stateful = NodeList.copyOf(nodes).stateful();
        List<Node> written = new ArrayList<>();
        written.addAll(deallocate(stateless.asList(), Agent.application, "Deactivated by application", transaction.nested()));
        written.addAll(db.writeTo(Node.State.inactive, stateful.asList(), Agent.application, Optional.empty(), transaction.nested()));
        return written;
    }

    /**
     * Fails these nodes in a transaction and returns the nodes in the new state which will hold if the
     * transaction commits.
     */
    public List<Node> fail(List<Node> nodes, ApplicationTransaction transaction) {
        return fail(nodes, Agent.application, "Failed by application", transaction.nested());
    }

    public List<Node> fail(List<Node> nodes, Agent agent, String reason) {
        NestedTransaction transaction = new NestedTransaction();
        nodes = fail(nodes, agent, reason, transaction);
        transaction.commit();
        return nodes;
    }

    private List<Node> fail(List<Node> nodes, Agent agent, String reason, NestedTransaction transaction) {
        // Clear the wantToFail flag when the node is actually moved to failed
        nodes = nodes.stream()
                     .map(n -> n.withWantToFail(false, agent, clock.instant()))
                     .collect(Collectors.toList());
        return db.writeTo(Node.State.failed, nodes, agent, Optional.of(reason), transaction);
    }

    /** Move nodes to the dirty state */
    public List<Node> deallocate(List<Node> nodes, Agent agent, String reason) {
        return performOn(NodeList.copyOf(nodes), (node, lock) -> deallocate(node, agent, reason));
    }

    public List<Node> deallocateRecursively(String hostname, Agent agent, String reason) {
        Node nodeToDirty = node(hostname).orElseThrow(() ->
                new IllegalArgumentException("Could not deallocate " + hostname + ": Node not found"));

        // For hosts, also deallocate all children; nodes already dirty are skipped
        List<Node> nodesToDirty =
                (nodeToDirty.type().isHost() ?
                 Stream.concat(list().childrenOf(hostname).asList().stream(), Stream.of(nodeToDirty)) :
                 Stream.of(nodeToDirty))
                        .filter(node -> node.state() != Node.State.dirty)
                        .collect(Collectors.toList());

        List<String> hostnamesNotAllowedToDirty = nodesToDirty.stream()
                .filter(node -> node.state() != Node.State.provisioned)
                .filter(node -> node.state() != Node.State.failed)
                .filter(node -> node.state() != Node.State.parked)
                .filter(node -> node.state() != Node.State.breakfixed)
                .map(Node::hostname)
                .collect(Collectors.toList());
        if ( ! hostnamesNotAllowedToDirty.isEmpty())
            illegal("Could not deallocate " + nodeToDirty + ": " +
                    hostnamesNotAllowedToDirty + " are not in states [provisioned, failed, parked, breakfixed]");

        return nodesToDirty.stream().map(node -> deallocate(node, agent, reason)).collect(Collectors.toList());
    }

    /**
     * Set a node dirty or parked, allowed if it is in the provisioned, inactive, failed or parked state.
     * Use this to clean newly provisioned nodes or to recycle failed nodes which have been repaired or put on hold.
     */
    public Node deallocate(Node node, Agent agent, String reason) {
        NestedTransaction transaction = new NestedTransaction();
        Node deallocated = deallocate(node, agent, reason, transaction);
        transaction.commit();
        return deallocated;
    }

    public List<Node> deallocate(List<Node> nodes, Agent agent, String reason, NestedTransaction transaction) {
        return nodes.stream().map(node -> deallocate(node, agent, reason, transaction)).collect(Collectors.toList());
    }

    public Node deallocate(Node node, Agent agent, String reason, NestedTransaction transaction) {
        // Park instead of dirtying when the node is flagged for deprovision/rebuild/operator retirement
        if (parkOnDeallocationOf(node, agent)) {
            return park(node.hostname(), false, agent, reason, transaction);
        } else {
            return db.writeTo(Node.State.dirty, List.of(node), agent, Optional.of(reason), transaction).get(0);
        }
    }

    /**
     * Fails this node and returns it in its new state.
     *
     * @return the node in its new state
     * @throws NoSuchNodeException if the node is not found
     */
    public Node fail(String hostname, Agent agent, String reason) {
        return fail(hostname, true, agent, reason);
    }

    public Node fail(String hostname, boolean keepAllocation, Agent agent, String reason) {
        return move(hostname, Node.State.failed, agent, keepAllocation, Optional.of(reason));
    }

    /**
     * Fails all the nodes that are children of hostname before finally failing the hostname itself.
     * Non-active nodes are failed immediately, while active nodes are marked as wantToFail.
     * The host is failed if it has no active nodes and marked wantToFail if it has.
     *
     * @return all the nodes that were changed by this request
     */
    public List<Node> failOrMarkRecursively(String hostname, Agent agent, String reason) {
        NodeList children = list().childrenOf(hostname);
        List<Node> changed = performOn(children, (node, lock) -> failOrMark(node, agent, reason, lock));

        if (children.state(Node.State.active).isEmpty())
            changed.add(move(hostname, Node.State.failed, agent, true, Optional.of(reason)));
        else
            changed.addAll(performOn(NodeList.of(node(hostname).orElseThrow()),
                                     (node, lock) -> failOrMark(node, agent, reason, lock)));

        return changed;
    }

    private Node failOrMark(Node node, Agent agent, String reason, Mutex lock) {
        if (node.state() == Node.State.active) {
            node = node.withWantToFail(true, agent, clock.instant());
            write(node, lock);
            return node;
        } else {
            return move(node.hostname(), Node.State.failed, agent, true, Optional.of(reason));
        }
    }

    /**
     * Parks this node and returns it in its new state.
     *
     * @return the node in its new state
     * @throws NoSuchNodeException if the node is not found
     */
    public Node park(String hostname, boolean keepAllocation, Agent agent, String reason) {
        NestedTransaction transaction = new NestedTransaction();
        Node parked = park(hostname, keepAllocation, agent, reason, transaction);
        transaction.commit();
        return parked;
    }

    private Node park(String hostname, boolean keepAllocation, Agent agent, String reason, NestedTransaction transaction) {
        return move(hostname, Node.State.parked, agent, keepAllocation, Optional.of(reason), transaction);
    }

    /**
     * Parks all the nodes that are children of hostname before finally parking the hostname itself.
     *
     * @return List of all the parked nodes in their new state
     */
    public List<Node> parkRecursively(String hostname, Agent agent, String reason) {
        return moveRecursively(hostname, Node.State.parked, agent, Optional.of(reason));
    }

    /**
     * Moves a previously failed or parked node back to the active state.
     *
     * @return the node in its new state
     * @throws NoSuchNodeException if the node is not found
     */
    public Node reactivate(String hostname, Agent agent, String reason) {
        return move(hostname, Node.State.active, agent, true, Optional.of(reason));
    }

    /**
     * Moves a host to breakfixed state, removing any children.
     */
    public List<Node> breakfixRecursively(String hostname, Agent agent, String reason) {
        Node node = requireNode(hostname);
        try (Mutex lock = lockUnallocated()) {
            requireBreakfixable(node);
            NestedTransaction transaction = new NestedTransaction();
            List<Node> removed = removeChildren(node, false, transaction);
            removed.add(move(node.hostname(), Node.State.breakfixed, agent, true, Optional.of(reason), transaction));
            transaction.commit();
            return removed;
        }
    }

    private List<Node> moveRecursively(String hostname, Node.State toState, Agent agent, Optional<String> reason) {
        NestedTransaction transaction = new NestedTransaction();
        // Children first, then the node itself, all in one transaction
        List<Node> moved = list().childrenOf(hostname).asList().stream()
                                 .map(child -> move(child.hostname(), toState, agent, true, reason, transaction))
                                 .collect(Collectors.toList());
        moved.add(move(hostname, toState, agent, true, reason, transaction));
        transaction.commit();
        return moved;
    }

    /** Move a node to given state */
    private Node move(String hostname, Node.State toState, Agent agent, boolean keepAllocation, Optional<String> reason) {
        NestedTransaction transaction = new NestedTransaction();
        Node moved = move(hostname, toState, agent, keepAllocation, reason, transaction);
        transaction.commit();
        return moved;
    }

    /** Move a node to given state as part of a transaction */
    private Node move(String hostname, Node.State toState, Agent agent, boolean keepAllocation, Optional<String> reason,
                      NestedTransaction transaction) {
        try (NodeMutex lock = lockAndGetRequired(hostname)) {
            Node node = lock.node();
            if (toState == Node.State.active) {
                if (node.allocation().isEmpty()) illegal("Could not set " + node + " active: It has no allocation");
                if (!keepAllocation) illegal("Could not set " + node + " active: Requested to discard allocation");
                // Guard against two active nodes with the same cluster and index for the same owner
                for (Node currentActive : list(Node.State.active).owner(node.allocation().get().owner())) {
                    if (node.allocation().get().membership().cluster().equals(currentActive.allocation().get().membership().cluster())
                        && node.allocation().get().membership().index() == currentActive.allocation().get().membership().index())
                        illegal("Could not set " + node + " active: Same cluster and index as " + currentActive);
                }
            }
            if (!keepAllocation && node.allocation().isPresent()) {
                node = node.withoutAllocation();
            }
            if (toState == Node.State.deprovisioned) {
                node = node.with(IP.Config.EMPTY); // Deprovisioned nodes keep no IP config
            }
            return db.writeTo(toState, List.of(node), agent, reason, transaction).get(0);
        }
    }

    /**
     * This method is used by the REST API to handle readying nodes for new allocations. For Linux
     * containers this will remove the node from node repository, otherwise the node will be moved to state ready.
     */
    public Node markNodeAvailableForNewAllocation(String hostname, Agent agent, String reason) {
        Node node = requireNode(hostname);
        if (node.flavor().getType() == Flavor.Type.DOCKER_CONTAINER && node.type() == NodeType.tenant) {
            if (node.state() != Node.State.dirty)
                illegal("Cannot make " + node + " available for new allocation as it is not in state [dirty]");
            return removeRecursively(node, true).get(0);
        }

        if (node.state() == Node.State.ready) return node;

        // Refuse to ready a node whose (parent) host has hard failure reports
        Node parentHost = node.parentHostname().flatMap(this::node).orElse(node);
        List<String> failureReasons = NodeFailer.reasonsToFailHost(parentHost);
        if ( ! failureReasons.isEmpty())
            illegal(node + " cannot be readied because it has hard failures: " + failureReasons);

        return setReady(List.of(node), agent, reason).get(0);
    }

    /**
     * Removes all the nodes that are children of hostname before finally removing the hostname itself.
     *
     * @return a List of all the nodes that have been removed or (for hosts) deprovisioned
     */
    public List<Node> removeRecursively(String hostname) {
        Node node = requireNode(hostname);
        return removeRecursively(node, false);
    }

    public List<Node> removeRecursively(Node node, boolean force) {
        try (Mutex lock = lockUnallocated()) {
            requireRemovable(node, false, force);
            NestedTransaction transaction = new NestedTransaction();
            final List<Node> removed;
            if (!node.type().isHost()) {
                removed = List.of(node);
                db.removeNodes(removed, transaction);
            } else {
                removed = removeChildren(node, force, transaction);
                if (zone.getCloud().dynamicProvisioning()) {
                    // Dynamically provisioned hosts are removed outright ...
                    db.removeNodes(List.of(node), transaction);
                } else {
                    // ... while statically provisioned hosts are kept as deprovisioned
                    move(node.hostname(), Node.State.deprovisioned, Agent.system, false, Optional.empty(), transaction);
                }
                removed.add(node);
            }
            transaction.commit();
            return removed;
        }
    }

    /** Forgets a deprovisioned node. This removes all traces of the node in the node repository. */
    public void forget(Node node) {
        if (node.state() != Node.State.deprovisioned)
            throw new IllegalArgumentException(node + " must be deprovisioned before it can be forgotten");
        if (node.status().wantToRebuild())
            throw new IllegalArgumentException(node + " is rebuilding and cannot be forgotten");
        NestedTransaction transaction = new NestedTransaction();
        db.removeNodes(List.of(node), transaction);
        transaction.commit();
    }

    private List<Node> removeChildren(Node node, boolean force, NestedTransaction transaction) {
        List<Node> children = list().childrenOf(node).asList();
        children.forEach(child -> requireRemovable(child, true, force));
        db.removeNodes(children, transaction);
        return new ArrayList<>(children);
    }

    /**
     * Throws if the given node cannot be removed. Removal is allowed if:
     *  - Tenant node:
     *    - non-recursively: node is unallocated
     *    - recursively: node is unallocated or node is in failed|parked
     *  - Host node: iff in state provisioned|failed|parked
     *  - Child node:
     *    - non-recursively: node in state ready
     *    - recursively: child is in state provisioned|failed|parked|dirty|ready
     */
    private void requireRemovable(Node node, boolean removingRecursively, boolean force) {
        if (force) return;

        if (node.type() == NodeType.tenant && node.allocation().isPresent()) {
            EnumSet<Node.State> removableStates = EnumSet.of(Node.State.failed, Node.State.parked);
            if (!removingRecursively || !removableStates.contains(node.state()))
                illegal(node + " is currently allocated and cannot be removed while in " + node.state());
        }

        final Set<Node.State> removableStates;
        if (node.type().isHost()) {
            removableStates = EnumSet.of(Node.State.provisioned, Node.State.failed, Node.State.parked);
        } else {
            removableStates = removingRecursively
                    ? EnumSet.of(Node.State.provisioned, Node.State.failed, Node.State.parked,
                                 Node.State.dirty, Node.State.ready)
                    : EnumSet.of(Node.State.ready);
        }
        if (!removableStates.contains(node.state()))
            illegal(node + " can not be removed while in " + node.state());
    }

    /**
     * Throws if given node cannot be breakfixed.
     * Breakfix is allowed if the following is true:
     *  - Node is tenant host
     *  - Node is in zone without dynamic provisioning
     *  - Node is in parked or failed state
     */
    private void requireBreakfixable(Node node) {
        if (zone.getCloud().dynamicProvisioning()) {
            illegal("Can not breakfix in zone: " + zone);
        }
        if (node.type() != NodeType.host) {
            illegal(node + " can not be breakfixed as it is not a tenant host");
        }
        Set<Node.State> legalStates = EnumSet.of(Node.State.failed, Node.State.parked);
        if (! legalStates.contains(node.state())) {
            // NOTE(review): message says "removed" although this guards breakfix — confirm intent
            illegal(node + " can not be removed as it is not in the states " + legalStates);
        }
    }

    /**
     * Increases the restart generation of the active nodes matching given filter.
     *
     * @return the nodes in their new state
     */
    public List<Node> restartActive(Predicate<Node> filter) {
        return restart(NodeFilter.in(Set.of(Node.State.active)).and(filter));
    }

    /**
     * Increases the restart generation of the any nodes matching given filter.
     *
     * @return the nodes in their new state
     */
    public List<Node> restart(Predicate<Node> filter) {
        return performOn(filter, (node, lock) ->
                write(node.withRestart(node.allocation().get().restartGeneration().withIncreasedWanted()), lock));
    }

    /**
     * Increases the reboot generation of the nodes matching the filter.
     *
     * @return the nodes in their new state
     */
    public List<Node> reboot(Predicate<Node> filter) {
        return performOn(filter, (node, lock) -> write(node.withReboot(node.status().reboot().withIncreasedWanted()), lock));
    }

    /**
     * Set target OS version of all nodes matching given filter.
     *
     * @return the nodes in their new state
     */
    public List<Node> upgradeOs(Predicate<Node> filter, Optional<Version> version) {
        return performOn(filter, (node, lock) -> {
            var newStatus = node.status().withOsVersion(node.status().osVersion().withWanted(version));
            return write(node.with(newStatus), lock);
        });
    }

    /** Retire nodes matching given filter */
    public List<Node> retire(Predicate<Node> filter, Agent agent, Instant instant) {
        return performOn(filter, (node, lock) -> write(node.withWantToRetire(true, agent, instant), lock));
    }

    /** Retire and deprovision given host and all of its children */
    public List<Node> deprovision(String hostname, Agent agent, Instant instant) {
        return decommission(hostname, DecommissionOperation.deprovision, agent, instant);
    }

    /** Retire and rebuild given host and all of its children */
    public List<Node> rebuild(String hostname, Agent agent, Instant instant) {
        return decommission(hostname, DecommissionOperation.rebuild, agent, instant);
    }

    private List<Node> decommission(String hostname, DecommissionOperation op, Agent agent, Instant instant) {
        Optional<NodeMutex> nodeMutex = lockAndGet(hostname);
        if (nodeMutex.isEmpty()) return List.of();
        Node host = nodeMutex.get().node();
        if (!host.type().isHost()) throw new IllegalArgumentException("Cannot " + op + " non-host " + host);

        List<Node> result;
        boolean wantToDeprovision = op == DecommissionOperation.deprovision;
        boolean wantToRebuild = op == DecommissionOperation.rebuild;
        // Children and host are retired while both the host and the unallocated lock are held
        try (NodeMutex lock = nodeMutex.get(); Mutex allocationLock = lockUnallocated()) {
            host = lock.node(); // Re-read under lock
            result = performOn(list(allocationLock).childrenOf(host), (node, nodeLock) -> {
                Node newNode = node.withWantToRetire(true, wantToDeprovision, wantToRebuild, agent, instant);
                return write(newNode, nodeLock);
            });
            Node newHost = host.withWantToRetire(true, wantToDeprovision, wantToRebuild, agent, instant);
            result.add(write(newHost, lock));
        }
        return result;
    }

    /**
     * Writes this node after it has changed some internal state but NOT changed its state field.
     * This does NOT lock the node repository implicitly, but callers are expected to already hold the lock.
     *
     * @param lock already acquired lock
     * @return the written node for convenience
     */
    public Node write(Node node, Mutex lock) {
        return write(List.of(node), lock).get(0);
    }

    /**
     * Writes these nodes after they have changed some internal state but NOT changed their state field.
     * This does NOT lock the node repository implicitly, but callers are expected to already hold the lock.
     *
     * @param lock already acquired lock
     * @return the written nodes for convenience
     */
    public List<Node> write(List<Node> nodes, @SuppressWarnings("unused") Mutex lock) {
        return db.writeTo(nodes, Agent.system, Optional.empty());
    }

    private List<Node> performOn(Predicate<Node> filter, BiFunction<Node, Mutex, Node> action) {
        return performOn(list().matching(filter), action);
    }

    /**
     * Performs an operation requiring locking on all nodes matching some filter.
     *
     * @param action the action to perform
     * @return the set of nodes on which the action was performed, as they became as a result of the operation
     */
    private List<Node> performOn(NodeList nodes, BiFunction<Node, Mutex, Node> action) {
        List<Node> unallocatedNodes = new ArrayList<>();
        ListMap<ApplicationId, Node> allocatedNodes = new ListMap<>();

        // Group nodes by which lock protects them: the unallocated lock or their owning application's lock
        for (Node node : nodes) {
            if (node.allocation().isPresent())
                allocatedNodes.put(node.allocation().get().owner(), node);
            else
                unallocatedNodes.add(node);
        }

        List<Node> resultingNodes = new ArrayList<>();
        try (Mutex lock = lockUnallocated()) {
            for (Node node : unallocatedNodes) {
                Optional<Node> currentNode = db.readNode(node.hostname()); // Re-read while holding the lock
                if (currentNode.isEmpty()) continue;
                resultingNodes.add(action.apply(currentNode.get(), lock));
            }
        }
        for (Map.Entry<ApplicationId, List<Node>> applicationNodes : allocatedNodes.entrySet()) {
            try (Mutex lock = lock(applicationNodes.getKey())) {
                for (Node node : applicationNodes.getValue()) {
                    Optional<Node> currentNode = db.readNode(node.hostname()); // Re-read while holding the lock
                    if (currentNode.isEmpty()) continue;
                    resultingNodes.add(action.apply(currentNode.get(), lock));
                }
            }
        }
        return resultingNodes;
    }

    public boolean canAllocateTenantNodeTo(Node host) {
        return canAllocateTenantNodeTo(host, zone.getCloud().dynamicProvisioning());
    }

    public boolean suspended(Node node) {
        try {
            return orchestrator.getNodeStatus(new HostName(node.hostname())).isSuspended();
        } catch (HostNameNotFoundException e) {
            // A node unknown to the orchestrator is treated as not suspended
            return false;
        }
    }

    /** Create a lock which provides exclusive rights to making changes to the given application */
    public Mutex lock(ApplicationId application) {
        return db.lock(application);
    }

    /** Create a lock with a timeout which provides exclusive rights to making changes to the given application */
    public Mutex lock(ApplicationId application, Duration timeout) {
        return db.lock(application, timeout);
    }

    /** Create a lock which provides exclusive rights to modifying unallocated nodes */
    public Mutex lockUnallocated() {
        return db.lockInactive();
    }

    /** Returns the unallocated/application lock, and the node acquired under that lock. */
    public Optional<NodeMutex> lockAndGet(Node node) {
        Node staleNode = node;

        final int maxRetries = 4;
        for (int i = 0; i < maxRetries; ++i) {
            Mutex lockToClose = lock(staleNode);
            try {
                Optional<Node> freshNode = node(staleNode.hostname(), staleNode.state());
                if (freshNode.isEmpty()) {
                    freshNode = node(staleNode.hostname());
                    if (freshNode.isEmpty()) {
                        return Optional.empty();
                    }
                }

                // Only return the node if its owner (and hence which lock applies) did not change under us
                if (Objects.equals(freshNode.get().allocation().map(Allocation::owner),
                                   staleNode.allocation().map(Allocation::owner))) {
                    NodeMutex nodeMutex = new NodeMutex(freshNode.get(), lockToClose);
                    lockToClose = null; // Ownership transferred to the returned NodeMutex
                    return Optional.of(nodeMutex);
                }

                // Wrong lock was taken; retry with the fresh node
                staleNode = freshNode.get();
            } finally {
                if (lockToClose != null) lockToClose.close();
            }
        }

        throw new IllegalStateException("Giving up (after " + maxRetries + " attempts) " +
                                        "fetching an up to date node under lock: " + node.hostname());
    }

    /** Returns the unallocated/application lock, and the node acquired under that lock. */
    public Optional<NodeMutex> lockAndGet(String hostname) {
        return node(hostname).flatMap(this::lockAndGet);
    }

    /** Returns the unallocated/application lock, and the node acquired under that lock. */
    public NodeMutex lockAndGetRequired(Node node) {
        return lockAndGet(node).orElseThrow(() -> new NoSuchNodeException("No node with hostname '" + node.hostname() + "'"));
    }

    /** Returns the unallocated/application lock, and the node acquired under that lock. */
    public NodeMutex lockAndGetRequired(String hostname) {
        return lockAndGet(hostname).orElseThrow(() -> new NoSuchNodeException("No node with hostname '" + hostname + "'"));
    }

    private Mutex lock(Node node) {
        return node.allocation().isPresent() ? lock(node.allocation().get().owner()) : lockUnallocated();
    }

    private Node requireNode(String hostname) {
        return node(hostname).orElseThrow(() -> new NoSuchNodeException("No node with hostname '" + hostname + "'"));
    }

    private void illegal(String message) {
        throw new IllegalArgumentException(message);
    }

    /** Returns whether node should be parked when deallocated by given agent */
    private static boolean parkOnDeallocationOf(Node node, Agent agent) {
        if (node.state() == Node.State.parked) return false;
        if (agent == Agent.operator) return false;
        if (!node.type().isHost() && node.status().wantToDeprovision()) return false;
        boolean retirementRequestedByOperator = node.status().wantToRetire() &&
                                                node.history().event(History.Event.Type.wantToRetire)
                                                    .map(History.Event::agent)
                                                    .map(a -> a == Agent.operator)
                                                    .orElse(false);
        return node.status().wantToDeprovision() ||
               node.status().wantToRebuild() ||
               retirementRequestedByOperator;
    }

    /** The different ways a host can be decommissioned */
    private enum DecommissionOperation {
        deprovision,
        rebuild,
    }

}
Consider reusing the node snapshot taken at the start of the method here, and likewise in `hasHardwareIssue()`, instead of issuing a fresh repository read per node. More generally, this class appears to re-read nodes from the repository more eagerly than necessary.
/**
 * Returns the active nodes that should be failed: nodes that have been down longer than the
 * grace period, and suspended nodes whose (parent) host has hard failure reports.
 *
 * Parent hosts are resolved from the {@code activeNodes} snapshot taken at the start of the
 * method, rather than re-reading each parent from the node repository per node.
 */
private Collection<FailingNode> findActiveFailingNodes() {
    Set<FailingNode> failingNodes = new HashSet<>();
    NodeList activeNodes = nodeRepository().nodes().list(Node.State.active);

    for (Node node : activeNodes) {
        // Suspended nodes get a longer grace period before being failed
        Instant graceTimeStart = clock().instant().minus(suspended(node) ? suspendedDownTimeLimit : downTimeLimit);
        if (node.history().hasEventBefore(History.Event.Type.down, graceTimeStart) && !applicationSuspended(node)) {
            // Allow a grace period after node re-activation
            if (!node.history().hasEventAfter(History.Event.Type.activated, graceTimeStart))
                failingNodes.add(new FailingNode(node, "Node has been down longer than " + downTimeLimit));
        }
    }

    // Index the snapshot by hostname so parent lookups below do not hit the repository again
    Map<String, Node> activeByHostname = new HashMap<>();
    for (Node node : activeNodes)
        activeByHostname.put(node.hostname(), node);

    for (Node node : activeNodes) {
        if (allSuspended(node, activeNodes)) {
            // NOTE(review): a parent absent from the active snapshot now falls back to the node itself
            // (and is then skipped by the isHost() check); previously a non-active parent would still
            // be looked up in the repository — confirm this is acceptable.
            Node host = node.parentHostname()
                            .map(parent -> activeByHostname.getOrDefault(parent, node))
                            .orElse(node);
            if (host.type().isHost()) {
                List<String> failureReports = reasonsToFailHost(host);
                if ( ! failureReports.isEmpty()) {
                    failingNodes.add(new FailingNode(node, host.equals(node)
                                                           ? "Host has failure reports: " + failureReports
                                                           : "Parent " + host + " has failure reports: " + failureReports));
                }
            }
        }
    }

    return failingNodes;
}
Node host = node.parentHostname().flatMap(parent -> nodeRepository().nodes().node(parent)).orElse(node);
/**
 * Returns the active nodes that should be failed: nodes that have been down longer than the
 * grace period, and suspended nodes whose (parent) host has hard failure reports.
 *
 * Parent hosts are resolved from the {@code activeNodes} snapshot taken at the start of the
 * method, rather than re-reading each parent from the node repository per node.
 */
private Collection<FailingNode> findActiveFailingNodes() {
    Set<FailingNode> failingNodes = new HashSet<>();
    NodeList activeNodes = nodeRepository().nodes().list(Node.State.active);

    for (Node node : activeNodes) {
        // Suspended nodes get a longer grace period before being failed
        Instant graceTimeStart = clock().instant().minus(suspended(node) ? suspendedDownTimeLimit : downTimeLimit);
        if (node.history().hasEventBefore(History.Event.Type.down, graceTimeStart) && !applicationSuspended(node)) {
            // Allow a grace period after node re-activation
            if (!node.history().hasEventAfter(History.Event.Type.activated, graceTimeStart))
                failingNodes.add(new FailingNode(node, "Node has been down longer than " + downTimeLimit));
        }
    }

    // Index the snapshot by hostname so parent lookups below do not hit the repository again
    Map<String, Node> activeByHostname = new HashMap<>();
    for (Node node : activeNodes)
        activeByHostname.put(node.hostname(), node);

    for (Node node : activeNodes) {
        if (allSuspended(node, activeNodes)) {
            // NOTE(review): a parent absent from the active snapshot now falls back to the node itself
            // (and is then skipped by the isHost() check); previously a non-active parent would still
            // be looked up in the repository — confirm this is acceptable.
            Node host = node.parentHostname()
                            .map(parent -> activeByHostname.getOrDefault(parent, node))
                            .orElse(node);
            if (host.type().isHost()) {
                List<String> failureReports = reasonsToFailHost(host);
                if ( ! failureReports.isEmpty()) {
                    failingNodes.add(new FailingNode(node, host.equals(node)
                                                           ? "Host has failure reports: " + failureReports
                                                           : "Parent " + host + " has failure reports: " + failureReports));
                }
            }
        }
    }

    return failingNodes;
}
// NodeFailer: a NodeRepositoryMaintainer that moves misbehaving nodes to the failed state.
// Maintenance interval is min(downTimeLimit / 2, the given interval); suspended nodes are granted
// suspendedDownTimeLimit = 4 * downTimeLimit before being considered down too long.
// maintain() does three passes: (1) under the unallocated lock, fails ready nodes whose (parent)
// host has failure reports; (2) fails active nodes reported by findActiveFailingNodes(), subject to
// failAllowedFor(type) and the throttle; (3) fails hosts already marked failing that have no child
// nodes left. It then publishes the three throttling metrics and returns the success factor.
// NOTE(review): findActiveFailingNodes() is referenced here but not defined in this excerpt.
// Throttling (ThrottlePolicy.hosted): at most max(2% of all nodes, 2) recent failures per 1-day
// window; failActive() recursively fails active tenant children before redeploying the owner, and
// treats a TransientException from activation as success (retried by the application maintainer).
// FailingNode implements equals/hashCode on the node only, so the HashSets in the find* methods
// dedupe multiple failure reasons for the same node.
class NodeFailer extends NodeRepositoryMaintainer { private static final Logger log = Logger.getLogger(NodeFailer.class.getName()); /** Metric for number of hosts that we want to fail, but cannot due to throttling */ static final String throttledHostFailuresMetric = "throttledHostFailures"; /** Metric for number of nodes that we want to fail, but cannot due to throttling */ static final String throttledNodeFailuresMetric = "throttledNodeFailures"; /** Metric that indicates whether throttling is active where 1 means active and 0 means inactive */ static final String throttlingActiveMetric = "nodeFailThrottling"; private final Deployer deployer; private final Duration downTimeLimit; private final Duration suspendedDownTimeLimit; private final Orchestrator orchestrator; private final ThrottlePolicy throttlePolicy; private final Metric metric; public NodeFailer(Deployer deployer, NodeRepository nodeRepository, Duration downTimeLimit, Duration interval, Orchestrator orchestrator, ThrottlePolicy throttlePolicy, Metric metric) { super(nodeRepository, min(downTimeLimit.dividedBy(2), interval), metric); this.deployer = deployer; this.downTimeLimit = downTimeLimit; this.suspendedDownTimeLimit = downTimeLimit.multipliedBy(4); this.orchestrator = orchestrator; this.throttlePolicy = throttlePolicy; this.metric = metric; } @Override protected double maintain() { if ( ! 
nodeRepository().nodes().isWorking()) return 0.0; int attempts = 0; int failures = 0; int throttledHostFailures = 0; int throttledNodeFailures = 0; try (Mutex lock = nodeRepository().nodes().lockUnallocated()) { for (FailingNode failing : findReadyFailingNodes()) { attempts++; if (throttle(failing.node())) { failures++; if (failing.node().type().isHost()) throttledHostFailures++; else throttledNodeFailures++; continue; } nodeRepository().nodes().fail(failing.node().hostname(), Agent.NodeFailer, failing.reason()); } } for (FailingNode failing : findActiveFailingNodes()) { attempts++; if (!failAllowedFor(failing.node().type())) continue; if (throttle(failing.node())) { failures++; if (failing.node().type().isHost()) throttledHostFailures++; else throttledNodeFailures++; continue; } failActive(failing); } NodeList activeNodes = nodeRepository().nodes().list(Node.State.active); for (Node host : activeNodes.hosts().failing()) { if ( ! activeNodes.childrenOf(host).isEmpty()) continue; Optional<NodeMutex> locked = Optional.empty(); try { attempts++; locked = nodeRepository().nodes().lockAndGet(host); if (locked.isEmpty()) continue; nodeRepository().nodes().fail(List.of(locked.get().node()), Agent.NodeFailer, "Host should be failed and have no tenant nodes"); } catch (Exception e) { failures++; } finally { locked.ifPresent(NodeMutex::close); } } int throttlingActive = Math.min(1, throttledHostFailures + throttledNodeFailures); metric.set(throttlingActiveMetric, throttlingActive, null); metric.set(throttledHostFailuresMetric, throttledHostFailures, null); metric.set(throttledNodeFailuresMetric, throttledNodeFailures, null); return asSuccessFactor(attempts, failures); } private Collection<FailingNode> findReadyFailingNodes() { Set<FailingNode> failingNodes = new HashSet<>(); for (Node node : nodeRepository().nodes().list(Node.State.ready)) { Node hostNode = node.parentHostname().flatMap(parent -> nodeRepository().nodes().node(parent)).orElse(node); List<String> 
failureReports = reasonsToFailHost(hostNode); if (failureReports.size() > 0) { if (hostNode.equals(node)) { failingNodes.add(new FailingNode(node, "Host has failure reports: " + failureReports)); } else { failingNodes.add(new FailingNode(node, "Parent (" + hostNode + ") has failure reports: " + failureReports)); } } } return failingNodes; } public static List<String> reasonsToFailHost(Node host) { return host.reports().getReports().stream() .filter(report -> report.getType().hostShouldBeFailed()) .map(report -> report.getReportId() + " reported " + report.getCreatedTime() + ": " + report.getDescription()) .collect(Collectors.toList()); } /** Returns whether node has any kind of hardware issue */ static boolean hasHardwareIssue(Node node, NodeRepository nodeRepository) { Node host = node.parentHostname().flatMap(parent -> nodeRepository.nodes().node(parent)).orElse(node); return reasonsToFailHost(host).size() > 0; } private boolean applicationSuspended(Node node) { try { return orchestrator.getApplicationInstanceStatus(node.allocation().get().owner()) == ApplicationInstanceStatus.ALLOWED_TO_BE_DOWN; } catch (ApplicationIdNotFoundException e) { return false; } } private boolean suspended(Node node) { try { return orchestrator.getNodeStatus(new HostName(node.hostname())).isSuspended(); } catch (HostNameNotFoundException e) { return false; } } /** Is the node and all active children suspended? */ private boolean allSuspended(Node node, NodeList activeNodes) { if (!suspended(node)) return false; if (node.parentHostname().isPresent()) return true; return activeNodes.stream() .filter(childNode -> childNode.parentHostname().isPresent() && childNode.parentHostname().get().equals(node.hostname())) .allMatch(this::suspended); } /** * We can attempt to fail any number of *tenant* and *host* nodes because the operation will not be effected * unless the node is replaced. * We can also attempt to fail a single proxy(host) as there should be enough redundancy to handle that. 
* But we refuse to fail out config(host)/controller(host) */ private boolean failAllowedFor(NodeType nodeType) { switch (nodeType) { case tenant: case host: return true; case proxy: case proxyhost: return nodeRepository().nodes().list(Node.State.failed).nodeType(nodeType).isEmpty(); default: return false; } } /** * Called when a node should be moved to the failed state: Do that if it seems safe, * which is when the node repo has available capacity to replace the node (and all its tenant nodes if host). * Otherwise not replacing the node ensures (by Orchestrator check) that no further action will be taken. * * @return whether node was successfully failed */ private boolean failActive(FailingNode failing) { Optional<Deployment> deployment = deployer.deployFromLocalActive(failing.node().allocation().get().owner(), Duration.ofMinutes(30)); if (deployment.isEmpty()) return false; try (Mutex lock = nodeRepository().nodes().lock(failing.node().allocation().get().owner())) { boolean allTenantNodesFailedOutSuccessfully = true; String reasonForChildFailure = "Failing due to parent host " + failing.node().hostname() + " failure: " + failing.reason(); for (Node failingTenantNode : nodeRepository().nodes().list().childrenOf(failing.node())) { if (failingTenantNode.state() == Node.State.active) { allTenantNodesFailedOutSuccessfully &= failActive(new FailingNode(failingTenantNode, reasonForChildFailure)); } else { nodeRepository().nodes().fail(failingTenantNode.hostname(), Agent.NodeFailer, reasonForChildFailure); } } if (! 
allTenantNodesFailedOutSuccessfully) return false; wantToFail(failing.node(), true, lock); try { deployment.get().activate(); return true; } catch (TransientException e) { log.log(Level.INFO, "Failed to redeploy " + failing.node().allocation().get().owner() + " with a transient error, will be retried by application maintainer: " + Exceptions.toMessageString(e)); return true; } catch (RuntimeException e) { nodeRepository().nodes().node(failing.node().hostname()) .ifPresent(n -> wantToFail(n, false, lock)); log.log(Level.WARNING, "Could not fail " + failing.node() + " for " + failing.node().allocation().get().owner() + " for " + failing.reason() + ": " + Exceptions.toMessageString(e)); return false; } } } private void wantToFail(Node node, boolean wantToFail, Mutex lock) { nodeRepository().nodes().write(node.withWantToFail(wantToFail, Agent.NodeFailer, clock().instant()), lock); } /** Returns true if node failing should be throttled */ private boolean throttle(Node node) { if (throttlePolicy == ThrottlePolicy.disabled) return false; Instant startOfThrottleWindow = clock().instant().minus(throttlePolicy.throttleWindow); NodeList allNodes = nodeRepository().nodes().list(); NodeList recentlyFailedNodes = allNodes.state(Node.State.failed) .matching(n -> n.history().hasEventAfter(History.Event.Type.failed, startOfThrottleWindow)); if (recentlyFailedNodes.size() < throttlePolicy.allowedToFailOf(allNodes.size())) return false; if (node.parentHostname().isEmpty() && recentlyFailedNodes.parents().size() < throttlePolicy.minimumAllowedToFail) return false; if (recentlyFailedNodes.parentOf(node).isPresent()) return false; log.info(String.format("Want to fail node %s, but throttling is in effect: %s", node.hostname(), throttlePolicy.toHumanReadableString(allNodes.size()))); return true; } public enum ThrottlePolicy { hosted(Duration.ofDays(1), 0.02, 2), disabled(Duration.ZERO, 0, 0); private final Duration throttleWindow; private final double fractionAllowedToFail; private final 
int minimumAllowedToFail; ThrottlePolicy(Duration throttleWindow, double fractionAllowedToFail, int minimumAllowedToFail) { this.throttleWindow = throttleWindow; this.fractionAllowedToFail = fractionAllowedToFail; this.minimumAllowedToFail = minimumAllowedToFail; } public int allowedToFailOf(int totalNodes) { return (int) Math.max(totalNodes * fractionAllowedToFail, minimumAllowedToFail); } public String toHumanReadableString(int totalNodes) { return String.format("Max %.0f%% (%d) or %d nodes can fail over a period of %s", fractionAllowedToFail*100, allowedToFailOf(totalNodes), minimumAllowedToFail, throttleWindow); } } private static class FailingNode { private final Node node; private final String reason; public FailingNode(Node node, String reason) { this.node = node; this.reason = reason; } public Node node() { return node; } public String reason() { return reason; } @Override public boolean equals(Object other) { if ( ! (other instanceof FailingNode)) return false; return ((FailingNode)other).node().equals(this.node()); } @Override public int hashCode() { return node.hashCode(); } } }
// NodeFailer: a NodeRepositoryMaintainer that moves misbehaving nodes to the failed state.
// Maintenance interval is min(downTimeLimit / 2, the given interval); suspended nodes are granted
// suspendedDownTimeLimit = 4 * downTimeLimit before being considered down too long.
// maintain() does three passes: (1) under the unallocated lock, fails ready nodes whose (parent)
// host has failure reports; (2) fails active nodes reported by findActiveFailingNodes(), subject to
// failAllowedFor(type) and the throttle; (3) fails hosts already marked failing that have no child
// nodes left. It then publishes the three throttling metrics and returns the success factor.
// NOTE(review): findActiveFailingNodes() is referenced here but not defined in this excerpt.
// Throttling (ThrottlePolicy.hosted): at most max(2% of all nodes, 2) recent failures per 1-day
// window; failActive() recursively fails active tenant children before redeploying the owner, and
// treats a TransientException from activation as success (retried by the application maintainer).
// FailingNode implements equals/hashCode on the node only, so the HashSets in the find* methods
// dedupe multiple failure reasons for the same node.
class NodeFailer extends NodeRepositoryMaintainer { private static final Logger log = Logger.getLogger(NodeFailer.class.getName()); /** Metric for number of hosts that we want to fail, but cannot due to throttling */ static final String throttledHostFailuresMetric = "throttledHostFailures"; /** Metric for number of nodes that we want to fail, but cannot due to throttling */ static final String throttledNodeFailuresMetric = "throttledNodeFailures"; /** Metric that indicates whether throttling is active where 1 means active and 0 means inactive */ static final String throttlingActiveMetric = "nodeFailThrottling"; private final Deployer deployer; private final Duration downTimeLimit; private final Duration suspendedDownTimeLimit; private final Orchestrator orchestrator; private final ThrottlePolicy throttlePolicy; private final Metric metric; public NodeFailer(Deployer deployer, NodeRepository nodeRepository, Duration downTimeLimit, Duration interval, Orchestrator orchestrator, ThrottlePolicy throttlePolicy, Metric metric) { super(nodeRepository, min(downTimeLimit.dividedBy(2), interval), metric); this.deployer = deployer; this.downTimeLimit = downTimeLimit; this.suspendedDownTimeLimit = downTimeLimit.multipliedBy(4); this.orchestrator = orchestrator; this.throttlePolicy = throttlePolicy; this.metric = metric; } @Override protected double maintain() { if ( ! 
nodeRepository().nodes().isWorking()) return 0.0; int attempts = 0; int failures = 0; int throttledHostFailures = 0; int throttledNodeFailures = 0; try (Mutex lock = nodeRepository().nodes().lockUnallocated()) { for (FailingNode failing : findReadyFailingNodes()) { attempts++; if (throttle(failing.node())) { failures++; if (failing.node().type().isHost()) throttledHostFailures++; else throttledNodeFailures++; continue; } nodeRepository().nodes().fail(failing.node().hostname(), Agent.NodeFailer, failing.reason()); } } for (FailingNode failing : findActiveFailingNodes()) { attempts++; if (!failAllowedFor(failing.node().type())) continue; if (throttle(failing.node())) { failures++; if (failing.node().type().isHost()) throttledHostFailures++; else throttledNodeFailures++; continue; } failActive(failing); } NodeList activeNodes = nodeRepository().nodes().list(Node.State.active); for (Node host : activeNodes.hosts().failing()) { if ( ! activeNodes.childrenOf(host).isEmpty()) continue; Optional<NodeMutex> locked = Optional.empty(); try { attempts++; locked = nodeRepository().nodes().lockAndGet(host); if (locked.isEmpty()) continue; nodeRepository().nodes().fail(List.of(locked.get().node()), Agent.NodeFailer, "Host should be failed and have no tenant nodes"); } catch (Exception e) { failures++; } finally { locked.ifPresent(NodeMutex::close); } } int throttlingActive = Math.min(1, throttledHostFailures + throttledNodeFailures); metric.set(throttlingActiveMetric, throttlingActive, null); metric.set(throttledHostFailuresMetric, throttledHostFailures, null); metric.set(throttledNodeFailuresMetric, throttledNodeFailures, null); return asSuccessFactor(attempts, failures); } private Collection<FailingNode> findReadyFailingNodes() { Set<FailingNode> failingNodes = new HashSet<>(); for (Node node : nodeRepository().nodes().list(Node.State.ready)) { Node hostNode = node.parentHostname().flatMap(parent -> nodeRepository().nodes().node(parent)).orElse(node); List<String> 
failureReports = reasonsToFailHost(hostNode); if (failureReports.size() > 0) { if (hostNode.equals(node)) { failingNodes.add(new FailingNode(node, "Host has failure reports: " + failureReports)); } else { failingNodes.add(new FailingNode(node, "Parent (" + hostNode + ") has failure reports: " + failureReports)); } } } return failingNodes; } public static List<String> reasonsToFailHost(Node host) { return host.reports().getReports().stream() .filter(report -> report.getType().hostShouldBeFailed()) .map(report -> report.getReportId() + " reported " + report.getCreatedTime() + ": " + report.getDescription()) .collect(Collectors.toList()); } /** Returns whether node has any kind of hardware issue */ static boolean hasHardwareIssue(Node node, NodeRepository nodeRepository) { Node host = node.parentHostname().flatMap(parent -> nodeRepository.nodes().node(parent)).orElse(node); return reasonsToFailHost(host).size() > 0; } private boolean applicationSuspended(Node node) { try { return orchestrator.getApplicationInstanceStatus(node.allocation().get().owner()) == ApplicationInstanceStatus.ALLOWED_TO_BE_DOWN; } catch (ApplicationIdNotFoundException e) { return false; } } private boolean suspended(Node node) { try { return orchestrator.getNodeStatus(new HostName(node.hostname())).isSuspended(); } catch (HostNameNotFoundException e) { return false; } } /** Is the node and all active children suspended? */ private boolean allSuspended(Node node, NodeList activeNodes) { if (!suspended(node)) return false; if (node.parentHostname().isPresent()) return true; return activeNodes.stream() .filter(childNode -> childNode.parentHostname().isPresent() && childNode.parentHostname().get().equals(node.hostname())) .allMatch(this::suspended); } /** * We can attempt to fail any number of *tenant* and *host* nodes because the operation will not be effected * unless the node is replaced. * We can also attempt to fail a single proxy(host) as there should be enough redundancy to handle that. 
* But we refuse to fail out config(host)/controller(host) */ private boolean failAllowedFor(NodeType nodeType) { switch (nodeType) { case tenant: case host: return true; case proxy: case proxyhost: return nodeRepository().nodes().list(Node.State.failed).nodeType(nodeType).isEmpty(); default: return false; } } /** * Called when a node should be moved to the failed state: Do that if it seems safe, * which is when the node repo has available capacity to replace the node (and all its tenant nodes if host). * Otherwise not replacing the node ensures (by Orchestrator check) that no further action will be taken. * * @return whether node was successfully failed */ private boolean failActive(FailingNode failing) { Optional<Deployment> deployment = deployer.deployFromLocalActive(failing.node().allocation().get().owner(), Duration.ofMinutes(30)); if (deployment.isEmpty()) return false; try (Mutex lock = nodeRepository().nodes().lock(failing.node().allocation().get().owner())) { boolean allTenantNodesFailedOutSuccessfully = true; String reasonForChildFailure = "Failing due to parent host " + failing.node().hostname() + " failure: " + failing.reason(); for (Node failingTenantNode : nodeRepository().nodes().list().childrenOf(failing.node())) { if (failingTenantNode.state() == Node.State.active) { allTenantNodesFailedOutSuccessfully &= failActive(new FailingNode(failingTenantNode, reasonForChildFailure)); } else { nodeRepository().nodes().fail(failingTenantNode.hostname(), Agent.NodeFailer, reasonForChildFailure); } } if (! 
allTenantNodesFailedOutSuccessfully) return false; wantToFail(failing.node(), true, lock); try { deployment.get().activate(); return true; } catch (TransientException e) { log.log(Level.INFO, "Failed to redeploy " + failing.node().allocation().get().owner() + " with a transient error, will be retried by application maintainer: " + Exceptions.toMessageString(e)); return true; } catch (RuntimeException e) { nodeRepository().nodes().node(failing.node().hostname()) .ifPresent(n -> wantToFail(n, false, lock)); log.log(Level.WARNING, "Could not fail " + failing.node() + " for " + failing.node().allocation().get().owner() + " for " + failing.reason() + ": " + Exceptions.toMessageString(e)); return false; } } } private void wantToFail(Node node, boolean wantToFail, Mutex lock) { nodeRepository().nodes().write(node.withWantToFail(wantToFail, Agent.NodeFailer, clock().instant()), lock); } /** Returns true if node failing should be throttled */ private boolean throttle(Node node) { if (throttlePolicy == ThrottlePolicy.disabled) return false; Instant startOfThrottleWindow = clock().instant().minus(throttlePolicy.throttleWindow); NodeList allNodes = nodeRepository().nodes().list(); NodeList recentlyFailedNodes = allNodes.state(Node.State.failed) .matching(n -> n.history().hasEventAfter(History.Event.Type.failed, startOfThrottleWindow)); if (recentlyFailedNodes.size() < throttlePolicy.allowedToFailOf(allNodes.size())) return false; if (node.parentHostname().isEmpty() && recentlyFailedNodes.parents().size() < throttlePolicy.minimumAllowedToFail) return false; if (recentlyFailedNodes.parentOf(node).isPresent()) return false; log.info(String.format("Want to fail node %s, but throttling is in effect: %s", node.hostname(), throttlePolicy.toHumanReadableString(allNodes.size()))); return true; } public enum ThrottlePolicy { hosted(Duration.ofDays(1), 0.02, 2), disabled(Duration.ZERO, 0, 0); private final Duration throttleWindow; private final double fractionAllowedToFail; private final 
int minimumAllowedToFail; ThrottlePolicy(Duration throttleWindow, double fractionAllowedToFail, int minimumAllowedToFail) { this.throttleWindow = throttleWindow; this.fractionAllowedToFail = fractionAllowedToFail; this.minimumAllowedToFail = minimumAllowedToFail; } public int allowedToFailOf(int totalNodes) { return (int) Math.max(totalNodes * fractionAllowedToFail, minimumAllowedToFail); } public String toHumanReadableString(int totalNodes) { return String.format("Max %.0f%% (%d) or %d nodes can fail over a period of %s", fractionAllowedToFail*100, allowedToFailOf(totalNodes), minimumAllowedToFail, throttleWindow); } } private static class FailingNode { private final Node node; private final String reason; public FailingNode(Node node, String reason) { this.node = node; this.reason = reason; } public Node node() { return node; } public String reason() { return reason; } @Override public boolean equals(Object other) { if ( ! (other instanceof FailingNode)) return false; return ((FailingNode)other).node().equals(this.node()); } @Override public int hashCode() { return node.hashCode(); } } }
We should be more conservative here. Consider a controller that restarts and runs this while another controller has accepted a new package but not yet deployed it anywhere: that new version would be pruned because it is not yet deployed. I suggest eliminating only versions older than the oldest deployed version, and always keeping at least one of those. That should be safe?
/**
 * Stores the given application updated with the given application package: applies the package's
 * deployment spec and validation overrides, creates newly declared instances, removes deleted
 * deployments and stale notifications, and prunes old application versions.
 *
 * @param application the locked application to update and store
 * @param applicationPackage the application package whose configuration should be applied
 * @return the updated, stored application
 */
public LockedApplication storeWithUpdatedConfig(LockedApplication application, ApplicationPackage applicationPackage) {
    applicationPackageValidator.validate(application.get(), applicationPackage, clock.instant());

    application = application.with(applicationPackage.deploymentSpec());
    application = application.with(applicationPackage.validationOverrides());

    // Create any instances declared in the package that do not yet exist.
    var existingInstances = application.get().instances().keySet();
    var declaredInstances = applicationPackage.deploymentSpec().instanceNames();
    for (var name : declaredInstances)
        if ( ! existingInstances.contains(name))
            application = withNewInstance(application, application.get().id().instance(name));

    for (InstanceName name : existingInstances) {
        application = withoutDeletedDeployments(application, name);
    }

    // NOTE(review): result is discarded — presumably invoked for its validation side effects; confirm.
    controller.jobController().deploymentStatus(application.get());

    // Remove notifications for instances no longer declared, and for deployments that no longer exist.
    for (Notification notification : controller.notificationsDb().listNotifications(NotificationSource.from(application.get().id()), true)) {
        if ( ! notification.source().instance().map(declaredInstances::contains).orElse(false))
            controller.notificationsDb().removeNotifications(notification.source());
        if (notification.source().instance().isPresent() &&
            ! notification.source().zoneId().map(application.get().require(notification.source().instance().get()).deployments()::containsKey).orElse(false))
            controller.notificationsDb().removeNotifications(notification.source());
    }

    // Fix: be conservative when pruning versions. Removing every version that is not currently deployed
    // races with another controller that has accepted a new package but not yet deployed it anywhere —
    // that new version would be lost. Instead remove only versions strictly older than the oldest
    // deployed version, and keep the newest of those, so at least one such version always survives.
    var oldestDeployedVersion = application.get()
                                           .instances()
                                           .values()
                                           .stream()
                                           .flatMap(instance -> instance.deployments().values().stream())
                                           .map(Deployment::applicationVersion)
                                           .sorted()
                                           .findFirst()
                                           .orElse(ApplicationVersion.unknown);
    var olderVersions = application.get().versions()
                                   .stream()
                                   .filter(version -> version.compareTo(oldestDeployedVersion) < 0)
                                   .sorted()
                                   .collect(Collectors.toList());
    for (int i = 0; i < olderVersions.size() - 1; i++)
        application = application.withoutVersion(olderVersions.get(i));

    store(application);
    return application;
}
// The span under review: iterates over all known application versions to decide which to prune.
for (ApplicationVersion version : application.get().versions()) {
/**
 * Stores the given application refreshed from the given application package: updates its deployment
 * spec and validation overrides, creates newly declared instances, drops deleted deployments and
 * stale notifications, and prunes versions older than the oldest deployed one — keeping the newest
 * of those — before persisting.
 */
public LockedApplication storeWithUpdatedConfig(LockedApplication application, ApplicationPackage applicationPackage) {
    applicationPackageValidator.validate(application.get(), applicationPackage, clock.instant());
    application = application.with(applicationPackage.deploymentSpec());
    application = application.with(applicationPackage.validationOverrides());

    // Snapshot of instances present before any are added below.
    var present = application.get().instances().keySet();
    var declared = applicationPackage.deploymentSpec().instanceNames();
    for (var instanceName : declared)
        if ( ! present.contains(instanceName))
            application = withNewInstance(application, application.get().id().instance(instanceName));

    for (InstanceName instanceName : present)
        application = withoutDeletedDeployments(application, instanceName);

    // NOTE(review): result is discarded — presumably invoked for its validation side effects; confirm.
    controller.jobController().deploymentStatus(application.get());

    // Drop notifications for undeclared instances, and for deployments that no longer exist.
    for (Notification notification : controller.notificationsDb().listNotifications(NotificationSource.from(application.get().id()), true)) {
        if ( ! notification.source().instance().map(declared::contains).orElse(false))
            controller.notificationsDb().removeNotifications(notification.source());
        if (notification.source().instance().isPresent()
            && ! notification.source().zoneId().map(application.get().require(notification.source().instance().get()).deployments()::containsKey).orElse(false))
            controller.notificationsDb().removeNotifications(notification.source());
    }

    // Prune only versions strictly older than the oldest deployed version, keeping the newest of those.
    var oldestDeployed = application.get().instances().values().stream()
                                    .flatMap(instance -> instance.deployments().values().stream())
                                    .map(Deployment::applicationVersion)
                                    .sorted()
                                    .findFirst()
                                    .orElse(ApplicationVersion.unknown);
    var prunable = application.get().versions().stream()
                              .filter(version -> version.compareTo(oldestDeployed) < 0)
                              .sorted()
                              .collect(Collectors.toList());
    if (prunable.size() > 1)
        for (ApplicationVersion version : prunable.subList(0, prunable.size() - 1))
            application = application.withoutVersion(version);

    store(application);
    return application;
}
class ApplicationController { private static final Logger log = Logger.getLogger(ApplicationController.class.getName()); /** The controller owning this */ private final Controller controller; /** For persistence */ private final CuratorDb curator; private final ArtifactRepository artifactRepository; private final ApplicationStore applicationStore; private final AccessControl accessControl; private final ConfigServer configServer; private final Clock clock; private final DeploymentTrigger deploymentTrigger; private final ApplicationPackageValidator applicationPackageValidator; private final EndpointCertificates endpointCertificates; private final StringFlag dockerImageRepoFlag; private final BillingController billingController; ApplicationController(Controller controller, CuratorDb curator, AccessControl accessControl, Clock clock, FlagSource flagSource, BillingController billingController) { this.controller = Objects.requireNonNull(controller); this.curator = Objects.requireNonNull(curator); this.accessControl = Objects.requireNonNull(accessControl); this.configServer = controller.serviceRegistry().configServer(); this.clock = Objects.requireNonNull(clock); this.billingController = Objects.requireNonNull(billingController); artifactRepository = controller.serviceRegistry().artifactRepository(); applicationStore = controller.serviceRegistry().applicationStore(); dockerImageRepoFlag = PermanentFlags.DOCKER_IMAGE_REPO.bindTo(flagSource); deploymentTrigger = new DeploymentTrigger(controller, clock); applicationPackageValidator = new ApplicationPackageValidator(controller); endpointCertificates = new EndpointCertificates(controller, controller.serviceRegistry().endpointCertificateProvider(), controller.serviceRegistry().endpointCertificateValidator()); Once.after(Duration.ofMinutes(1), () -> { Instant start = clock.instant(); int count = 0; for (TenantAndApplicationId id : curator.readApplicationIds()) { lockApplicationIfPresent(id, application -> { for (InstanceName 
instance : application.get().deploymentSpec().instanceNames()) if (!application.get().instances().containsKey(instance)) application = withNewInstance(application, id.instance(instance)); store(application); }); count++; } log.log(Level.INFO, Text.format("Wrote %d applications in %s", count, Duration.between(start, clock.instant()))); }); } /** Returns the application with the given id, or null if it is not present */ public Optional<Application> getApplication(TenantAndApplicationId id) { return curator.readApplication(id); } /** Returns the instance with the given id, or null if it is not present */ public Optional<Instance> getInstance(ApplicationId id) { return getApplication(TenantAndApplicationId.from(id)).flatMap(application -> application.get(id.instance())); } /** * Triggers reindexing for the given document types in the given clusters, for the given application. * * If no clusters are given, reindexing is triggered for the entire application; otherwise * if no documents types are given, reindexing is triggered for all given clusters; otherwise * reindexing is triggered for the cartesian product of the given clusters and document types. */ public void reindex(ApplicationId id, ZoneId zoneId, List<String> clusterNames, List<String> documentTypes, boolean indexedOnly, Double speed) { configServer.reindex(new DeploymentId(id, zoneId), clusterNames, documentTypes, indexedOnly, speed); } /** Returns the reindexing status for the given application in the given zone. */ public ApplicationReindexing applicationReindexing(ApplicationId id, ZoneId zoneId) { return configServer.getReindexing(new DeploymentId(id, zoneId)); } /** Enables reindexing for the given application in the given zone. */ public void enableReindexing(ApplicationId id, ZoneId zoneId) { configServer.enableReindexing(new DeploymentId(id, zoneId)); } /** Disables reindexing for the given application in the given zone. 
*/ public void disableReindexing(ApplicationId id, ZoneId zoneId) { configServer.disableReindexing(new DeploymentId(id, zoneId)); } /** * Returns the application with the given id * * @throws IllegalArgumentException if it does not exist */ public Application requireApplication(TenantAndApplicationId id) { return getApplication(id).orElseThrow(() -> new IllegalArgumentException(id + " not found")); } /** * Returns the instance with the given id * * @throws IllegalArgumentException if it does not exist */ public Instance requireInstance(ApplicationId id) { return getInstance(id).orElseThrow(() -> new IllegalArgumentException(id + " not found")); } /** Returns a snapshot of all applications */ public List<Application> asList() { return curator.readApplications(false); } /** * Returns a snapshot of all readable applications. Unlike {@link ApplicationController * applications that cannot currently be read (e.g. due to serialization issues) and may return an incomplete * snapshot. * * This should only be used in cases where acting on a subset of applications is better than none. */ public List<Application> readable() { return curator.readApplications(true); } /** Returns the ID of all known applications. */ public List<TenantAndApplicationId> idList() { return curator.readApplicationIds(); } /** Returns a snapshot of all applications of a tenant */ public List<Application> asList(TenantName tenant) { return curator.readApplications(tenant); } public ArtifactRepository artifacts() { return artifactRepository; } public ApplicationStore applicationStore() { return applicationStore; } /** Returns all currently reachable content clusters among the given deployments. 
*/ public Map<ZoneId, List<String>> reachableContentClustersByZone(Collection<DeploymentId> ids) { Map<ZoneId, List<String>> clusters = new TreeMap<>(Comparator.comparing(ZoneId::value)); for (DeploymentId id : ids) if (isHealthy(id)) clusters.put(id.zoneId(), List.copyOf(configServer.getContentClusters(id))); return Collections.unmodifiableMap(clusters); } /** Reads the oldest installed platform for the given application and zone from job history, or a node repo. */ private Optional<Version> oldestInstalledPlatform(JobStatus job) { Version oldest = null; for (Run run : job.runs().descendingMap().values()) { Version version = run.versions().targetPlatform(); if (oldest == null || version.isBefore(oldest)) oldest = version; if (run.status() == RunStatus.success) return Optional.of(oldest); } return oldestInstalledPlatform(job.id()); } /** Reads the oldest installed platform for the given application and zone from the node repo of that zone. */ private Optional<Version> oldestInstalledPlatform(JobId job) { return configServer.nodeRepository().list(job.type().zone(controller.system()), NodeFilter.all() .applications(job.application()) .states(active, reserved)) .stream() .map(Node::currentVersion) .filter(version -> ! version.isEmpty()) .min(naturalOrder()); } /** Returns the oldest Vespa version installed on any active or reserved production node for the given application. */ public Version oldestInstalledPlatform(TenantAndApplicationId id) { return controller.jobController().deploymentStatus(requireApplication(id)).jobs() .production().asList().stream() .map(this::oldestInstalledPlatform) .flatMap(Optional::stream) .min(naturalOrder()) .orElse(controller.readSystemVersion()); } /** * Creates a new application for an existing tenant. 
* * @throws IllegalArgumentException if the application already exists */ public Application createApplication(TenantAndApplicationId id, Credentials credentials) { try (Lock lock = lock(id)) { if (getApplication(id).isPresent()) throw new IllegalArgumentException("Could not create '" + id + "': Application already exists"); if (getApplication(dashToUnderscore(id)).isPresent()) throw new IllegalArgumentException("Could not create '" + id + "': Application " + dashToUnderscore(id) + " already exists"); com.yahoo.vespa.hosted.controller.api.identifiers.ApplicationId.validate(id.application().value()); if (controller.tenants().get(id.tenant()).isEmpty()) throw new IllegalArgumentException("Could not create '" + id + "': This tenant does not exist"); accessControl.createApplication(id, credentials); LockedApplication locked = new LockedApplication(new Application(id, clock.instant()), lock); store(locked); log.info("Created " + locked); return locked.get(); } } /** * Creates a new instance for an existing application. * * @throws IllegalArgumentException if the instance already exists, or has an invalid instance name. 
*/
public void createInstance(ApplicationId id) {
    lockApplicationOrThrow(TenantAndApplicationId.from(id), application -> {
        store(withNewInstance(application, id));
    });
}

/** Returns given application with a new instance */
public LockedApplication withNewInstance(LockedApplication application, ApplicationId instance) {
    if (instance.instance().isTester())
        throw new IllegalArgumentException("'" + instance + "' is a tester application!");
    InstanceId.validate(instance.instance().value());
    if (getInstance(instance).isPresent())
        throw new IllegalArgumentException("Could not create '" + instance + "': Instance already exists");
    // Names differing only by '-' vs '_' collide in other systems, so reject those as well.
    if (getInstance(dashToUnderscore(instance)).isPresent())
        throw new IllegalArgumentException("Could not create '" + instance + "': Instance " + dashToUnderscore(instance) + " already exists");

    log.info("Created " + instance);
    return application.withNewInstance(instance.instance());
}

/** Deploys an application package for an existing application instance. */
public ActivateResult deploy(JobId job, boolean deploySourceVersions) {
    if (job.application().instance().isTester())
        throw new IllegalArgumentException("'" + job.application() + "' is a tester application!");
    TenantAndApplicationId applicationId = TenantAndApplicationId.from(job.application());
    ZoneId zone = job.type().zone(controller.system());
    DeploymentId deployment = new DeploymentId(job.application(), zone);

    // Serialize deployments of this application to the same zone.
    try (Lock deploymentLock = lockForDeployment(job.application(), zone)) {
        Set<ContainerEndpoint> containerEndpoints;
        Optional<EndpointCertificateMetadata> endpointCertificateMetadata;

        Run run = controller.jobController().last(job)
                            .orElseThrow(() -> new IllegalStateException("No known run of '" + job + "'"));
        if (run.hasEnded())
            throw new IllegalStateException("No deployment expected for " + job + " now, as no job is running");

        // Use source versions when requested and available, otherwise the run's target versions.
        Version platform = run.versions().sourcePlatform().filter(__ -> deploySourceVersions).orElse(run.versions().targetPlatform());
        ApplicationVersion revision = run.versions().sourceApplication().filter(__ -> deploySourceVersions).orElse(run.versions().targetApplication());
        ApplicationPackage applicationPackage = new ApplicationPackage(applicationStore.get(deployment, revision));

        // Hold the application lock only while reading state and preparing certificates/routing;
        // the config server deployment below happens outside it.
        try (Lock lock = lock(applicationId)) {
            LockedApplication application = new LockedApplication(requireApplication(applicationId), lock);
            Instance instance = application.get().require(job.application().instance());

            if ( ! applicationPackage.trustedCertificates().isEmpty() && run.testerCertificate().isPresent())
                applicationPackage = applicationPackage.withTrustedCertificate(run.testerCertificate().get());

            endpointCertificateMetadata = endpointCertificates.getMetadata(instance, zone, applicationPackage.deploymentSpec());
            containerEndpoints = controller.routing().of(deployment).prepare(application);
        }

        ActivateResult result = deploy(job.application(), applicationPackage, zone, platform, containerEndpoints,
                                       endpointCertificateMetadata, run.isDryRun());

        var quotaUsage = deploymentQuotaUsage(zone, job.application());

        // Manual deployments are tracked per deployment; automated ones per application.
        NotificationSource source = zone.environment().isManuallyDeployed() ? NotificationSource.from(deployment)
                                                                            : NotificationSource.from(applicationId);
        @SuppressWarnings("deprecation")
        List<String> warnings = Optional.ofNullable(result.prepareResponse().log)
                                        .map(logs -> logs.stream()
                                                         .filter(log -> log.applicationPackage)
                                                         .filter(log -> LogLevel.parse(log.level).intValue() >= Level.WARNING.intValue())
                                                         .map(log -> log.message)
                                                         .sorted()
                                                         .distinct()
                                                         .collect(Collectors.toList()))
                                        .orElseGet(List::of);
        if (warnings.isEmpty())
            controller.notificationsDb().removeNotification(source, Notification.Type.applicationPackage);
        else
            controller.notificationsDb().setNotification(source, Notification.Type.applicationPackage, Notification.Level.warning, warnings);

        lockApplicationOrThrow(applicationId, application ->
                store(application.with(job.application().instance(),
                                       instance -> instance.withNewDeployment(zone, revision, platform, clock.instant(),
                                                                              warningsFrom(result), quotaUsage))));
        return result;
    }
}

/** Stores the deployment spec and validation overrides from the application package, and runs cleanup.
*/

/** Deploy a system application to given zone */
public void deploy(SystemApplication application, ZoneId zone, Version version, boolean allowDowngrade) {
    if (application.hasApplicationPackage()) {
        deploySystemApplicationPackage(application, zone, version);
    } else {
        // System applications without a package are upgraded directly through the node repository.
        configServer.nodeRepository().upgrade(zone, application.nodeType(), version, allowDowngrade);
    }
}

/** Deploy a system application to given zone */
public ActivateResult deploySystemApplicationPackage(SystemApplication application, ZoneId zone, Version version) {
    if (application.hasApplicationPackage()) {
        ApplicationPackage applicationPackage = new ApplicationPackage(
                artifactRepository.getSystemApplicationPackage(application.id(), zone, version)
        );
        return deploy(application.id(), applicationPackage, zone, version, Set.of(), /* No application cert */ Optional.empty(), false);
    } else {
        throw new RuntimeException("This system application does not have an application package: " + application.id().toShortString());
    }
}

/** Deploys the given tester application to the given zone. */
public ActivateResult deployTester(TesterId tester, ApplicationPackage applicationPackage, ZoneId zone, Version platform) {
    return deploy(tester.id(), applicationPackage, zone, platform, Set.of(), /* No application cert for tester*/ Optional.empty(), false);
}

/** Common deployment path: prepares and activates the given package on the config server of the given zone. */
private ActivateResult deploy(ApplicationId application, ApplicationPackage applicationPackage, ZoneId zone,
                              Version platform, Set<ContainerEndpoint> endpoints,
                              Optional<EndpointCertificateMetadata> endpointCertificateMetadata, boolean dryRun) {
    DeploymentId deployment = new DeploymentId(application, zone);
    try {
        // A feature flag may override the docker image repo per zone/application; blank means unset.
        Optional<DockerImage> dockerImageRepo = Optional.ofNullable(
                dockerImageRepoFlag
                        .with(FetchVector.Dimension.ZONE_ID, zone.value())
                        .with(FetchVector.Dimension.APPLICATION_ID, application.serializedForm())
                        .value())
                .filter(s -> !s.isBlank())
                .map(DockerImage::fromString);

        Optional<AthenzDomain> domain = controller.tenants().get(application.tenant())
                .filter(tenant-> tenant instanceof AthenzTenant)
                .map(tenant -> ((AthenzTenant)tenant).domain());

        // Manual deployments have no job which would otherwise store the package meta data.
        if (zone.environment().isManuallyDeployed())
            controller.applications().applicationStore().putMeta(deployment, clock.instant(), applicationPackage.metaDataZip());

        Quota deploymentQuota = DeploymentQuotaCalculator.calculate(billingController.getQuota(application.tenant()),
                asList(application.tenant()), application, zone, applicationPackage.deploymentSpec());

        List<TenantSecretStore> tenantSecretStores = controller.tenants()
                .get(application.tenant())
                .filter(tenant-> tenant instanceof CloudTenant)
                .map(tenant -> ((CloudTenant) tenant).tenantSecretStores())
                .orElse(List.of());

        List<X509Certificate> operatorCertificates = controller.supportAccess().activeGrantsFor(deployment).stream()
                .map(SupportAccessGrant::certificate)
                .collect(toList());

        ConfigServer.PreparedApplication preparedApplication =
                configServer.deploy(new DeploymentData(application, zone, applicationPackage.zippedContent(), platform,
                                                       endpoints, endpointCertificateMetadata, dockerImageRepo, domain,
                                                       deploymentQuota, tenantSecretStores, operatorCertificates, dryRun));

        return new ActivateResult(new RevisionId(applicationPackage.hash()), preparedApplication.prepareResponse(),
                                  applicationPackage.zippedContent().length);
    } finally {
        // Always (re)configure routing — even on failure — so routing state stays in sync with the spec.
        if ( ! application.instance().isTester()) {
            controller.routing().of(deployment).configure(applicationPackage.deploymentSpec());
        }
    }
}

/** Removes production deployments no longer referenced by the deployment spec, if explicitly allowed. */
private LockedApplication withoutDeletedDeployments(LockedApplication application, InstanceName instance) {
    DeploymentSpec deploymentSpec = application.get().deploymentSpec();
    // Production deployments whose zone (or whole instance) is no longer in deployment.xml.
    List<ZoneId> deploymentsToRemove = application.get().require(instance).productionDeployments().values().stream()
                                                  .map(Deployment::zone)
                                                  .filter(zone -> deploymentSpec.instance(instance).isEmpty()
                                                                  || ! deploymentSpec.requireInstance(instance).deploysTo(zone.environment(), zone.region()))
                                                  .collect(toList());

    if (deploymentsToRemove.isEmpty()) return application;

    // Removing deployments is destructive, so it must be allowed by a validation override.
    if ( ! application.get().validationOverrides().allows(ValidationId.deploymentRemoval, clock.instant()))
        throw new IllegalArgumentException(ValidationId.deploymentRemoval.value() + ": " + application.get().require(instance) +
                                           " is deployed in " +
                                           deploymentsToRemove.stream()
                                                              .map(zone -> zone.region().value())
                                                              .collect(joining(", ")) +
                                           ", but does not include " +
                                           (deploymentsToRemove.size() > 1 ? "these zones" : "this zone") +
                                           " in deployment.xml. " +
                                           ValidationOverrides.toAllowMessage(ValidationId.deploymentRemoval));

    // Also drop the instance itself when it is gone from the spec and all its deployments are removed.
    boolean removeInstance = ! deploymentSpec.instanceNames().contains(instance)
                             && application.get().require(instance).deployments().size() == deploymentsToRemove.size();
    for (ZoneId zone : deploymentsToRemove)
        application = deactivate(application, instance, zone);
    if (removeInstance)
        application = application.without(instance);
    return application;
}

/**
 * Deletes the given application. All known instances of the application will be deleted.
*
 * @throws IllegalArgumentException if the application has deployments or the caller is not authorized
 */
public void deleteApplication(TenantAndApplicationId id, Credentials credentials) {
    deleteApplication(id, Optional.of(credentials));
}

public void deleteApplication(TenantAndApplicationId id, Optional<Credentials> credentials) {
    lockApplicationOrThrow(id, application -> {
        // Refuse deletion while any instance still has active deployments; list them by instance.
        var deployments = application.get().instances().values().stream()
                                     .filter(instance -> ! instance.deployments().isEmpty())
                                     .collect(toMap(instance -> instance.name(),
                                                    instance -> instance.deployments().keySet().stream()
                                                                        .map(ZoneId::toString)
                                                                        .collect(joining(", "))));
        if ( ! deployments.isEmpty())
            throw new IllegalArgumentException("Could not delete '" + application + "': It has active deployments: " + deployments);

        for (Instance instance : application.get().instances().values()) {
            controller.routing().removeEndpointsInDns(application.get(), instance.name());
            application = application.without(instance.name());
        }
        applicationStore.removeAll(id.tenant(), id.application());
        applicationStore.removeAllTesters(id.tenant(), id.application());
        applicationStore.putMetaTombstone(id.tenant(), id.application(), clock.instant());

        credentials.ifPresent(creds -> accessControl.deleteApplication(id, creds));
        curator.removeApplication(id);

        controller.jobController().collectGarbage();
        controller.notificationsDb().removeNotifications(NotificationSource.from(id));
        log.info("Deleted " + id);
    });
}

/**
 * Deletes the given application instance.
 *
 * @throws IllegalArgumentException if the application has deployments or the caller is not authorized
 * @throws NotExistsException if the instance does not exist
 */
public void deleteInstance(ApplicationId instanceId) {
    if (getInstance(instanceId).isEmpty())
        throw new NotExistsException("Could not delete instance '" + instanceId + "': Instance not found");

    lockApplicationOrThrow(TenantAndApplicationId.from(instanceId), application -> {
        // NOTE(review): this message prints the whole application, not the instance id — confirm intended.
        if ( ! application.get().require(instanceId.instance()).deployments().isEmpty())
            throw new IllegalArgumentException("Could not delete '" + application + "': It has active deployments in: " +
                                               application.get().require(instanceId.instance()).deployments().keySet().stream().map(ZoneId::toString)
                                                          .sorted().collect(joining(", ")));
        // Instances declared in deployment.xml would just be recreated, so require removing them there first.
        if ( ! application.get().deploymentSpec().equals(DeploymentSpec.empty)
             && application.get().deploymentSpec().instanceNames().contains(instanceId.instance()))
            throw new IllegalArgumentException("Can not delete '" + instanceId + "', which is specified in 'deployment.xml'; remove it there first");

        controller.routing().removeEndpointsInDns(application.get(), instanceId.instance());
        curator.writeApplication(application.without(instanceId.instance()).get());
        controller.jobController().collectGarbage();
        controller.notificationsDb().removeNotifications(NotificationSource.from(instanceId));
        log.info("Deleted " + instanceId);
    });
}

/**
 * Replace any previous version of this application by this instance
 *
 * @param application a locked application to store
 */
public void store(LockedApplication application) {
    curator.writeApplication(application.get());
}

/**
 * Acquire a locked application to modify and store, if there is an application with the given id.
 *
 * @param applicationId ID of the application to lock and get.
 * @param action Function which acts on the locked application.
 */
public void lockApplicationIfPresent(TenantAndApplicationId applicationId, Consumer<LockedApplication> action) {
    try (Lock lock = lock(applicationId)) {
        getApplication(applicationId).map(application -> new LockedApplication(application, lock)).ifPresent(action);
    }
}

/**
 * Acquire a locked application to modify and store, or throw an exception if no application has the given id.
 *
 * @param applicationId ID of the application to lock and require.
 * @param action Function which acts on the locked application.
 * @throws IllegalArgumentException when application does not exist.
*/
public void lockApplicationOrThrow(TenantAndApplicationId applicationId, Consumer<LockedApplication> action) {
    try (Lock lock = lock(applicationId)) {
        action.accept(new LockedApplication(requireApplication(applicationId), lock));
    }
}

/**
 * Tells config server to schedule a restart of all nodes in this deployment
 *
 * @param restartFilter Variables to filter which nodes to restart.
 */
public void restart(DeploymentId deploymentId, RestartFilter restartFilter) {
    configServer.restart(deploymentId, restartFilter);
}

/**
 * Asks the config server whether this deployment is currently healthy, i.e., serving traffic as usual.
 * If this cannot be ascertained, we must assume it is not.
 */
public boolean isHealthy(DeploymentId deploymentId) {
    try {
        return ! isSuspended(deploymentId);
    }
    catch (RuntimeException e) {
        // Treat "unknown" as unhealthy, but log why the status could not be read.
        log.log(Level.WARNING, "Failed getting suspension status of " + deploymentId + ": " + Exceptions.toMessageString(e));
        return false;
    }
}

/**
 * Asks the config server whether this deployment is currently <i>suspended</i>:
 * Not in a state where it should receive traffic.
 */
public boolean isSuspended(DeploymentId deploymentId) {
    return configServer.isSuspended(deploymentId);
}

/** Sets suspension status of the given deployment in its zone. */
public void setSuspension(DeploymentId deploymentId, boolean suspend) {
    configServer.setSuspension(deploymentId, suspend);
}

/** Deactivate application in the given zone */
public void deactivate(ApplicationId id, ZoneId zone) {
    lockApplicationOrThrow(TenantAndApplicationId.from(id),
                           application -> store(deactivate(application, id.instance(), zone)));
}

/**
 * Deactivates a locked application without storing it
 *
 * @return the application with the deployment in the given zone removed
 */
private LockedApplication deactivate(LockedApplication application, InstanceName instanceName, ZoneId zone) {
    DeploymentId id = new DeploymentId(application.get().id().instance(instanceName), zone);
    try {
        configServer.deactivate(id);
    }
    finally {
        // Keep routing, metadata and notifications consistent even if deactivation throws.
        controller.routing().of(id).configure(application.get().deploymentSpec());
        if (zone.environment().isManuallyDeployed())
            applicationStore.putMetaTombstone(id, clock.instant());
        if (!zone.environment().isTest())
            controller.notificationsDb().removeNotifications(NotificationSource.from(id));
    }
    return application.with(instanceName, instance -> instance.withoutDeploymentIn(zone));
}

public DeploymentTrigger deploymentTrigger() { return deploymentTrigger; }

/**
 * Returns a lock which provides exclusive rights to changing this application.
 * Any operation which stores an application needs to first acquire this lock, then read, modify
 * and store the application, and finally release (close) the lock.
 */
Lock lock(TenantAndApplicationId application) {
    return curator.lock(application);
}

/**
 * Returns a lock which provides exclusive rights to deploying this application to the given zone.
 */
private Lock lockForDeployment(ApplicationId application, ZoneId zone) {
    return curator.lockForDeployment(application, zone);
}

/**
 * Verifies that the application can be deployed to the tenant, following these rules:
 *
 * 1. Verify that the Athenz service can be launched by the config server
 * 2.
If the principal is given, verify that the principal is tenant admin or admin of the tenant domain * 3. If the principal is not given, verify that the Athenz domain of the tenant equals Athenz domain given in deployment.xml * * @param tenantName tenant where application should be deployed * @param applicationPackage application package * @param deployer principal initiating the deployment, possibly empty */ public void verifyApplicationIdentityConfiguration(TenantName tenantName, Optional<InstanceName> instanceName, Optional<ZoneId> zoneId, ApplicationPackage applicationPackage, Optional<Principal> deployer) { Optional<AthenzDomain> identityDomain = applicationPackage.deploymentSpec().athenzDomain() .map(domain -> new AthenzDomain(domain.value())); if(identityDomain.isEmpty()) { return; } if(! (accessControl instanceof AthenzFacade)) { throw new IllegalArgumentException("Athenz domain and service specified in deployment.xml, but not supported by system."); } verifyAllowedLaunchAthenzService(applicationPackage.deploymentSpec()); Optional<AthenzUser> athenzUser = getUser(deployer); if (athenzUser.isPresent()) { var zone = zoneId.orElseThrow(() -> new IllegalArgumentException("Unable to evaluate access, no zone provided in deployment")); var serviceToLaunch = instanceName .flatMap(instance -> applicationPackage.deploymentSpec().instance(instance)) .flatMap(instanceSpec -> instanceSpec.athenzService(zone.environment(), zone.region())) .or(() -> applicationPackage.deploymentSpec().athenzService()) .map(service -> new AthenzService(identityDomain.get(), service.value())); if(serviceToLaunch.isPresent()) { if ( ! ((AthenzFacade) accessControl).canLaunch(athenzUser.get(), serviceToLaunch.get()) && ! ((AthenzFacade) accessControl).hasTenantAdminAccess(athenzUser.get(), identityDomain.get()) ) { throw new IllegalArgumentException("User " + athenzUser.get().getFullName() + " is not allowed to launch " + "service " + serviceToLaunch.get().getFullName() + ". 
" + "Please reach out to the domain admin."); } } else { throw new IllegalArgumentException("Athenz domain configured, but no service defined for deployment to " + zone.value()); } } else { Tenant tenant = controller.tenants().require(tenantName); AthenzDomain tenantDomain = ((AthenzTenant) tenant).domain(); if ( ! Objects.equals(tenantDomain, identityDomain.get())) throw new IllegalArgumentException("Athenz domain in deployment.xml: [" + identityDomain.get().getName() + "] " + "must match tenant domain: [" + tenantDomain.getName() + "]"); } } private TenantAndApplicationId dashToUnderscore(TenantAndApplicationId id) { return TenantAndApplicationId.from(id.tenant().value(), id.application().value().replaceAll("-", "_")); } private ApplicationId dashToUnderscore(ApplicationId id) { return dashToUnderscore(TenantAndApplicationId.from(id)).instance(id.instance()); } private QuotaUsage deploymentQuotaUsage(ZoneId zoneId, ApplicationId applicationId) { var application = configServer.nodeRepository().getApplication(zoneId, applicationId); return DeploymentQuotaCalculator.calculateQuotaUsage(application); } /* * Get the AthenzUser from this principal or Optional.empty if this does not represent a user. */ private Optional<AthenzUser> getUser(Optional<Principal> deployer) { return deployer .filter(AthenzPrincipal.class::isInstance) .map(AthenzPrincipal.class::cast) .map(AthenzPrincipal::getIdentity) .filter(AthenzUser.class::isInstance) .map(AthenzUser.class::cast); } /* * Verifies that the configured athenz service (if any) can be launched. 
*/ private void verifyAllowedLaunchAthenzService(DeploymentSpec deploymentSpec) { deploymentSpec.athenzDomain().ifPresent(domain -> { controller.zoneRegistry().zones().reachable().ids().forEach(zone -> { AthenzIdentity configServerAthenzIdentity = controller.zoneRegistry().getConfigServerHttpsIdentity(zone); deploymentSpec.athenzService().ifPresent(service -> { verifyAthenzServiceCanBeLaunchedBy(configServerAthenzIdentity, new AthenzService(domain.value(), service.value())); }); deploymentSpec.instances().forEach(spec -> { spec.athenzService(zone.environment(), zone.region()).ifPresent(service -> { verifyAthenzServiceCanBeLaunchedBy(configServerAthenzIdentity, new AthenzService(domain.value(), service.value())); }); }); }); }); } private void verifyAthenzServiceCanBeLaunchedBy(AthenzIdentity configServerAthenzIdentity, AthenzService athenzService) { if ( ! ((AthenzFacade) accessControl).canLaunch(configServerAthenzIdentity, athenzService)) throw new IllegalArgumentException("Not allowed to launch Athenz service " + athenzService.getFullName()); } /** Returns the latest known version within the given major, which is not newer than the system version. */ public Optional<Version> lastCompatibleVersion(int targetMajorVersion) { VersionStatus versions = controller.readVersionStatus(); Version systemVersion = controller.systemVersion(versions); return versions.versions().stream() .map(VespaVersion::versionNumber) .filter(version -> version.getMajor() == targetMajorVersion) .filter(version -> ! 
version.isAfter(systemVersion)) .max(naturalOrder()); } /** Extract deployment warnings metric from deployment result */ private static Map<DeploymentMetrics.Warning, Integer> warningsFrom(ActivateResult result) { if (result.prepareResponse().log == null) return Map.of(); Map<DeploymentMetrics.Warning, Integer> warnings = new HashMap<>(); for (Log log : result.prepareResponse().log) { if (!"warn".equalsIgnoreCase(log.level) && !"warning".equalsIgnoreCase(log.level)) continue; warnings.merge(DeploymentMetrics.Warning.all, 1, Integer::sum); } return Map.copyOf(warnings); } }
class ApplicationController {

    private static final Logger log = Logger.getLogger(ApplicationController.class.getName());

    /** The controller owning this */
    private final Controller controller;

    /** For persistence */
    private final CuratorDb curator;

    private final ArtifactRepository artifactRepository;
    private final ApplicationStore applicationStore;
    private final AccessControl accessControl;
    private final ConfigServer configServer;
    private final Clock clock;
    private final DeploymentTrigger deploymentTrigger;
    private final ApplicationPackageValidator applicationPackageValidator;
    private final EndpointCertificates endpointCertificates;
    private final StringFlag dockerImageRepoFlag;
    private final BillingController billingController;

    ApplicationController(Controller controller, CuratorDb curator, AccessControl accessControl, Clock clock,
                          FlagSource flagSource, BillingController billingController) {
        this.controller = Objects.requireNonNull(controller);
        this.curator = Objects.requireNonNull(curator);
        this.accessControl = Objects.requireNonNull(accessControl);
        this.configServer = controller.serviceRegistry().configServer();
        this.clock = Objects.requireNonNull(clock);
        this.billingController = Objects.requireNonNull(billingController);

        artifactRepository = controller.serviceRegistry().artifactRepository();
        applicationStore = controller.serviceRegistry().applicationStore();
        dockerImageRepoFlag = PermanentFlags.DOCKER_IMAGE_REPO.bindTo(flagSource);
        deploymentTrigger = new DeploymentTrigger(controller, clock);
        applicationPackageValidator = new ApplicationPackageValidator(controller);
        endpointCertificates = new EndpointCertificates(controller,
                                                        controller.serviceRegistry().endpointCertificateProvider(),
                                                        controller.serviceRegistry().endpointCertificateValidator());

        // Deferred background pass: ensure every instance declared in each application's deployment
        // spec also exists in the stored application, then rewrite each application once.
        Once.after(Duration.ofMinutes(1), () -> {
            Instant start = clock.instant();
            int count = 0;
            for (TenantAndApplicationId id : curator.readApplicationIds()) {
                lockApplicationIfPresent(id, application -> {
                    for (InstanceName instance : application.get().deploymentSpec().instanceNames())
                        if (!application.get().instances().containsKey(instance))
                            application = withNewInstance(application, id.instance(instance));
                    store(application);
                });
                count++;
            }
            log.log(Level.INFO, Text.format("Wrote %d applications in %s", count, Duration.between(start, clock.instant())));
        });
    }

    /** Returns the application with the given id, or empty if it is not present */
    public Optional<Application> getApplication(TenantAndApplicationId id) {
        return curator.readApplication(id);
    }

    /** Returns the instance with the given id, or empty if it is not present */
    public Optional<Instance> getInstance(ApplicationId id) {
        return getApplication(TenantAndApplicationId.from(id)).flatMap(application -> application.get(id.instance()));
    }

    /**
     * Triggers reindexing for the given document types in the given clusters, for the given application.
     *
     * If no clusters are given, reindexing is triggered for the entire application; otherwise
     * if no documents types are given, reindexing is triggered for all given clusters; otherwise
     * reindexing is triggered for the cartesian product of the given clusters and document types.
     */
    public void reindex(ApplicationId id, ZoneId zoneId, List<String> clusterNames, List<String> documentTypes, boolean indexedOnly, Double speed) {
        configServer.reindex(new DeploymentId(id, zoneId), clusterNames, documentTypes, indexedOnly, speed);
    }

    /** Returns the reindexing status for the given application in the given zone. */
    public ApplicationReindexing applicationReindexing(ApplicationId id, ZoneId zoneId) {
        return configServer.getReindexing(new DeploymentId(id, zoneId));
    }

    /** Enables reindexing for the given application in the given zone. */
    public void enableReindexing(ApplicationId id, ZoneId zoneId) {
        configServer.enableReindexing(new DeploymentId(id, zoneId));
    }

    /** Disables reindexing for the given application in the given zone. */
    public void disableReindexing(ApplicationId id, ZoneId zoneId) {
        configServer.disableReindexing(new DeploymentId(id, zoneId));
    }

    /**
     * Returns the application with the given id
     *
     * @throws IllegalArgumentException if it does not exist
     */
    public Application requireApplication(TenantAndApplicationId id) {
        return getApplication(id).orElseThrow(() -> new IllegalArgumentException(id + " not found"));
    }

    /**
     * Returns the instance with the given id
     *
     * @throws IllegalArgumentException if it does not exist
     */
    public Instance requireInstance(ApplicationId id) {
        return getInstance(id).orElseThrow(() -> new IllegalArgumentException(id + " not found"));
    }

    /** Returns a snapshot of all applications */
    public List<Application> asList() {
        return curator.readApplications(false);
    }

    /**
     * Returns a snapshot of all readable applications. Unlike {@link ApplicationController#asList()}, this ignores
     * applications that cannot currently be read (e.g. due to serialization issues) and may return an incomplete
     * snapshot.
     *
     * This should only be used in cases where acting on a subset of applications is better than none.
     */
    public List<Application> readable() {
        return curator.readApplications(true);
    }

    /** Returns the ID of all known applications. */
    public List<TenantAndApplicationId> idList() {
        return curator.readApplicationIds();
    }

    /** Returns a snapshot of all applications of a tenant */
    public List<Application> asList(TenantName tenant) {
        return curator.readApplications(tenant);
    }

    public ArtifactRepository artifacts() { return artifactRepository; }

    public ApplicationStore applicationStore() { return applicationStore; }

    /** Returns all currently reachable content clusters among the given deployments.
*/
    public Map<ZoneId, List<String>> reachableContentClustersByZone(Collection<DeploymentId> ids) {
        // TreeMap keyed on zone id value, so the returned map iterates in a deterministic order.
        Map<ZoneId, List<String>> clusters = new TreeMap<>(Comparator.comparing(ZoneId::value));
        for (DeploymentId id : ids)
            if (isHealthy(id)) // suspended or unreachable deployments are silently skipped
                clusters.put(id.zoneId(), List.copyOf(configServer.getContentClusters(id)));
        return Collections.unmodifiableMap(clusters);
    }

    /** Reads the oldest installed platform for the given application and zone from job history, or a node repo. */
    private Optional<Version> oldestInstalledPlatform(JobStatus job) {
        Version oldest = null;
        // Scan runs newest-to-oldest, tracking the lowest target platform seen; stop at the most
        // recent successful run, since older runs can no longer contribute an installed version.
        for (Run run : job.runs().descendingMap().values()) {
            Version version = run.versions().targetPlatform();
            if (oldest == null || version.isBefore(oldest)) oldest = version;
            if (run.status() == RunStatus.success) return Optional.of(oldest);
        }
        // No successful run on record: fall back to asking the zone's node repository.
        return oldestInstalledPlatform(job.id());
    }

    /** Reads the oldest installed platform for the given application and zone from the node repo of that zone. */
    private Optional<Version> oldestInstalledPlatform(JobId job) {
        return configServer.nodeRepository().list(job.type().zone(controller.system()),
                                                  NodeFilter.all()
                                                            .applications(job.application())
                                                            .states(active, reserved))
                           .stream()
                           .map(Node::currentVersion)
                           .filter(version -> ! version.isEmpty()) // nodes may not report a version yet
                           .min(naturalOrder());
    }

    /** Returns the oldest Vespa version installed on any active or reserved production node for the given application. */
    public Version oldestInstalledPlatform(TenantAndApplicationId id) {
        return controller.jobController().deploymentStatus(requireApplication(id)).jobs()
                         .production().asList().stream()
                         .map(this::oldestInstalledPlatform)
                         .flatMap(Optional::stream)
                         .min(naturalOrder())
                         .orElse(controller.readSystemVersion()); // no production nodes: assume current system version
    }

    /**
     * Creates a new application for an existing tenant.
     *
     * @throws IllegalArgumentException if the application already exists
     */
    public Application createApplication(TenantAndApplicationId id, Credentials credentials) {
        try (Lock lock = lock(id)) {
            if (getApplication(id).isPresent())
                throw new IllegalArgumentException("Could not create '" + id + "': Application already exists");
            // Names differing only by '-' vs '_' collide in other systems, so reject those as well.
            if (getApplication(dashToUnderscore(id)).isPresent())
                throw new IllegalArgumentException("Could not create '" + id + "': Application " + dashToUnderscore(id) + " already exists");

            com.yahoo.vespa.hosted.controller.api.identifiers.ApplicationId.validate(id.application().value());

            if (controller.tenants().get(id.tenant()).isEmpty())
                throw new IllegalArgumentException("Could not create '" + id + "': This tenant does not exist");
            accessControl.createApplication(id, credentials);

            LockedApplication locked = new LockedApplication(new Application(id, clock.instant()), lock);
            store(locked);
            log.info("Created " + locked);
            return locked.get();
        }
    }

    /**
     * Creates a new instance for an existing application.
     *
     * @throws IllegalArgumentException if the instance already exists, or has an invalid instance name.
*/
    public void createInstance(ApplicationId id) {
        lockApplicationOrThrow(TenantAndApplicationId.from(id), application -> {
            store(withNewInstance(application, id));
        });
    }

    /** Returns given application with a new instance */
    public LockedApplication withNewInstance(LockedApplication application, ApplicationId instance) {
        if (instance.instance().isTester())
            throw new IllegalArgumentException("'" + instance + "' is a tester application!");
        InstanceId.validate(instance.instance().value());
        if (getInstance(instance).isPresent())
            throw new IllegalArgumentException("Could not create '" + instance + "': Instance already exists");
        // Names differing only by '-' vs '_' collide in other systems, so reject those as well.
        if (getInstance(dashToUnderscore(instance)).isPresent())
            throw new IllegalArgumentException("Could not create '" + instance + "': Instance " + dashToUnderscore(instance) + " already exists");

        log.info("Created " + instance);
        return application.withNewInstance(instance.instance());
    }

    /** Deploys an application package for an existing application instance. */
    public ActivateResult deploy(JobId job, boolean deploySourceVersions) {
        if (job.application().instance().isTester())
            throw new IllegalArgumentException("'" + job.application() + "' is a tester application!");
        TenantAndApplicationId applicationId = TenantAndApplicationId.from(job.application());
        ZoneId zone = job.type().zone(controller.system());
        DeploymentId deployment = new DeploymentId(job.application(), zone);

        // Serialize deployments of this application to the same zone.
        try (Lock deploymentLock = lockForDeployment(job.application(), zone)) {
            Set<ContainerEndpoint> containerEndpoints;
            Optional<EndpointCertificateMetadata> endpointCertificateMetadata;

            Run run = controller.jobController().last(job)
                                .orElseThrow(() -> new IllegalStateException("No known run of '" + job + "'"));
            if (run.hasEnded())
                throw new IllegalStateException("No deployment expected for " + job + " now, as no job is running");

            // Use source versions when requested and available, otherwise the run's target versions.
            Version platform = run.versions().sourcePlatform().filter(__ -> deploySourceVersions).orElse(run.versions().targetPlatform());
            ApplicationVersion revision = run.versions().sourceApplication().filter(__ -> deploySourceVersions).orElse(run.versions().targetApplication());
            ApplicationPackage applicationPackage = new ApplicationPackage(applicationStore.get(deployment, revision));

            // Hold the application lock only while reading state and preparing certificates/routing;
            // the config server deployment below happens outside it.
            try (Lock lock = lock(applicationId)) {
                LockedApplication application = new LockedApplication(requireApplication(applicationId), lock);
                Instance instance = application.get().require(job.application().instance());

                if ( ! applicationPackage.trustedCertificates().isEmpty() && run.testerCertificate().isPresent())
                    applicationPackage = applicationPackage.withTrustedCertificate(run.testerCertificate().get());

                endpointCertificateMetadata = endpointCertificates.getMetadata(instance, zone, applicationPackage.deploymentSpec());
                containerEndpoints = controller.routing().of(deployment).prepare(application);
            }

            ActivateResult result = deploy(job.application(), applicationPackage, zone, platform, containerEndpoints,
                                           endpointCertificateMetadata, run.isDryRun());

            var quotaUsage = deploymentQuotaUsage(zone, job.application());

            // Manual deployments are tracked per deployment; automated ones per application.
            NotificationSource source = zone.environment().isManuallyDeployed() ? NotificationSource.from(deployment)
                                                                                : NotificationSource.from(applicationId);
            @SuppressWarnings("deprecation")
            List<String> warnings = Optional.ofNullable(result.prepareResponse().log)
                                            .map(logs -> logs.stream()
                                                             .filter(log -> log.applicationPackage)
                                                             .filter(log -> LogLevel.parse(log.level).intValue() >= Level.WARNING.intValue())
                                                             .map(log -> log.message)
                                                             .sorted()
                                                             .distinct()
                                                             .collect(Collectors.toList()))
                                            .orElseGet(List::of);
            if (warnings.isEmpty())
                controller.notificationsDb().removeNotification(source, Notification.Type.applicationPackage);
            else
                controller.notificationsDb().setNotification(source, Notification.Type.applicationPackage, Notification.Level.warning, warnings);

            lockApplicationOrThrow(applicationId, application ->
                    store(application.with(job.application().instance(),
                                           instance -> instance.withNewDeployment(zone, revision, platform, clock.instant(),
                                                                                  warningsFrom(result), quotaUsage))));
            return result;
        }
    }

    /** Stores the deployment spec and validation overrides from the application package, and runs cleanup.
*/ /** Deploy a system application to given zone */ public void deploy(SystemApplication application, ZoneId zone, Version version, boolean allowDowngrade) { if (application.hasApplicationPackage()) { deploySystemApplicationPackage(application, zone, version); } else { configServer.nodeRepository().upgrade(zone, application.nodeType(), version, allowDowngrade); } } /** Deploy a system application to given zone */ public ActivateResult deploySystemApplicationPackage(SystemApplication application, ZoneId zone, Version version) { if (application.hasApplicationPackage()) { ApplicationPackage applicationPackage = new ApplicationPackage( artifactRepository.getSystemApplicationPackage(application.id(), zone, version) ); return deploy(application.id(), applicationPackage, zone, version, Set.of(), /* No application cert */ Optional.empty(), false); } else { throw new RuntimeException("This system application does not have an application package: " + application.id().toShortString()); } } /** Deploys the given tester application to the given zone. 
*/ public ActivateResult deployTester(TesterId tester, ApplicationPackage applicationPackage, ZoneId zone, Version platform) { return deploy(tester.id(), applicationPackage, zone, platform, Set.of(), /* No application cert for tester*/ Optional.empty(), false); } private ActivateResult deploy(ApplicationId application, ApplicationPackage applicationPackage, ZoneId zone, Version platform, Set<ContainerEndpoint> endpoints, Optional<EndpointCertificateMetadata> endpointCertificateMetadata, boolean dryRun) { DeploymentId deployment = new DeploymentId(application, zone); try { Optional<DockerImage> dockerImageRepo = Optional.ofNullable( dockerImageRepoFlag .with(FetchVector.Dimension.ZONE_ID, zone.value()) .with(FetchVector.Dimension.APPLICATION_ID, application.serializedForm()) .value()) .filter(s -> !s.isBlank()) .map(DockerImage::fromString); Optional<AthenzDomain> domain = controller.tenants().get(application.tenant()) .filter(tenant-> tenant instanceof AthenzTenant) .map(tenant -> ((AthenzTenant)tenant).domain()); if (zone.environment().isManuallyDeployed()) controller.applications().applicationStore().putMeta(deployment, clock.instant(), applicationPackage.metaDataZip()); Quota deploymentQuota = DeploymentQuotaCalculator.calculate(billingController.getQuota(application.tenant()), asList(application.tenant()), application, zone, applicationPackage.deploymentSpec()); List<TenantSecretStore> tenantSecretStores = controller.tenants() .get(application.tenant()) .filter(tenant-> tenant instanceof CloudTenant) .map(tenant -> ((CloudTenant) tenant).tenantSecretStores()) .orElse(List.of()); List<X509Certificate> operatorCertificates = controller.supportAccess().activeGrantsFor(deployment).stream() .map(SupportAccessGrant::certificate) .collect(toList()); ConfigServer.PreparedApplication preparedApplication = configServer.deploy(new DeploymentData(application, zone, applicationPackage.zippedContent(), platform, endpoints, endpointCertificateMetadata, dockerImageRepo, 
domain, deploymentQuota, tenantSecretStores, operatorCertificates, dryRun)); return new ActivateResult(new RevisionId(applicationPackage.hash()), preparedApplication.prepareResponse(), applicationPackage.zippedContent().length); } finally { if ( ! application.instance().isTester()) { controller.routing().of(deployment).configure(applicationPackage.deploymentSpec()); } } } private LockedApplication withoutDeletedDeployments(LockedApplication application, InstanceName instance) { DeploymentSpec deploymentSpec = application.get().deploymentSpec(); List<ZoneId> deploymentsToRemove = application.get().require(instance).productionDeployments().values().stream() .map(Deployment::zone) .filter(zone -> deploymentSpec.instance(instance).isEmpty() || ! deploymentSpec.requireInstance(instance).deploysTo(zone.environment(), zone.region())) .collect(toList()); if (deploymentsToRemove.isEmpty()) return application; if ( ! application.get().validationOverrides().allows(ValidationId.deploymentRemoval, clock.instant())) throw new IllegalArgumentException(ValidationId.deploymentRemoval.value() + ": " + application.get().require(instance) + " is deployed in " + deploymentsToRemove.stream() .map(zone -> zone.region().value()) .collect(joining(", ")) + ", but does not include " + (deploymentsToRemove.size() > 1 ? "these zones" : "this zone") + " in deployment.xml. " + ValidationOverrides.toAllowMessage(ValidationId.deploymentRemoval)); boolean removeInstance = ! deploymentSpec.instanceNames().contains(instance) && application.get().require(instance).deployments().size() == deploymentsToRemove.size(); for (ZoneId zone : deploymentsToRemove) application = deactivate(application, instance, zone); if (removeInstance) application = application.without(instance); return application; } /** * Deletes the the given application. All known instances of the applications will be deleted. 
* * @throws IllegalArgumentException if the application has deployments or the caller is not authorized */ public void deleteApplication(TenantAndApplicationId id, Credentials credentials) { deleteApplication(id, Optional.of(credentials)); } public void deleteApplication(TenantAndApplicationId id, Optional<Credentials> credentials) { lockApplicationOrThrow(id, application -> { var deployments = application.get().instances().values().stream() .filter(instance -> ! instance.deployments().isEmpty()) .collect(toMap(instance -> instance.name(), instance -> instance.deployments().keySet().stream() .map(ZoneId::toString) .collect(joining(", ")))); if ( ! deployments.isEmpty()) throw new IllegalArgumentException("Could not delete '" + application + "': It has active deployments: " + deployments); for (Instance instance : application.get().instances().values()) { controller.routing().removeEndpointsInDns(application.get(), instance.name()); application = application.without(instance.name()); } applicationStore.removeAll(id.tenant(), id.application()); applicationStore.removeAllTesters(id.tenant(), id.application()); applicationStore.putMetaTombstone(id.tenant(), id.application(), clock.instant()); credentials.ifPresent(creds -> accessControl.deleteApplication(id, creds)); curator.removeApplication(id); controller.jobController().collectGarbage(); controller.notificationsDb().removeNotifications(NotificationSource.from(id)); log.info("Deleted " + id); }); } /** * Deletes the the given application instance. * * @throws IllegalArgumentException if the application has deployments or the caller is not authorized * @throws NotExistsException if the instance does not exist */ public void deleteInstance(ApplicationId instanceId) { if (getInstance(instanceId).isEmpty()) throw new NotExistsException("Could not delete instance '" + instanceId + "': Instance not found"); lockApplicationOrThrow(TenantAndApplicationId.from(instanceId), application -> { if ( ! 
application.get().require(instanceId.instance()).deployments().isEmpty()) throw new IllegalArgumentException("Could not delete '" + application + "': It has active deployments in: " + application.get().require(instanceId.instance()).deployments().keySet().stream().map(ZoneId::toString) .sorted().collect(joining(", "))); if ( ! application.get().deploymentSpec().equals(DeploymentSpec.empty) && application.get().deploymentSpec().instanceNames().contains(instanceId.instance())) throw new IllegalArgumentException("Can not delete '" + instanceId + "', which is specified in 'deployment.xml'; remove it there first"); controller.routing().removeEndpointsInDns(application.get(), instanceId.instance()); curator.writeApplication(application.without(instanceId.instance()).get()); controller.jobController().collectGarbage(); controller.notificationsDb().removeNotifications(NotificationSource.from(instanceId)); log.info("Deleted " + instanceId); }); } /** * Replace any previous version of this application by this instance * * @param application a locked application to store */ public void store(LockedApplication application) { curator.writeApplication(application.get()); } /** * Acquire a locked application to modify and store, if there is an application with the given id. * * @param applicationId ID of the application to lock and get. * @param action Function which acts on the locked application. */ public void lockApplicationIfPresent(TenantAndApplicationId applicationId, Consumer<LockedApplication> action) { try (Lock lock = lock(applicationId)) { getApplication(applicationId).map(application -> new LockedApplication(application, lock)).ifPresent(action); } } /** * Acquire a locked application to modify and store, or throw an exception if no application has the given id. * * @param applicationId ID of the application to lock and require. * @param action Function which acts on the locked application. * @throws IllegalArgumentException when application does not exist. 
*/ public void lockApplicationOrThrow(TenantAndApplicationId applicationId, Consumer<LockedApplication> action) { try (Lock lock = lock(applicationId)) { action.accept(new LockedApplication(requireApplication(applicationId), lock)); } } /** * Tells config server to schedule a restart of all nodes in this deployment * * @param restartFilter Variables to filter which nodes to restart. */ public void restart(DeploymentId deploymentId, RestartFilter restartFilter) { configServer.restart(deploymentId, restartFilter); } /** * Asks the config server whether this deployment is currently healthy, i.e., serving traffic as usual. * If this cannot be ascertained, we must assumed it is not. */ public boolean isHealthy(DeploymentId deploymentId) { try { return ! isSuspended(deploymentId); } catch (RuntimeException e) { log.log(Level.WARNING, "Failed getting suspension status of " + deploymentId + ": " + Exceptions.toMessageString(e)); return false; } } /** * Asks the config server whether this deployment is currently <i>suspended</i>: * Not in a state where it should receive traffic. */ public boolean isSuspended(DeploymentId deploymentId) { return configServer.isSuspended(deploymentId); } /** Sets suspension status of the given deployment in its zone. 
*/ public void setSuspension(DeploymentId deploymentId, boolean suspend) { configServer.setSuspension(deploymentId, suspend); } /** Deactivate application in the given zone */ public void deactivate(ApplicationId id, ZoneId zone) { lockApplicationOrThrow(TenantAndApplicationId.from(id), application -> store(deactivate(application, id.instance(), zone))); } /** * Deactivates a locked application without storing it * * @return the application with the deployment in the given zone removed */ private LockedApplication deactivate(LockedApplication application, InstanceName instanceName, ZoneId zone) { DeploymentId id = new DeploymentId(application.get().id().instance(instanceName), zone); try { configServer.deactivate(id); } finally { controller.routing().of(id).configure(application.get().deploymentSpec()); if (zone.environment().isManuallyDeployed()) applicationStore.putMetaTombstone(id, clock.instant()); if (!zone.environment().isTest()) controller.notificationsDb().removeNotifications(NotificationSource.from(id)); } return application.with(instanceName, instance -> instance.withoutDeploymentIn(zone)); } public DeploymentTrigger deploymentTrigger() { return deploymentTrigger; } /** * Returns a lock which provides exclusive rights to changing this application. * Any operation which stores an application need to first acquire this lock, then read, modify * and store the application, and finally release (close) the lock. */ Lock lock(TenantAndApplicationId application) { return curator.lock(application); } /** * Returns a lock which provides exclusive rights to deploying this application to the given zone. */ private Lock lockForDeployment(ApplicationId application, ZoneId zone) { return curator.lockForDeployment(application, zone); } /** * Verifies that the application can be deployed to the tenant, following these rules: * * 1. Verify that the Athenz service can be launched by the config server * 2. 
If the principal is given, verify that the principal is tenant admin or admin of the tenant domain * 3. If the principal is not given, verify that the Athenz domain of the tenant equals Athenz domain given in deployment.xml * * @param tenantName tenant where application should be deployed * @param applicationPackage application package * @param deployer principal initiating the deployment, possibly empty */ public void verifyApplicationIdentityConfiguration(TenantName tenantName, Optional<InstanceName> instanceName, Optional<ZoneId> zoneId, ApplicationPackage applicationPackage, Optional<Principal> deployer) { Optional<AthenzDomain> identityDomain = applicationPackage.deploymentSpec().athenzDomain() .map(domain -> new AthenzDomain(domain.value())); if(identityDomain.isEmpty()) { return; } if(! (accessControl instanceof AthenzFacade)) { throw new IllegalArgumentException("Athenz domain and service specified in deployment.xml, but not supported by system."); } verifyAllowedLaunchAthenzService(applicationPackage.deploymentSpec()); Optional<AthenzUser> athenzUser = getUser(deployer); if (athenzUser.isPresent()) { var zone = zoneId.orElseThrow(() -> new IllegalArgumentException("Unable to evaluate access, no zone provided in deployment")); var serviceToLaunch = instanceName .flatMap(instance -> applicationPackage.deploymentSpec().instance(instance)) .flatMap(instanceSpec -> instanceSpec.athenzService(zone.environment(), zone.region())) .or(() -> applicationPackage.deploymentSpec().athenzService()) .map(service -> new AthenzService(identityDomain.get(), service.value())); if(serviceToLaunch.isPresent()) { if ( ! ((AthenzFacade) accessControl).canLaunch(athenzUser.get(), serviceToLaunch.get()) && ! ((AthenzFacade) accessControl).hasTenantAdminAccess(athenzUser.get(), identityDomain.get()) ) { throw new IllegalArgumentException("User " + athenzUser.get().getFullName() + " is not allowed to launch " + "service " + serviceToLaunch.get().getFullName() + ". 
" + "Please reach out to the domain admin."); } } else { throw new IllegalArgumentException("Athenz domain configured, but no service defined for deployment to " + zone.value()); } } else { Tenant tenant = controller.tenants().require(tenantName); AthenzDomain tenantDomain = ((AthenzTenant) tenant).domain(); if ( ! Objects.equals(tenantDomain, identityDomain.get())) throw new IllegalArgumentException("Athenz domain in deployment.xml: [" + identityDomain.get().getName() + "] " + "must match tenant domain: [" + tenantDomain.getName() + "]"); } } private TenantAndApplicationId dashToUnderscore(TenantAndApplicationId id) { return TenantAndApplicationId.from(id.tenant().value(), id.application().value().replaceAll("-", "_")); } private ApplicationId dashToUnderscore(ApplicationId id) { return dashToUnderscore(TenantAndApplicationId.from(id)).instance(id.instance()); } private QuotaUsage deploymentQuotaUsage(ZoneId zoneId, ApplicationId applicationId) { var application = configServer.nodeRepository().getApplication(zoneId, applicationId); return DeploymentQuotaCalculator.calculateQuotaUsage(application); } /* * Get the AthenzUser from this principal or Optional.empty if this does not represent a user. */ private Optional<AthenzUser> getUser(Optional<Principal> deployer) { return deployer .filter(AthenzPrincipal.class::isInstance) .map(AthenzPrincipal.class::cast) .map(AthenzPrincipal::getIdentity) .filter(AthenzUser.class::isInstance) .map(AthenzUser.class::cast); } /* * Verifies that the configured athenz service (if any) can be launched. 
*/ private void verifyAllowedLaunchAthenzService(DeploymentSpec deploymentSpec) { deploymentSpec.athenzDomain().ifPresent(domain -> { controller.zoneRegistry().zones().reachable().ids().forEach(zone -> { AthenzIdentity configServerAthenzIdentity = controller.zoneRegistry().getConfigServerHttpsIdentity(zone); deploymentSpec.athenzService().ifPresent(service -> { verifyAthenzServiceCanBeLaunchedBy(configServerAthenzIdentity, new AthenzService(domain.value(), service.value())); }); deploymentSpec.instances().forEach(spec -> { spec.athenzService(zone.environment(), zone.region()).ifPresent(service -> { verifyAthenzServiceCanBeLaunchedBy(configServerAthenzIdentity, new AthenzService(domain.value(), service.value())); }); }); }); }); } private void verifyAthenzServiceCanBeLaunchedBy(AthenzIdentity configServerAthenzIdentity, AthenzService athenzService) { if ( ! ((AthenzFacade) accessControl).canLaunch(configServerAthenzIdentity, athenzService)) throw new IllegalArgumentException("Not allowed to launch Athenz service " + athenzService.getFullName()); } /** Returns the latest known version within the given major, which is not newer than the system version. */ public Optional<Version> lastCompatibleVersion(int targetMajorVersion) { VersionStatus versions = controller.readVersionStatus(); Version systemVersion = controller.systemVersion(versions); return versions.versions().stream() .map(VespaVersion::versionNumber) .filter(version -> version.getMajor() == targetMajorVersion) .filter(version -> ! 
version.isAfter(systemVersion)) .max(naturalOrder()); } /** Extract deployment warnings metric from deployment result */ private static Map<DeploymentMetrics.Warning, Integer> warningsFrom(ActivateResult result) { if (result.prepareResponse().log == null) return Map.of(); Map<DeploymentMetrics.Warning, Integer> warnings = new HashMap<>(); for (Log log : result.prepareResponse().log) { if (!"warn".equalsIgnoreCase(log.level) && !"warning".equalsIgnoreCase(log.level)) continue; warnings.merge(DeploymentMetrics.Warning.all, 1, Integer::sum); } return Map.copyOf(warnings); } }
These checks should use `endsWith` rather than `contains` — both here and in the second condition below. `contains` would also exclude unrelated entries whose path merely includes "deployment.xml" or "build-meta.json" as a substring, not just the actual deployment and build-meta files.
/**
 * Computes a SHA-1 hash over the names and CRCs of all entries in the application package,
 * excluding deployment.xml and build-meta.json, so that repackaging with only those files
 * changed produces the same bundle hash.
 *
 * @return hex string of the SHA-1 digest of the remaining entries' names and CRCs
 */
private String calculateBundleHash() {
    // Use endsWith, not contains: contains would also exclude unrelated entries whose
    // path merely includes these file names as a substring.
    Predicate<String> entryMatcher = name -> !name.endsWith(deploymentFile) && !name.endsWith(buildMetaFile);
    SortedMap<String, Long> entryCRCs = ZipStreamReader.getEntryCRCs(new ByteArrayInputStream(zippedContent), entryMatcher);
    // Feed every entry name and CRC into the hasher directly. Hashing entryCRCs.hashCode()
    // would collapse the whole map to 32 bits before SHA-1, making collisions far more likely.
    Funnel<SortedMap<String, Long>> funnel = (from, into) -> from.forEach((name, crc) -> {
        into.putBytes(name.getBytes(UTF_8)); // explicit charset: getBytes() would use the platform default
        into.putLong(crc);
    });
    return Hashing.sha1().hashObject(entryCRCs, funnel).toString();
}
Predicate<String> entryMatcher = name -> !name.contains(deploymentFile) && !name.contains(buildMetaFile);
/**
 * Returns a SHA-1 digest (as a hex string) over the names and CRCs of every package entry
 * except deployment.xml and build-meta.json, so repackaging that changes only those two
 * files yields an identical bundle hash.
 */
private String calculateBundleHash() {
    // Keep an entry unless its name ends with one of the two excluded file names.
    Predicate<String> isBundleEntry = entryName -> ! (entryName.endsWith(deploymentFile) || entryName.endsWith(buildMetaFile));
    SortedMap<String, Long> crcByName = ZipStreamReader.getEntryCRCs(new ByteArrayInputStream(zippedContent), isBundleEntry);
    // The map is sorted, so iteration order — and therefore the digest — is deterministic.
    Funnel<SortedMap<String, Long>> crcFunnel = (map, sink) -> map.forEach((entryName, crc) -> {
        sink.putBytes(entryName.getBytes());
        sink.putLong(crc);
    });
    return Hashing.sha1().hashObject(crcByName, crcFunnel).toString();
}
class ApplicationPackage { private static final String trustedCertificatesFile = "security/clients.pem"; private static final String buildMetaFile = "build-meta.json"; private static final String deploymentFile = "deployment.xml"; private static final String validationOverridesFile = "validation-overrides.xml"; private static final String servicesFile = "services.xml"; private final String contentHash; private final String bundleHash; private final byte[] zippedContent; private final DeploymentSpec deploymentSpec; private final ValidationOverrides validationOverrides; private final ZipArchiveCache files; private final Optional<Version> compileVersion; private final Optional<Instant> buildTime; private final List<X509Certificate> trustedCertificates; /** * Creates an application package from its zipped content. * This <b>assigns ownership</b> of the given byte array to this class; * it must not be further changed by the caller. */ public ApplicationPackage(byte[] zippedContent) { this(zippedContent, false); } /** * Creates an application package from its zipped content. * This <b>assigns ownership</b> of the given byte array to this class; * it must not be further changed by the caller. * If 'requireFiles' is true, files needed by deployment orchestration must be present. 
*/ public ApplicationPackage(byte[] zippedContent, boolean requireFiles) { this.zippedContent = Objects.requireNonNull(zippedContent, "The application package content cannot be null"); this.contentHash = Hashing.sha1().hashBytes(zippedContent).toString(); this.files = new ZipArchiveCache(zippedContent, Set.of(deploymentFile, validationOverridesFile, servicesFile, buildMetaFile, trustedCertificatesFile)); Optional<DeploymentSpec> deploymentSpec = files.get(deploymentFile).map(bytes -> new String(bytes, UTF_8)).map(DeploymentSpec::fromXml); if (requireFiles && deploymentSpec.isEmpty()) throw new IllegalArgumentException("Missing required file '" + deploymentFile + "'"); this.deploymentSpec = deploymentSpec.orElse(DeploymentSpec.empty); this.validationOverrides = files.get(validationOverridesFile).map(bytes -> new String(bytes, UTF_8)).map(ValidationOverrides::fromXml).orElse(ValidationOverrides.empty); Optional<Inspector> buildMetaObject = files.get(buildMetaFile).map(SlimeUtils::jsonToSlime).map(Slime::get); this.compileVersion = buildMetaObject.flatMap(object -> parse(object, "compileVersion", field -> Version.fromString(field.asString()))); this.buildTime = buildMetaObject.flatMap(object -> parse(object, "buildTime", field -> Instant.ofEpochMilli(field.asLong()))); this.trustedCertificates = files.get(trustedCertificatesFile).map(bytes -> X509CertificateUtils.certificateListFromPem(new String(bytes, UTF_8))).orElse(List.of()); this.bundleHash = calculateBundleHash(); } /** Returns a copy of this with the given certificate appended. 
*/ public ApplicationPackage withTrustedCertificate(X509Certificate certificate) { List<X509Certificate> trustedCertificates = new ArrayList<>(this.trustedCertificates); trustedCertificates.add(certificate); byte[] certificatesBytes = X509CertificateUtils.toPem(trustedCertificates).getBytes(UTF_8); ByteArrayOutputStream modified = new ByteArrayOutputStream(zippedContent.length + certificatesBytes.length); ZipStreamReader.transferAndWrite(modified, new ByteArrayInputStream(zippedContent), trustedCertificatesFile, certificatesBytes); return new ApplicationPackage(modified.toByteArray()); } /** Returns a hash of the content of this package */ public String hash() { return contentHash; } public String bundleHash() { return bundleHash; } /** Returns the content of this package. The content <b>must not</b> be modified. */ public byte[] zippedContent() { return zippedContent; } /** * Returns the deployment spec from the deployment.xml file of the package content. * This is the DeploymentSpec.empty instance if this package does not contain a deployment.xml file. */ public DeploymentSpec deploymentSpec() { return deploymentSpec; } /** * Returns the validation overrides from the validation-overrides.xml file of the package content. * This is the ValidationOverrides.empty instance if this package does not contain a validation-overrides.xml file. */ public ValidationOverrides validationOverrides() { return validationOverrides; } /** Returns the platform version which package was compiled against, if known. */ public Optional<Version> compileVersion() { return compileVersion; } /** Returns the time this package was built, if known. */ public Optional<Instant> buildTime() { return buildTime; } /** Returns the list of certificates trusted by this application, or an empty list if no trust configured. 
*/ public List<X509Certificate> trustedCertificates() { return trustedCertificates; } private static <Type> Optional<Type> parse(Inspector buildMetaObject, String fieldName, Function<Inspector, Type> mapper) { if ( ! buildMetaObject.field(fieldName).valid()) throw new IllegalArgumentException("Missing value '" + fieldName + "' in '" + buildMetaFile + "'"); try { return Optional.of(mapper.apply(buildMetaObject.field(fieldName))); } catch (RuntimeException e) { throw new IllegalArgumentException("Failed parsing \"" + fieldName + "\" in '" + buildMetaFile + "': " + Exceptions.toMessageString(e)); } } /** Creates a valid application package that will remove all application's deployments */ public static ApplicationPackage deploymentRemoval() { return new ApplicationPackage(filesZip(Map.of(validationOverridesFile, allValidationOverrides().xmlForm().getBytes(UTF_8), deploymentFile, DeploymentSpec.empty.xmlForm().getBytes(UTF_8)))); } /** Returns a zip containing meta data about deployments of this package by the given job. 
*/ public byte[] metaDataZip() { preProcessAndPopulateCache(); return cacheZip(); } private void preProcessAndPopulateCache() { FileWrapper servicesXml = files.wrapper().wrap(Paths.get(servicesFile)); if (servicesXml.exists()) try { new XmlPreProcessor(files.wrapper().wrap(Paths.get("./")), new InputStreamReader(new ByteArrayInputStream(servicesXml.content()), UTF_8), InstanceName.defaultName(), Environment.prod, RegionName.defaultName()) .run(); } catch (Exception e) { throw new RuntimeException(e); } } private byte[] cacheZip() { return filesZip(files.cache.entrySet().stream() .filter(entry -> entry.getValue().isPresent()) .collect(toMap(entry -> entry.getKey().toString(), entry -> entry.getValue().get()))); } static byte[] filesZip(Map<String, byte[]> files) { try (ZipBuilder zipBuilder = new ZipBuilder(files.values().stream().mapToInt(bytes -> bytes.length).sum() + 512)) { files.forEach(zipBuilder::add); zipBuilder.close(); return zipBuilder.toByteArray(); } } private static ValidationOverrides allValidationOverrides() { String until = DateTimeFormatter.ISO_LOCAL_DATE.format(Instant.now().plus(Duration.ofDays(25)).atZone(ZoneOffset.UTC)); StringBuilder validationOverridesContents = new StringBuilder(1000); validationOverridesContents.append("<validation-overrides version=\"1.0\">\n"); for (ValidationId validationId: ValidationId.values()) validationOverridesContents.append("\t<allow until=\"").append(until).append("\">").append(validationId.value()).append("</allow>\n"); validationOverridesContents.append("</validation-overrides>\n"); return ValidationOverrides.fromXml(validationOverridesContents.toString()); } /** Maps normalized paths to cached content read from a zip archive. 
*/ private static class ZipArchiveCache { /** Max size of each extracted file */ private static final int maxSize = 10 << 20; private static final String applicationDir = "application/"; private static String withoutLegacyDir(String name) { if (name.startsWith(applicationDir)) return name.substring(applicationDir.length()); return name; } private final byte[] zip; private final Map<Path, Optional<byte[]>> cache; public ZipArchiveCache(byte[] zip, Collection<String> prePopulated) { this.zip = zip; this.cache = new ConcurrentSkipListMap<>(); this.cache.putAll(read(prePopulated)); } public Optional<byte[]> get(String path) { return get(Paths.get(path)); } public Optional<byte[]> get(Path path) { return cache.computeIfAbsent(path.normalize(), read(List.of(path.normalize().toString()))::get); } public FileSystemWrapper wrapper() { return FileSystemWrapper.ofFiles(path -> get(path).isPresent(), path -> get(path).orElseThrow(() -> new NoSuchFileException(path.toString()))); } private Map<Path, Optional<byte[]>> read(Collection<String> names) { var entries = new ZipStreamReader(new ByteArrayInputStream(zip), name -> names.contains(withoutLegacyDir(name)), maxSize, true) .entries().stream() .collect(toMap(entry -> Paths.get(withoutLegacyDir(entry.zipEntry().getName())).normalize(), ZipStreamReader.ZipEntryWithContent::content)); names.stream().map(Paths::get).forEach(path -> entries.putIfAbsent(path.normalize(), Optional.empty())); return entries; } } }
class ApplicationPackage { private static final String trustedCertificatesFile = "security/clients.pem"; private static final String buildMetaFile = "build-meta.json"; private static final String deploymentFile = "deployment.xml"; private static final String validationOverridesFile = "validation-overrides.xml"; private static final String servicesFile = "services.xml"; private final String contentHash; private final String bundleHash; private final byte[] zippedContent; private final DeploymentSpec deploymentSpec; private final ValidationOverrides validationOverrides; private final ZipArchiveCache files; private final Optional<Version> compileVersion; private final Optional<Instant> buildTime; private final List<X509Certificate> trustedCertificates; /** * Creates an application package from its zipped content. * This <b>assigns ownership</b> of the given byte array to this class; * it must not be further changed by the caller. */ public ApplicationPackage(byte[] zippedContent) { this(zippedContent, false); } /** * Creates an application package from its zipped content. * This <b>assigns ownership</b> of the given byte array to this class; * it must not be further changed by the caller. * If 'requireFiles' is true, files needed by deployment orchestration must be present. 
*/ public ApplicationPackage(byte[] zippedContent, boolean requireFiles) { this.zippedContent = Objects.requireNonNull(zippedContent, "The application package content cannot be null"); this.contentHash = Hashing.sha1().hashBytes(zippedContent).toString(); this.files = new ZipArchiveCache(zippedContent, Set.of(deploymentFile, validationOverridesFile, servicesFile, buildMetaFile, trustedCertificatesFile)); Optional<DeploymentSpec> deploymentSpec = files.get(deploymentFile).map(bytes -> new String(bytes, UTF_8)).map(DeploymentSpec::fromXml); if (requireFiles && deploymentSpec.isEmpty()) throw new IllegalArgumentException("Missing required file '" + deploymentFile + "'"); this.deploymentSpec = deploymentSpec.orElse(DeploymentSpec.empty); this.validationOverrides = files.get(validationOverridesFile).map(bytes -> new String(bytes, UTF_8)).map(ValidationOverrides::fromXml).orElse(ValidationOverrides.empty); Optional<Inspector> buildMetaObject = files.get(buildMetaFile).map(SlimeUtils::jsonToSlime).map(Slime::get); this.compileVersion = buildMetaObject.flatMap(object -> parse(object, "compileVersion", field -> Version.fromString(field.asString()))); this.buildTime = buildMetaObject.flatMap(object -> parse(object, "buildTime", field -> Instant.ofEpochMilli(field.asLong()))); this.trustedCertificates = files.get(trustedCertificatesFile).map(bytes -> X509CertificateUtils.certificateListFromPem(new String(bytes, UTF_8))).orElse(List.of()); this.bundleHash = calculateBundleHash(); } /** Returns a copy of this with the given certificate appended. 
*/ public ApplicationPackage withTrustedCertificate(X509Certificate certificate) { List<X509Certificate> trustedCertificates = new ArrayList<>(this.trustedCertificates); trustedCertificates.add(certificate); byte[] certificatesBytes = X509CertificateUtils.toPem(trustedCertificates).getBytes(UTF_8); ByteArrayOutputStream modified = new ByteArrayOutputStream(zippedContent.length + certificatesBytes.length); ZipStreamReader.transferAndWrite(modified, new ByteArrayInputStream(zippedContent), trustedCertificatesFile, certificatesBytes); return new ApplicationPackage(modified.toByteArray()); } /** Returns a hash of the content of this package */ public String hash() { return contentHash; } public String bundleHash() { return bundleHash; } /** Returns the content of this package. The content <b>must not</b> be modified. */ public byte[] zippedContent() { return zippedContent; } /** * Returns the deployment spec from the deployment.xml file of the package content. * This is the DeploymentSpec.empty instance if this package does not contain a deployment.xml file. */ public DeploymentSpec deploymentSpec() { return deploymentSpec; } /** * Returns the validation overrides from the validation-overrides.xml file of the package content. * This is the ValidationOverrides.empty instance if this package does not contain a validation-overrides.xml file. */ public ValidationOverrides validationOverrides() { return validationOverrides; } /** Returns the platform version which package was compiled against, if known. */ public Optional<Version> compileVersion() { return compileVersion; } /** Returns the time this package was built, if known. */ public Optional<Instant> buildTime() { return buildTime; } /** Returns the list of certificates trusted by this application, or an empty list if no trust configured. 
*/ public List<X509Certificate> trustedCertificates() { return trustedCertificates; } private static <Type> Optional<Type> parse(Inspector buildMetaObject, String fieldName, Function<Inspector, Type> mapper) { if ( ! buildMetaObject.field(fieldName).valid()) throw new IllegalArgumentException("Missing value '" + fieldName + "' in '" + buildMetaFile + "'"); try { return Optional.of(mapper.apply(buildMetaObject.field(fieldName))); } catch (RuntimeException e) { throw new IllegalArgumentException("Failed parsing \"" + fieldName + "\" in '" + buildMetaFile + "': " + Exceptions.toMessageString(e)); } } /** Creates a valid application package that will remove all application's deployments */ public static ApplicationPackage deploymentRemoval() { return new ApplicationPackage(filesZip(Map.of(validationOverridesFile, allValidationOverrides().xmlForm().getBytes(UTF_8), deploymentFile, DeploymentSpec.empty.xmlForm().getBytes(UTF_8)))); } /** Returns a zip containing meta data about deployments of this package by the given job. 
*/ public byte[] metaDataZip() { preProcessAndPopulateCache(); return cacheZip(); } private void preProcessAndPopulateCache() { FileWrapper servicesXml = files.wrapper().wrap(Paths.get(servicesFile)); if (servicesXml.exists()) try { new XmlPreProcessor(files.wrapper().wrap(Paths.get("./")), new InputStreamReader(new ByteArrayInputStream(servicesXml.content()), UTF_8), InstanceName.defaultName(), Environment.prod, RegionName.defaultName()) .run(); } catch (Exception e) { throw new RuntimeException(e); } } private byte[] cacheZip() { return filesZip(files.cache.entrySet().stream() .filter(entry -> entry.getValue().isPresent()) .collect(toMap(entry -> entry.getKey().toString(), entry -> entry.getValue().get()))); } static byte[] filesZip(Map<String, byte[]> files) { try (ZipBuilder zipBuilder = new ZipBuilder(files.values().stream().mapToInt(bytes -> bytes.length).sum() + 512)) { files.forEach(zipBuilder::add); zipBuilder.close(); return zipBuilder.toByteArray(); } } private static ValidationOverrides allValidationOverrides() { String until = DateTimeFormatter.ISO_LOCAL_DATE.format(Instant.now().plus(Duration.ofDays(25)).atZone(ZoneOffset.UTC)); StringBuilder validationOverridesContents = new StringBuilder(1000); validationOverridesContents.append("<validation-overrides version=\"1.0\">\n"); for (ValidationId validationId: ValidationId.values()) validationOverridesContents.append("\t<allow until=\"").append(until).append("\">").append(validationId.value()).append("</allow>\n"); validationOverridesContents.append("</validation-overrides>\n"); return ValidationOverrides.fromXml(validationOverridesContents.toString()); } /** Maps normalized paths to cached content read from a zip archive. 
*/ private static class ZipArchiveCache { /** Max size of each extracted file */ private static final int maxSize = 10 << 20; private static final String applicationDir = "application/"; private static String withoutLegacyDir(String name) { if (name.startsWith(applicationDir)) return name.substring(applicationDir.length()); return name; } private final byte[] zip; private final Map<Path, Optional<byte[]>> cache; public ZipArchiveCache(byte[] zip, Collection<String> prePopulated) { this.zip = zip; this.cache = new ConcurrentSkipListMap<>(); this.cache.putAll(read(prePopulated)); } public Optional<byte[]> get(String path) { return get(Paths.get(path)); } public Optional<byte[]> get(Path path) { return cache.computeIfAbsent(path.normalize(), read(List.of(path.normalize().toString()))::get); } public FileSystemWrapper wrapper() { return FileSystemWrapper.ofFiles(path -> get(path).isPresent(), path -> get(path).orElseThrow(() -> new NoSuchFileException(path.toString()))); } private Map<Path, Optional<byte[]>> read(Collection<String> names) { var entries = new ZipStreamReader(new ByteArrayInputStream(zip), name -> names.contains(withoutLegacyDir(name)), maxSize, true) .entries().stream() .collect(toMap(entry -> Paths.get(withoutLegacyDir(entry.zipEntry().getName())).normalize(), ZipStreamReader.ZipEntryWithContent::content)); names.stream().map(Paths::get).forEach(path -> entries.putIfAbsent(path.normalize(), Optional.empty())); return entries; } } }
Hmm, why SHA1 a single number?
private String calculateBundleHash() { Predicate<String> entryMatcher = name -> !name.contains(deploymentFile) && !name.contains(buildMetaFile); SortedMap<String, Long> entryCRCs = ZipStreamReader.getEntryCRCs(new ByteArrayInputStream(zippedContent), entryMatcher); return Hashing.sha1().hashInt(entryCRCs.hashCode()).toString(); }
return Hashing.sha1().hashInt(entryCRCs.hashCode()).toString();
private String calculateBundleHash() { Predicate<String> entryMatcher = name -> !name.endsWith(deploymentFile) && !name.endsWith(buildMetaFile); SortedMap<String, Long> entryCRCs = ZipStreamReader.getEntryCRCs(new ByteArrayInputStream(zippedContent), entryMatcher); Funnel<SortedMap<String, Long>> funnel = (from, into) -> from.entrySet().forEach(entry -> { into.putBytes(entry.getKey().getBytes()); into.putLong(entry.getValue()); }); return Hashing.sha1().hashObject(entryCRCs, funnel).toString(); }
class ApplicationPackage { private static final String trustedCertificatesFile = "security/clients.pem"; private static final String buildMetaFile = "build-meta.json"; private static final String deploymentFile = "deployment.xml"; private static final String validationOverridesFile = "validation-overrides.xml"; private static final String servicesFile = "services.xml"; private final String contentHash; private final String bundleHash; private final byte[] zippedContent; private final DeploymentSpec deploymentSpec; private final ValidationOverrides validationOverrides; private final ZipArchiveCache files; private final Optional<Version> compileVersion; private final Optional<Instant> buildTime; private final List<X509Certificate> trustedCertificates; /** * Creates an application package from its zipped content. * This <b>assigns ownership</b> of the given byte array to this class; * it must not be further changed by the caller. */ public ApplicationPackage(byte[] zippedContent) { this(zippedContent, false); } /** * Creates an application package from its zipped content. * This <b>assigns ownership</b> of the given byte array to this class; * it must not be further changed by the caller. * If 'requireFiles' is true, files needed by deployment orchestration must be present. 
*/ public ApplicationPackage(byte[] zippedContent, boolean requireFiles) { this.zippedContent = Objects.requireNonNull(zippedContent, "The application package content cannot be null"); this.contentHash = Hashing.sha1().hashBytes(zippedContent).toString(); this.files = new ZipArchiveCache(zippedContent, Set.of(deploymentFile, validationOverridesFile, servicesFile, buildMetaFile, trustedCertificatesFile)); Optional<DeploymentSpec> deploymentSpec = files.get(deploymentFile).map(bytes -> new String(bytes, UTF_8)).map(DeploymentSpec::fromXml); if (requireFiles && deploymentSpec.isEmpty()) throw new IllegalArgumentException("Missing required file '" + deploymentFile + "'"); this.deploymentSpec = deploymentSpec.orElse(DeploymentSpec.empty); this.validationOverrides = files.get(validationOverridesFile).map(bytes -> new String(bytes, UTF_8)).map(ValidationOverrides::fromXml).orElse(ValidationOverrides.empty); Optional<Inspector> buildMetaObject = files.get(buildMetaFile).map(SlimeUtils::jsonToSlime).map(Slime::get); this.compileVersion = buildMetaObject.flatMap(object -> parse(object, "compileVersion", field -> Version.fromString(field.asString()))); this.buildTime = buildMetaObject.flatMap(object -> parse(object, "buildTime", field -> Instant.ofEpochMilli(field.asLong()))); this.trustedCertificates = files.get(trustedCertificatesFile).map(bytes -> X509CertificateUtils.certificateListFromPem(new String(bytes, UTF_8))).orElse(List.of()); this.bundleHash = calculateBundleHash(); } /** Returns a copy of this with the given certificate appended. 
*/ public ApplicationPackage withTrustedCertificate(X509Certificate certificate) { List<X509Certificate> trustedCertificates = new ArrayList<>(this.trustedCertificates); trustedCertificates.add(certificate); byte[] certificatesBytes = X509CertificateUtils.toPem(trustedCertificates).getBytes(UTF_8); ByteArrayOutputStream modified = new ByteArrayOutputStream(zippedContent.length + certificatesBytes.length); ZipStreamReader.transferAndWrite(modified, new ByteArrayInputStream(zippedContent), trustedCertificatesFile, certificatesBytes); return new ApplicationPackage(modified.toByteArray()); } /** Returns a hash of the content of this package */ public String hash() { return contentHash; } public String bundleHash() { return bundleHash; } /** Returns the content of this package. The content <b>must not</b> be modified. */ public byte[] zippedContent() { return zippedContent; } /** * Returns the deployment spec from the deployment.xml file of the package content. * This is the DeploymentSpec.empty instance if this package does not contain a deployment.xml file. */ public DeploymentSpec deploymentSpec() { return deploymentSpec; } /** * Returns the validation overrides from the validation-overrides.xml file of the package content. * This is the ValidationOverrides.empty instance if this package does not contain a validation-overrides.xml file. */ public ValidationOverrides validationOverrides() { return validationOverrides; } /** Returns the platform version which package was compiled against, if known. */ public Optional<Version> compileVersion() { return compileVersion; } /** Returns the time this package was built, if known. */ public Optional<Instant> buildTime() { return buildTime; } /** Returns the list of certificates trusted by this application, or an empty list if no trust configured. 
*/ public List<X509Certificate> trustedCertificates() { return trustedCertificates; } private static <Type> Optional<Type> parse(Inspector buildMetaObject, String fieldName, Function<Inspector, Type> mapper) { if ( ! buildMetaObject.field(fieldName).valid()) throw new IllegalArgumentException("Missing value '" + fieldName + "' in '" + buildMetaFile + "'"); try { return Optional.of(mapper.apply(buildMetaObject.field(fieldName))); } catch (RuntimeException e) { throw new IllegalArgumentException("Failed parsing \"" + fieldName + "\" in '" + buildMetaFile + "': " + Exceptions.toMessageString(e)); } } /** Creates a valid application package that will remove all application's deployments */ public static ApplicationPackage deploymentRemoval() { return new ApplicationPackage(filesZip(Map.of(validationOverridesFile, allValidationOverrides().xmlForm().getBytes(UTF_8), deploymentFile, DeploymentSpec.empty.xmlForm().getBytes(UTF_8)))); } /** Returns a zip containing meta data about deployments of this package by the given job. 
*/ public byte[] metaDataZip() { preProcessAndPopulateCache(); return cacheZip(); } private void preProcessAndPopulateCache() { FileWrapper servicesXml = files.wrapper().wrap(Paths.get(servicesFile)); if (servicesXml.exists()) try { new XmlPreProcessor(files.wrapper().wrap(Paths.get("./")), new InputStreamReader(new ByteArrayInputStream(servicesXml.content()), UTF_8), InstanceName.defaultName(), Environment.prod, RegionName.defaultName()) .run(); } catch (Exception e) { throw new RuntimeException(e); } } private byte[] cacheZip() { return filesZip(files.cache.entrySet().stream() .filter(entry -> entry.getValue().isPresent()) .collect(toMap(entry -> entry.getKey().toString(), entry -> entry.getValue().get()))); } static byte[] filesZip(Map<String, byte[]> files) { try (ZipBuilder zipBuilder = new ZipBuilder(files.values().stream().mapToInt(bytes -> bytes.length).sum() + 512)) { files.forEach(zipBuilder::add); zipBuilder.close(); return zipBuilder.toByteArray(); } } private static ValidationOverrides allValidationOverrides() { String until = DateTimeFormatter.ISO_LOCAL_DATE.format(Instant.now().plus(Duration.ofDays(25)).atZone(ZoneOffset.UTC)); StringBuilder validationOverridesContents = new StringBuilder(1000); validationOverridesContents.append("<validation-overrides version=\"1.0\">\n"); for (ValidationId validationId: ValidationId.values()) validationOverridesContents.append("\t<allow until=\"").append(until).append("\">").append(validationId.value()).append("</allow>\n"); validationOverridesContents.append("</validation-overrides>\n"); return ValidationOverrides.fromXml(validationOverridesContents.toString()); } /** Maps normalized paths to cached content read from a zip archive. 
*/ private static class ZipArchiveCache { /** Max size of each extracted file */ private static final int maxSize = 10 << 20; private static final String applicationDir = "application/"; private static String withoutLegacyDir(String name) { if (name.startsWith(applicationDir)) return name.substring(applicationDir.length()); return name; } private final byte[] zip; private final Map<Path, Optional<byte[]>> cache; public ZipArchiveCache(byte[] zip, Collection<String> prePopulated) { this.zip = zip; this.cache = new ConcurrentSkipListMap<>(); this.cache.putAll(read(prePopulated)); } public Optional<byte[]> get(String path) { return get(Paths.get(path)); } public Optional<byte[]> get(Path path) { return cache.computeIfAbsent(path.normalize(), read(List.of(path.normalize().toString()))::get); } public FileSystemWrapper wrapper() { return FileSystemWrapper.ofFiles(path -> get(path).isPresent(), path -> get(path).orElseThrow(() -> new NoSuchFileException(path.toString()))); } private Map<Path, Optional<byte[]>> read(Collection<String> names) { var entries = new ZipStreamReader(new ByteArrayInputStream(zip), name -> names.contains(withoutLegacyDir(name)), maxSize, true) .entries().stream() .collect(toMap(entry -> Paths.get(withoutLegacyDir(entry.zipEntry().getName())).normalize(), ZipStreamReader.ZipEntryWithContent::content)); names.stream().map(Paths::get).forEach(path -> entries.putIfAbsent(path.normalize(), Optional.empty())); return entries; } } }
class ApplicationPackage { private static final String trustedCertificatesFile = "security/clients.pem"; private static final String buildMetaFile = "build-meta.json"; private static final String deploymentFile = "deployment.xml"; private static final String validationOverridesFile = "validation-overrides.xml"; private static final String servicesFile = "services.xml"; private final String contentHash; private final String bundleHash; private final byte[] zippedContent; private final DeploymentSpec deploymentSpec; private final ValidationOverrides validationOverrides; private final ZipArchiveCache files; private final Optional<Version> compileVersion; private final Optional<Instant> buildTime; private final List<X509Certificate> trustedCertificates; /** * Creates an application package from its zipped content. * This <b>assigns ownership</b> of the given byte array to this class; * it must not be further changed by the caller. */ public ApplicationPackage(byte[] zippedContent) { this(zippedContent, false); } /** * Creates an application package from its zipped content. * This <b>assigns ownership</b> of the given byte array to this class; * it must not be further changed by the caller. * If 'requireFiles' is true, files needed by deployment orchestration must be present. 
*/ public ApplicationPackage(byte[] zippedContent, boolean requireFiles) { this.zippedContent = Objects.requireNonNull(zippedContent, "The application package content cannot be null"); this.contentHash = Hashing.sha1().hashBytes(zippedContent).toString(); this.files = new ZipArchiveCache(zippedContent, Set.of(deploymentFile, validationOverridesFile, servicesFile, buildMetaFile, trustedCertificatesFile)); Optional<DeploymentSpec> deploymentSpec = files.get(deploymentFile).map(bytes -> new String(bytes, UTF_8)).map(DeploymentSpec::fromXml); if (requireFiles && deploymentSpec.isEmpty()) throw new IllegalArgumentException("Missing required file '" + deploymentFile + "'"); this.deploymentSpec = deploymentSpec.orElse(DeploymentSpec.empty); this.validationOverrides = files.get(validationOverridesFile).map(bytes -> new String(bytes, UTF_8)).map(ValidationOverrides::fromXml).orElse(ValidationOverrides.empty); Optional<Inspector> buildMetaObject = files.get(buildMetaFile).map(SlimeUtils::jsonToSlime).map(Slime::get); this.compileVersion = buildMetaObject.flatMap(object -> parse(object, "compileVersion", field -> Version.fromString(field.asString()))); this.buildTime = buildMetaObject.flatMap(object -> parse(object, "buildTime", field -> Instant.ofEpochMilli(field.asLong()))); this.trustedCertificates = files.get(trustedCertificatesFile).map(bytes -> X509CertificateUtils.certificateListFromPem(new String(bytes, UTF_8))).orElse(List.of()); this.bundleHash = calculateBundleHash(); } /** Returns a copy of this with the given certificate appended. 
*/ public ApplicationPackage withTrustedCertificate(X509Certificate certificate) { List<X509Certificate> trustedCertificates = new ArrayList<>(this.trustedCertificates); trustedCertificates.add(certificate); byte[] certificatesBytes = X509CertificateUtils.toPem(trustedCertificates).getBytes(UTF_8); ByteArrayOutputStream modified = new ByteArrayOutputStream(zippedContent.length + certificatesBytes.length); ZipStreamReader.transferAndWrite(modified, new ByteArrayInputStream(zippedContent), trustedCertificatesFile, certificatesBytes); return new ApplicationPackage(modified.toByteArray()); } /** Returns a hash of the content of this package */ public String hash() { return contentHash; } public String bundleHash() { return bundleHash; } /** Returns the content of this package. The content <b>must not</b> be modified. */ public byte[] zippedContent() { return zippedContent; } /** * Returns the deployment spec from the deployment.xml file of the package content. * This is the DeploymentSpec.empty instance if this package does not contain a deployment.xml file. */ public DeploymentSpec deploymentSpec() { return deploymentSpec; } /** * Returns the validation overrides from the validation-overrides.xml file of the package content. * This is the ValidationOverrides.empty instance if this package does not contain a validation-overrides.xml file. */ public ValidationOverrides validationOverrides() { return validationOverrides; } /** Returns the platform version which package was compiled against, if known. */ public Optional<Version> compileVersion() { return compileVersion; } /** Returns the time this package was built, if known. */ public Optional<Instant> buildTime() { return buildTime; } /** Returns the list of certificates trusted by this application, or an empty list if no trust configured. 
*/ public List<X509Certificate> trustedCertificates() { return trustedCertificates; } private static <Type> Optional<Type> parse(Inspector buildMetaObject, String fieldName, Function<Inspector, Type> mapper) { if ( ! buildMetaObject.field(fieldName).valid()) throw new IllegalArgumentException("Missing value '" + fieldName + "' in '" + buildMetaFile + "'"); try { return Optional.of(mapper.apply(buildMetaObject.field(fieldName))); } catch (RuntimeException e) { throw new IllegalArgumentException("Failed parsing \"" + fieldName + "\" in '" + buildMetaFile + "': " + Exceptions.toMessageString(e)); } } /** Creates a valid application package that will remove all application's deployments */ public static ApplicationPackage deploymentRemoval() { return new ApplicationPackage(filesZip(Map.of(validationOverridesFile, allValidationOverrides().xmlForm().getBytes(UTF_8), deploymentFile, DeploymentSpec.empty.xmlForm().getBytes(UTF_8)))); } /** Returns a zip containing meta data about deployments of this package by the given job. 
*/ public byte[] metaDataZip() { preProcessAndPopulateCache(); return cacheZip(); } private void preProcessAndPopulateCache() { FileWrapper servicesXml = files.wrapper().wrap(Paths.get(servicesFile)); if (servicesXml.exists()) try { new XmlPreProcessor(files.wrapper().wrap(Paths.get("./")), new InputStreamReader(new ByteArrayInputStream(servicesXml.content()), UTF_8), InstanceName.defaultName(), Environment.prod, RegionName.defaultName()) .run(); } catch (Exception e) { throw new RuntimeException(e); } } private byte[] cacheZip() { return filesZip(files.cache.entrySet().stream() .filter(entry -> entry.getValue().isPresent()) .collect(toMap(entry -> entry.getKey().toString(), entry -> entry.getValue().get()))); } static byte[] filesZip(Map<String, byte[]> files) { try (ZipBuilder zipBuilder = new ZipBuilder(files.values().stream().mapToInt(bytes -> bytes.length).sum() + 512)) { files.forEach(zipBuilder::add); zipBuilder.close(); return zipBuilder.toByteArray(); } } private static ValidationOverrides allValidationOverrides() { String until = DateTimeFormatter.ISO_LOCAL_DATE.format(Instant.now().plus(Duration.ofDays(25)).atZone(ZoneOffset.UTC)); StringBuilder validationOverridesContents = new StringBuilder(1000); validationOverridesContents.append("<validation-overrides version=\"1.0\">\n"); for (ValidationId validationId: ValidationId.values()) validationOverridesContents.append("\t<allow until=\"").append(until).append("\">").append(validationId.value()).append("</allow>\n"); validationOverridesContents.append("</validation-overrides>\n"); return ValidationOverrides.fromXml(validationOverridesContents.toString()); } /** Maps normalized paths to cached content read from a zip archive. 
*/ private static class ZipArchiveCache { /** Max size of each extracted file */ private static final int maxSize = 10 << 20; private static final String applicationDir = "application/"; private static String withoutLegacyDir(String name) { if (name.startsWith(applicationDir)) return name.substring(applicationDir.length()); return name; } private final byte[] zip; private final Map<Path, Optional<byte[]>> cache; public ZipArchiveCache(byte[] zip, Collection<String> prePopulated) { this.zip = zip; this.cache = new ConcurrentSkipListMap<>(); this.cache.putAll(read(prePopulated)); } public Optional<byte[]> get(String path) { return get(Paths.get(path)); } public Optional<byte[]> get(Path path) { return cache.computeIfAbsent(path.normalize(), read(List.of(path.normalize().toString()))::get); } public FileSystemWrapper wrapper() { return FileSystemWrapper.ofFiles(path -> get(path).isPresent(), path -> get(path).orElseThrow(() -> new NoSuchFileException(path.toString()))); } private Map<Path, Optional<byte[]>> read(Collection<String> names) { var entries = new ZipStreamReader(new ByteArrayInputStream(zip), name -> names.contains(withoutLegacyDir(name)), maxSize, true) .entries().stream() .collect(toMap(entry -> Paths.get(withoutLegacyDir(entry.zipEntry().getName())).normalize(), ZipStreamReader.ZipEntryWithContent::content)); names.stream().map(Paths::get).forEach(path -> entries.putIfAbsent(path.normalize(), Optional.empty())); return entries; } } }
Hmm. Still worried about getting holes. I was thinking of finding the oldest version deployed anywhere, and then remove everything older than this, but always keep one. That's what we do for packages we store.
public LockedApplication storeWithUpdatedConfig(LockedApplication application, ApplicationPackage applicationPackage) { applicationPackageValidator.validate(application.get(), applicationPackage, clock.instant()); application = application.with(applicationPackage.deploymentSpec()); application = application.with(applicationPackage.validationOverrides()); var existingInstances = application.get().instances().keySet(); var declaredInstances = applicationPackage.deploymentSpec().instanceNames(); for (var name : declaredInstances) if ( ! existingInstances.contains(name)) application = withNewInstance(application, application.get().id().instance(name)); for (InstanceName name : existingInstances) { application = withoutDeletedDeployments(application, name); } controller.jobController().deploymentStatus(application.get()); for (Notification notification : controller.notificationsDb().listNotifications(NotificationSource.from(application.get().id()), true)) { if ( ! notification.source().instance().map(declaredInstances::contains).orElse(false)) controller.notificationsDb().removeNotifications(notification.source()); if (notification.source().instance().isPresent() && ! notification.source().zoneId().map(application.get().require(notification.source().instance().get()).deployments()::containsKey).orElse(false)) controller.notificationsDb().removeNotifications(notification.source()); } var existingVersions = application.get() .instances() .values() .stream() .flatMap(instance -> instance.deployments().values().stream()) .map(Deployment::applicationVersion) .collect(Collectors.toSet()); var oldVersions = application.get().versions() .stream() .filter(version -> !existingVersions.contains(version)) .sorted() .collect(Collectors.toList()); for (int i = 0; i < oldVersions.size() - 1; i++) { application = application.withoutVersion(oldVersions.get(i)); } store(application); return application; }
for (int i = 0; i < oldVersions.size() - 1; i++) {
public LockedApplication storeWithUpdatedConfig(LockedApplication application, ApplicationPackage applicationPackage) { applicationPackageValidator.validate(application.get(), applicationPackage, clock.instant()); application = application.with(applicationPackage.deploymentSpec()); application = application.with(applicationPackage.validationOverrides()); var existingInstances = application.get().instances().keySet(); var declaredInstances = applicationPackage.deploymentSpec().instanceNames(); for (var name : declaredInstances) if ( ! existingInstances.contains(name)) application = withNewInstance(application, application.get().id().instance(name)); for (InstanceName name : existingInstances) { application = withoutDeletedDeployments(application, name); } controller.jobController().deploymentStatus(application.get()); for (Notification notification : controller.notificationsDb().listNotifications(NotificationSource.from(application.get().id()), true)) { if ( ! notification.source().instance().map(declaredInstances::contains).orElse(false)) controller.notificationsDb().removeNotifications(notification.source()); if (notification.source().instance().isPresent() && ! notification.source().zoneId().map(application.get().require(notification.source().instance().get()).deployments()::containsKey).orElse(false)) controller.notificationsDb().removeNotifications(notification.source()); } var oldestDeployedVersion = application.get() .instances() .values() .stream() .flatMap(instance -> instance.deployments().values().stream()) .map(Deployment::applicationVersion) .sorted() .findFirst() .orElse(ApplicationVersion.unknown); var olderVersions = application.get().versions() .stream() .filter(version -> version.compareTo(oldestDeployedVersion) < 0) .sorted() .collect(Collectors.toList()); for (int i = 0; i < olderVersions.size() - 1; i++) { application = application.withoutVersion(olderVersions.get(i)); } store(application); return application; }
/**
 * Controls the applications of the system: creation and deletion of applications and instances,
 * deployment to zones, locking, and persistence through the {@link CuratorDb}.
 *
 * Thread-safety: mutating operations acquire the per-application curator lock via
 * {@link #lock(TenantAndApplicationId)} before reading, modifying and storing.
 */
class ApplicationController {

    private static final Logger log = Logger.getLogger(ApplicationController.class.getName());

    /** The controller owning this */
    private final Controller controller;

    /** For persistence */
    private final CuratorDb curator;

    private final ArtifactRepository artifactRepository;
    private final ApplicationStore applicationStore;
    private final AccessControl accessControl;
    private final ConfigServer configServer;
    private final Clock clock;
    private final DeploymentTrigger deploymentTrigger;
    private final ApplicationPackageValidator applicationPackageValidator;
    private final EndpointCertificates endpointCertificates;
    private final StringFlag dockerImageRepoFlag;
    private final BillingController billingController;

    ApplicationController(Controller controller, CuratorDb curator, AccessControl accessControl, Clock clock,
                          FlagSource flagSource, BillingController billingController) {
        this.controller = Objects.requireNonNull(controller);
        this.curator = Objects.requireNonNull(curator);
        this.accessControl = Objects.requireNonNull(accessControl);
        this.configServer = controller.serviceRegistry().configServer();
        this.clock = Objects.requireNonNull(clock);
        this.billingController = Objects.requireNonNull(billingController);

        artifactRepository = controller.serviceRegistry().artifactRepository();
        applicationStore = controller.serviceRegistry().applicationStore();
        dockerImageRepoFlag = PermanentFlags.DOCKER_IMAGE_REPO.bindTo(flagSource);
        deploymentTrigger = new DeploymentTrigger(controller, clock);
        applicationPackageValidator = new ApplicationPackageValidator(controller);
        endpointCertificates = new EndpointCertificates(controller,
                                                        controller.serviceRegistry().endpointCertificateProvider(),
                                                        controller.serviceRegistry().endpointCertificateValidator());

        // One-off migration pass, delayed a minute after construction: rewrite every application,
        // creating any instance declared in its deployment spec which is missing.
        Once.after(Duration.ofMinutes(1), () -> {
            Instant start = clock.instant();
            int count = 0;
            for (TenantAndApplicationId id : curator.readApplicationIds()) {
                lockApplicationIfPresent(id, application -> {
                    for (InstanceName instance : application.get().deploymentSpec().instanceNames())
                        if (!application.get().instances().containsKey(instance))
                            application = withNewInstance(application, id.instance(instance));
                    store(application);
                });
                count++;
            }
            log.log(Level.INFO, Text.format("Wrote %d applications in %s", count, Duration.between(start, clock.instant())));
        });
    }

    /** Returns the application with the given id, or empty if it is not present */
    public Optional<Application> getApplication(TenantAndApplicationId id) {
        return curator.readApplication(id);
    }

    /** Returns the instance with the given id, or empty if it is not present */
    public Optional<Instance> getInstance(ApplicationId id) {
        return getApplication(TenantAndApplicationId.from(id)).flatMap(application -> application.get(id.instance()));
    }

    /**
     * Triggers reindexing for the given document types in the given clusters, for the given application.
     *
     * If no clusters are given, reindexing is triggered for the entire application; otherwise
     * if no document types are given, reindexing is triggered for all given clusters; otherwise
     * reindexing is triggered for the cartesian product of the given clusters and document types.
     */
    public void reindex(ApplicationId id, ZoneId zoneId, List<String> clusterNames, List<String> documentTypes, boolean indexedOnly, Double speed) {
        configServer.reindex(new DeploymentId(id, zoneId), clusterNames, documentTypes, indexedOnly, speed);
    }

    /** Returns the reindexing status for the given application in the given zone. */
    public ApplicationReindexing applicationReindexing(ApplicationId id, ZoneId zoneId) {
        return configServer.getReindexing(new DeploymentId(id, zoneId));
    }

    /** Enables reindexing for the given application in the given zone. */
    public void enableReindexing(ApplicationId id, ZoneId zoneId) {
        configServer.enableReindexing(new DeploymentId(id, zoneId));
    }

    /** Disables reindexing for the given application in the given zone. */
    public void disableReindexing(ApplicationId id, ZoneId zoneId) {
        configServer.disableReindexing(new DeploymentId(id, zoneId));
    }

    /**
     * Returns the application with the given id
     *
     * @throws IllegalArgumentException if it does not exist
     */
    public Application requireApplication(TenantAndApplicationId id) {
        return getApplication(id).orElseThrow(() -> new IllegalArgumentException(id + " not found"));
    }

    /**
     * Returns the instance with the given id
     *
     * @throws IllegalArgumentException if it does not exist
     */
    public Instance requireInstance(ApplicationId id) {
        return getInstance(id).orElseThrow(() -> new IllegalArgumentException(id + " not found"));
    }

    /** Returns a snapshot of all applications */
    public List<Application> asList() {
        return curator.readApplications(false);
    }

    /**
     * Returns a snapshot of all readable applications. Unlike {@link #asList()}, this skips
     * applications that cannot currently be read (e.g. due to serialization issues) and may return
     * an incomplete snapshot.
     *
     * This should only be used in cases where acting on a subset of applications is better than none.
     */
    public List<Application> readable() {
        return curator.readApplications(true);
    }

    /** Returns the ID of all known applications. */
    public List<TenantAndApplicationId> idList() {
        return curator.readApplicationIds();
    }

    /** Returns a snapshot of all applications of a tenant */
    public List<Application> asList(TenantName tenant) {
        return curator.readApplications(tenant);
    }

    public ArtifactRepository artifacts() { return artifactRepository; }

    public ApplicationStore applicationStore() { return applicationStore; }

    /** Returns all currently reachable content clusters among the given deployments. */
    public Map<ZoneId, List<String>> reachableContentClustersByZone(Collection<DeploymentId> ids) {
        Map<ZoneId, List<String>> clusters = new TreeMap<>(Comparator.comparing(ZoneId::value));
        for (DeploymentId id : ids)
            if (isHealthy(id)) // unreachable/suspended deployments are silently skipped
                clusters.put(id.zoneId(), List.copyOf(configServer.getContentClusters(id)));
        return Collections.unmodifiableMap(clusters);
    }

    /** Reads the oldest installed platform for the given application and zone from job history, or a node repo. */
    private Optional<Version> oldestInstalledPlatform(JobStatus job) {
        Version oldest = null;
        // Walk runs newest-first; the first successful run bounds how far back we need to look.
        for (Run run : job.runs().descendingMap().values()) {
            Version version = run.versions().targetPlatform();
            if (oldest == null || version.isBefore(oldest))
                oldest = version;
            if (run.status() == RunStatus.success)
                return Optional.of(oldest);
        }
        // No successful run in history: fall back to asking the zone's node repository.
        return oldestInstalledPlatform(job.id());
    }

    /** Reads the oldest installed platform for the given application and zone from the node repo of that zone. */
    private Optional<Version> oldestInstalledPlatform(JobId job) {
        return configServer.nodeRepository().list(job.type().zone(controller.system()),
                                                  NodeFilter.all()
                                                            .applications(job.application())
                                                            .states(active, reserved))
                           .stream()
                           .map(Node::currentVersion)
                           .filter(version -> ! version.isEmpty())
                           .min(naturalOrder());
    }

    /** Returns the oldest Vespa version installed on any active or reserved production node for the given application. */
    public Version oldestInstalledPlatform(TenantAndApplicationId id) {
        return controller.jobController().deploymentStatus(requireApplication(id)).jobs()
                         .production().asList().stream()
                         .map(this::oldestInstalledPlatform)
                         .flatMap(Optional::stream)
                         .min(naturalOrder())
                         .orElse(controller.readSystemVersion()); // no production nodes: assume system version
    }

    /**
     * Creates a new application for an existing tenant.
     *
     * @throws IllegalArgumentException if the application already exists
     */
    public Application createApplication(TenantAndApplicationId id, Credentials credentials) {
        try (Lock lock = lock(id)) {
            if (getApplication(id).isPresent())
                throw new IllegalArgumentException("Could not create '" + id + "': Application already exists");
            // Dashes and underscores collide in some contexts, so both spellings are reserved.
            if (getApplication(dashToUnderscore(id)).isPresent())
                throw new IllegalArgumentException("Could not create '" + id + "': Application " + dashToUnderscore(id) + " already exists");

            com.yahoo.vespa.hosted.controller.api.identifiers.ApplicationId.validate(id.application().value());

            if (controller.tenants().get(id.tenant()).isEmpty())
                throw new IllegalArgumentException("Could not create '" + id + "': This tenant does not exist");
            accessControl.createApplication(id, credentials);

            LockedApplication locked = new LockedApplication(new Application(id, clock.instant()), lock);
            store(locked);
            log.info("Created " + locked);
            return locked.get();
        }
    }

    /**
     * Creates a new instance for an existing application.
     *
     * @throws IllegalArgumentException if the instance already exists, or has an invalid instance name.
     */
    public void createInstance(ApplicationId id) {
        lockApplicationOrThrow(TenantAndApplicationId.from(id), application -> {
            store(withNewInstance(application, id));
        });
    }

    /** Returns given application with a new instance */
    public LockedApplication withNewInstance(LockedApplication application, ApplicationId instance) {
        if (instance.instance().isTester())
            throw new IllegalArgumentException("'" + instance + "' is a tester application!");
        InstanceId.validate(instance.instance().value());

        if (getInstance(instance).isPresent())
            throw new IllegalArgumentException("Could not create '" + instance + "': Instance already exists");
        // Dash/underscore spellings are mutually reserved, as for applications.
        if (getInstance(dashToUnderscore(instance)).isPresent())
            throw new IllegalArgumentException("Could not create '" + instance + "': Instance " + dashToUnderscore(instance) + " already exists");

        log.info("Created " + instance);
        return application.withNewInstance(instance.instance());
    }

    /** Deploys an application package for an existing application instance. */
    public ActivateResult deploy(JobId job, boolean deploySourceVersions) {
        if (job.application().instance().isTester())
            throw new IllegalArgumentException("'" + job.application() + "' is a tester application!");

        TenantAndApplicationId applicationId = TenantAndApplicationId.from(job.application());
        ZoneId zone = job.type().zone(controller.system());
        DeploymentId deployment = new DeploymentId(job.application(), zone);

        try (Lock deploymentLock = lockForDeployment(job.application(), zone)) {
            Set<ContainerEndpoint> containerEndpoints;
            Optional<EndpointCertificateMetadata> endpointCertificateMetadata;

            Run run = controller.jobController().last(job)
                                .orElseThrow(() -> new IllegalStateException("No known run of '" + job + "'"));

            if (run.hasEnded())
                throw new IllegalStateException("No deployment expected for " + job + " now, as no job is running");

            // Source versions are used, when requested, e.g. for staged rollouts — fall back to targets.
            Version platform = run.versions().sourcePlatform().filter(__ -> deploySourceVersions).orElse(run.versions().targetPlatform());
            ApplicationVersion revision = run.versions().sourceApplication().filter(__ -> deploySourceVersions).orElse(run.versions().targetApplication());
            ApplicationPackage applicationPackage = new ApplicationPackage(applicationStore.get(deployment, revision));

            // Hold the application lock only while reading state and preparing endpoints/certs,
            // not across the (slow) config server deployment below.
            try (Lock lock = lock(applicationId)) {
                LockedApplication application = new LockedApplication(requireApplication(applicationId), lock);
                Instance instance = application.get().require(job.application().instance());

                if ( ! applicationPackage.trustedCertificates().isEmpty() && run.testerCertificate().isPresent())
                    applicationPackage = applicationPackage.withTrustedCertificate(run.testerCertificate().get());

                endpointCertificateMetadata = endpointCertificates.getMetadata(instance, zone, applicationPackage.deploymentSpec());

                containerEndpoints = controller.routing().of(deployment).prepare(application);
            }

            ActivateResult result = deploy(job.application(), applicationPackage, zone, platform, containerEndpoints,
                                           endpointCertificateMetadata, run.isDryRun());

            var quotaUsage = deploymentQuotaUsage(zone, job.application());

            // Manual deployments get per-deployment notifications; automated ones per-application.
            NotificationSource source = zone.environment().isManuallyDeployed() ? NotificationSource.from(deployment) : NotificationSource.from(applicationId);
            @SuppressWarnings("deprecation")
            List<String> warnings = Optional.ofNullable(result.prepareResponse().log)
                                            .map(logs -> logs.stream()
                                                             .filter(log -> log.applicationPackage)
                                                             .filter(log -> LogLevel.parse(log.level).intValue() >= Level.WARNING.intValue())
                                                             .map(log -> log.message)
                                                             .sorted()
                                                             .distinct()
                                                             .collect(Collectors.toList()))
                                            .orElseGet(List::of);
            if (warnings.isEmpty()) controller.notificationsDb().removeNotification(source, Notification.Type.applicationPackage);
            else controller.notificationsDb().setNotification(source, Notification.Type.applicationPackage, Notification.Level.warning, warnings);

            // Re-acquire the application lock to record the new deployment.
            lockApplicationOrThrow(applicationId, application ->
                    store(application.with(job.application().instance(),
                                           instance -> instance.withNewDeployment(zone, revision, platform,
                                                                                  clock.instant(), warningsFrom(result),
                                                                                  quotaUsage))));
            return result;
        }
    }

    // NOTE(review): the javadoc "Stores the deployment spec and validation overrides from the
    // application package, and runs cleanup" describes storeWithUpdatedConfig, whose body
    // appears earlier in this file.

    /** Deploy a system application to given zone */
    public void deploy(SystemApplication application, ZoneId zone, Version version, boolean allowDowngrade) {
        if (application.hasApplicationPackage()) {
            deploySystemApplicationPackage(application, zone, version);
        } else {
            // Deploy by node type, through the node repository.
            configServer.nodeRepository().upgrade(zone, application.nodeType(), version, allowDowngrade);
        }
    }

    /** Deploy a system application to given zone */
    public ActivateResult deploySystemApplicationPackage(SystemApplication application, ZoneId zone, Version version) {
        if (application.hasApplicationPackage()) {
            ApplicationPackage applicationPackage = new ApplicationPackage(
                    artifactRepository.getSystemApplicationPackage(application.id(), zone, version)
            );
            return deploy(application.id(), applicationPackage, zone, version, Set.of(), /* No application cert */ Optional.empty(), false);
        } else {
            throw new RuntimeException("This system application does not have an application package: " + application.id().toShortString());
        }
    }

    /** Deploys the given tester application to the given zone. */
    public ActivateResult deployTester(TesterId tester, ApplicationPackage applicationPackage, ZoneId zone, Version platform) {
        return deploy(tester.id(), applicationPackage, zone, platform, Set.of(), /* No application cert for tester*/ Optional.empty(), false);
    }

    /** Common deployment path: prepares and activates the given package on the config server of the zone. */
    private ActivateResult deploy(ApplicationId application, ApplicationPackage applicationPackage, ZoneId zone,
                                  Version platform, Set<ContainerEndpoint> endpoints,
                                  Optional<EndpointCertificateMetadata> endpointCertificateMetadata,
                                  boolean dryRun) {
        DeploymentId deployment = new DeploymentId(application, zone);
        try {
            // Flag-controlled override of the docker image repo, by zone and application.
            Optional<DockerImage> dockerImageRepo = Optional.ofNullable(
                    dockerImageRepoFlag
                            .with(FetchVector.Dimension.ZONE_ID, zone.value())
                            .with(FetchVector.Dimension.APPLICATION_ID, application.serializedForm())
                            .value())
                    .filter(s -> !s.isBlank())
                    .map(DockerImage::fromString);

            Optional<AthenzDomain> domain = controller.tenants().get(application.tenant())
                    .filter(tenant-> tenant instanceof AthenzTenant)
                    .map(tenant -> ((AthenzTenant)tenant).domain());

            // Manual deployments have no submission step, so meta data is stored here.
            if (zone.environment().isManuallyDeployed())
                controller.applications().applicationStore().putMeta(deployment, clock.instant(), applicationPackage.metaDataZip());

            Quota deploymentQuota = DeploymentQuotaCalculator.calculate(billingController.getQuota(application.tenant()),
                    asList(application.tenant()), application, zone, applicationPackage.deploymentSpec());

            List<TenantSecretStore> tenantSecretStores = controller.tenants()
                    .get(application.tenant())
                    .filter(tenant-> tenant instanceof CloudTenant)
                    .map(tenant -> ((CloudTenant) tenant).tenantSecretStores())
                    .orElse(List.of());

            List<X509Certificate> operatorCertificates = controller.supportAccess().activeGrantsFor(deployment).stream()
                    .map(SupportAccessGrant::certificate)
                    .collect(toList());

            ConfigServer.PreparedApplication preparedApplication =
                    configServer.deploy(new DeploymentData(application, zone, applicationPackage.zippedContent(), platform,
                                                           endpoints, endpointCertificateMetadata, dockerImageRepo, domain,
                                                           deploymentQuota, tenantSecretStores, operatorCertificates,
                                                           dryRun));

            return new ActivateResult(new RevisionId(applicationPackage.hash()), preparedApplication.prepareResponse(),
                                      applicationPackage.zippedContent().length);
        } finally {
            // Routing is (re)configured even when deployment fails, except for tester applications.
            if ( ! application.instance().isTester()) {
                controller.routing().of(deployment).configure(applicationPackage.deploymentSpec());
            }
        }
    }

    /** Removes deployments of the given instance which are no longer in its deployment spec, requiring a validation override. */
    private LockedApplication withoutDeletedDeployments(LockedApplication application, InstanceName instance) {
        DeploymentSpec deploymentSpec = application.get().deploymentSpec();
        List<ZoneId> deploymentsToRemove = application.get().require(instance).productionDeployments().values().stream()
                                                      .map(Deployment::zone)
                                                      .filter(zone ->      deploymentSpec.instance(instance).isEmpty()
                                                                      || ! deploymentSpec.requireInstance(instance).deploysTo(zone.environment(), zone.region()))
                                                      .collect(toList());

        if (deploymentsToRemove.isEmpty()) return application;

        // Deployment removal is destructive, so it must be explicitly allowed in validation-overrides.xml.
        if ( ! application.get().validationOverrides().allows(ValidationId.deploymentRemoval, clock.instant()))
            throw new IllegalArgumentException(ValidationId.deploymentRemoval.value() + ": " + application.get().require(instance) +
                                               " is deployed in " +
                                               deploymentsToRemove.stream()
                                                                  .map(zone -> zone.region().value())
                                                                  .collect(joining(", ")) +
                                               ", but does not include " +
                                               (deploymentsToRemove.size() > 1 ? "these zones" : "this zone") +
                                               " in deployment.xml. " +
                                               ValidationOverrides.toAllowMessage(ValidationId.deploymentRemoval));

        // Remove the instance itself if it is undeclared and all its deployments are being removed.
        boolean removeInstance =    ! deploymentSpec.instanceNames().contains(instance)
                                 &&   application.get().require(instance).deployments().size() == deploymentsToRemove.size();
        for (ZoneId zone : deploymentsToRemove)
            application = deactivate(application, instance, zone);
        if (removeInstance)
            application = application.without(instance);
        return application;
    }

    /**
     * Deletes the given application. All known instances of the applications will be deleted.
     *
     * @throws IllegalArgumentException if the application has deployments or the caller is not authorized
     */
    public void deleteApplication(TenantAndApplicationId id, Credentials credentials) {
        deleteApplication(id, Optional.of(credentials));
    }

    public void deleteApplication(TenantAndApplicationId id, Optional<Credentials> credentials) {
        lockApplicationOrThrow(id, application -> {
            var deployments = application.get().instances().values().stream()
                                         .filter(instance -> ! instance.deployments().isEmpty())
                                         .collect(toMap(instance -> instance.name(),
                                                        instance -> instance.deployments().keySet().stream()
                                                                            .map(ZoneId::toString)
                                                                            .collect(joining(", "))));
            if ( ! deployments.isEmpty())
                // NOTE(review): message interpolates the LockedApplication, not the id — verify intended.
                throw new IllegalArgumentException("Could not delete '" + application + "': It has active deployments: " + deployments);

            for (Instance instance : application.get().instances().values()) {
                controller.routing().removeEndpointsInDns(application.get(), instance.name());
                application = application.without(instance.name());
            }

            applicationStore.removeAll(id.tenant(), id.application());
            applicationStore.removeAllTesters(id.tenant(), id.application());
            applicationStore.putMetaTombstone(id.tenant(), id.application(), clock.instant());

            credentials.ifPresent(creds -> accessControl.deleteApplication(id, creds));
            curator.removeApplication(id);
            controller.jobController().collectGarbage();
            controller.notificationsDb().removeNotifications(NotificationSource.from(id));
            log.info("Deleted " + id);
        });
    }

    /**
     * Deletes the given application instance.
     *
     * @throws IllegalArgumentException if the application has deployments or the caller is not authorized
     * @throws NotExistsException if the instance does not exist
     */
    public void deleteInstance(ApplicationId instanceId) {
        if (getInstance(instanceId).isEmpty())
            throw new NotExistsException("Could not delete instance '" + instanceId + "': Instance not found");

        lockApplicationOrThrow(TenantAndApplicationId.from(instanceId), application -> {
            if ( ! application.get().require(instanceId.instance()).deployments().isEmpty())
                // NOTE(review): message interpolates the LockedApplication, not the instance id — verify intended.
                throw new IllegalArgumentException("Could not delete '" + application + "': It has active deployments in: " +
                                                   application.get().require(instanceId.instance()).deployments().keySet().stream().map(ZoneId::toString)
                                                              .sorted().collect(joining(", ")));
            if (   ! application.get().deploymentSpec().equals(DeploymentSpec.empty)
                &&   application.get().deploymentSpec().instanceNames().contains(instanceId.instance()))
                throw new IllegalArgumentException("Can not delete '" + instanceId + "', which is specified in 'deployment.xml'; remove it there first");

            controller.routing().removeEndpointsInDns(application.get(), instanceId.instance());
            curator.writeApplication(application.without(instanceId.instance()).get());
            controller.jobController().collectGarbage();
            controller.notificationsDb().removeNotifications(NotificationSource.from(instanceId));
            log.info("Deleted " + instanceId);
        });
    }

    /**
     * Replace any previous version of this application by this instance
     *
     * @param application a locked application to store
     */
    public void store(LockedApplication application) {
        curator.writeApplication(application.get());
    }

    /**
     * Acquire a locked application to modify and store, if there is an application with the given id.
     *
     * @param applicationId ID of the application to lock and get.
     * @param action Function which acts on the locked application.
     */
    public void lockApplicationIfPresent(TenantAndApplicationId applicationId, Consumer<LockedApplication> action) {
        try (Lock lock = lock(applicationId)) {
            getApplication(applicationId).map(application -> new LockedApplication(application, lock)).ifPresent(action);
        }
    }

    /**
     * Acquire a locked application to modify and store, or throw an exception if no application has the given id.
     *
     * @param applicationId ID of the application to lock and require.
     * @param action Function which acts on the locked application.
     * @throws IllegalArgumentException when application does not exist.
     */
    public void lockApplicationOrThrow(TenantAndApplicationId applicationId, Consumer<LockedApplication> action) {
        try (Lock lock = lock(applicationId)) {
            action.accept(new LockedApplication(requireApplication(applicationId), lock));
        }
    }

    /**
     * Tells config server to schedule a restart of all nodes in this deployment
     *
     * @param restartFilter Variables to filter which nodes to restart.
     */
    public void restart(DeploymentId deploymentId, RestartFilter restartFilter) {
        configServer.restart(deploymentId, restartFilter);
    }

    /**
     * Asks the config server whether this deployment is currently healthy, i.e., serving traffic as usual.
     * If this cannot be ascertained, we must assume it is not.
     */
    public boolean isHealthy(DeploymentId deploymentId) {
        try {
            return ! isSuspended(deploymentId);
        } catch (RuntimeException e) {
            // Treat any failure to query suspension as unhealthy, but log it.
            log.log(Level.WARNING, "Failed getting suspension status of " + deploymentId + ": " + Exceptions.toMessageString(e));
            return false;
        }
    }

    /**
     * Asks the config server whether this deployment is currently <i>suspended</i>:
     * Not in a state where it should receive traffic.
     */
    public boolean isSuspended(DeploymentId deploymentId) {
        return configServer.isSuspended(deploymentId);
    }

    /** Sets suspension status of the given deployment in its zone. */
    public void setSuspension(DeploymentId deploymentId, boolean suspend) {
        configServer.setSuspension(deploymentId, suspend);
    }

    /** Deactivate application in the given zone */
    public void deactivate(ApplicationId id, ZoneId zone) {
        lockApplicationOrThrow(TenantAndApplicationId.from(id),
                               application -> store(deactivate(application, id.instance(), zone)));
    }

    /**
     * Deactivates a locked application without storing it
     *
     * @return the application with the deployment in the given zone removed
     */
    private LockedApplication deactivate(LockedApplication application, InstanceName instanceName, ZoneId zone) {
        DeploymentId id = new DeploymentId(application.get().id().instance(instanceName), zone);
        try {
            configServer.deactivate(id);
        } finally {
            // Routing, meta tombstones and notifications are cleaned up even if deactivation throws.
            controller.routing().of(id).configure(application.get().deploymentSpec());
            if (zone.environment().isManuallyDeployed())
                applicationStore.putMetaTombstone(id, clock.instant());
            if (!zone.environment().isTest())
                controller.notificationsDb().removeNotifications(NotificationSource.from(id));
        }
        return application.with(instanceName, instance -> instance.withoutDeploymentIn(zone));
    }

    public DeploymentTrigger deploymentTrigger() { return deploymentTrigger; }

    /**
     * Returns a lock which provides exclusive rights to changing this application.
     * Any operation which stores an application need to first acquire this lock, then read, modify
     * and store the application, and finally release (close) the lock.
     */
    Lock lock(TenantAndApplicationId application) {
        return curator.lock(application);
    }

    /**
     * Returns a lock which provides exclusive rights to deploying this application to the given zone.
     */
    private Lock lockForDeployment(ApplicationId application, ZoneId zone) {
        return curator.lockForDeployment(application, zone);
    }

    /**
     * Verifies that the application can be deployed to the tenant, following these rules:
     *
     * 1. Verify that the Athenz service can be launched by the config server
     * 2. If the principal is given, verify that the principal is tenant admin or admin of the tenant domain
     * 3. If the principal is not given, verify that the Athenz domain of the tenant equals Athenz domain given in deployment.xml
     *
     * @param tenantName tenant where application should be deployed
     * @param applicationPackage application package
     * @param deployer principal initiating the deployment, possibly empty
     */
    public void verifyApplicationIdentityConfiguration(TenantName tenantName, Optional<InstanceName> instanceName, Optional<ZoneId> zoneId, ApplicationPackage applicationPackage, Optional<Principal> deployer) {
        Optional<AthenzDomain> identityDomain = applicationPackage.deploymentSpec().athenzDomain()
                                                                  .map(domain -> new AthenzDomain(domain.value()));
        if (identityDomain.isEmpty()) {
            return; // no Athenz identity declared: nothing to verify
        }
        if (! (accessControl instanceof AthenzFacade)) {
            throw new IllegalArgumentException("Athenz domain and service specified in deployment.xml, but not supported by system.");
        }
        verifyAllowedLaunchAthenzService(applicationPackage.deploymentSpec());

        Optional<AthenzUser> athenzUser = getUser(deployer);
        if (athenzUser.isPresent()) {
            // A user is deploying: verify the user can launch the instance/zone-specific service.
            var zone = zoneId.orElseThrow(() -> new IllegalArgumentException("Unable to evaluate access, no zone provided in deployment"));
            var serviceToLaunch = instanceName
                    .flatMap(instance -> applicationPackage.deploymentSpec().instance(instance))
                    .flatMap(instanceSpec -> instanceSpec.athenzService(zone.environment(), zone.region()))
                    .or(() -> applicationPackage.deploymentSpec().athenzService())
                    .map(service -> new AthenzService(identityDomain.get(), service.value()));

            if (serviceToLaunch.isPresent()) {
                if (
                        ! ((AthenzFacade) accessControl).canLaunch(athenzUser.get(), serviceToLaunch.get()) &&
                        ! ((AthenzFacade) accessControl).hasTenantAdminAccess(athenzUser.get(), identityDomain.get())
                ) {
                    throw new IllegalArgumentException("User " + athenzUser.get().getFullName() + " is not allowed to launch " +
                                                       "service " + serviceToLaunch.get().getFullName() + ". " +
                                                       "Please reach out to the domain admin.");
                }
            } else {
                throw new IllegalArgumentException("Athenz domain configured, but no service defined for deployment to " + zone.value());
            }
        } else {
            // No (user) principal: require the tenant's Athenz domain to match the one in deployment.xml.
            Tenant tenant = controller.tenants().require(tenantName);
            AthenzDomain tenantDomain = ((AthenzTenant) tenant).domain();
            if ( ! Objects.equals(tenantDomain, identityDomain.get()))
                throw new IllegalArgumentException("Athenz domain in deployment.xml: [" + identityDomain.get().getName() + "] " +
                                                   "must match tenant domain: [" + tenantDomain.getName() + "]");
        }
    }

    private TenantAndApplicationId dashToUnderscore(TenantAndApplicationId id) {
        return TenantAndApplicationId.from(id.tenant().value(), id.application().value().replaceAll("-", "_"));
    }

    private ApplicationId dashToUnderscore(ApplicationId id) {
        return dashToUnderscore(TenantAndApplicationId.from(id)).instance(id.instance());
    }

    private QuotaUsage deploymentQuotaUsage(ZoneId zoneId, ApplicationId applicationId) {
        var application = configServer.nodeRepository().getApplication(zoneId, applicationId);
        return DeploymentQuotaCalculator.calculateQuotaUsage(application);
    }

    /*
     * Get the AthenzUser from this principal or Optional.empty if this does not represent a user.
     */
    private Optional<AthenzUser> getUser(Optional<Principal> deployer) {
        return deployer
                .filter(AthenzPrincipal.class::isInstance)
                .map(AthenzPrincipal.class::cast)
                .map(AthenzPrincipal::getIdentity)
                .filter(AthenzUser.class::isInstance)
                .map(AthenzUser.class::cast);
    }

    /*
     * Verifies that the configured athenz service (if any) can be launched by every reachable
     * zone's config server identity, for both the default and instance-specific services.
     */
    private void verifyAllowedLaunchAthenzService(DeploymentSpec deploymentSpec) {
        deploymentSpec.athenzDomain().ifPresent(domain -> {
            controller.zoneRegistry().zones().reachable().ids().forEach(zone -> {
                AthenzIdentity configServerAthenzIdentity = controller.zoneRegistry().getConfigServerHttpsIdentity(zone);
                deploymentSpec.athenzService().ifPresent(service -> {
                    verifyAthenzServiceCanBeLaunchedBy(configServerAthenzIdentity, new AthenzService(domain.value(), service.value()));
                });
                deploymentSpec.instances().forEach(spec -> {
                    spec.athenzService(zone.environment(), zone.region()).ifPresent(service -> {
                        verifyAthenzServiceCanBeLaunchedBy(configServerAthenzIdentity, new AthenzService(domain.value(), service.value()));
                    });
                });
            });
        });
    }

    private void verifyAthenzServiceCanBeLaunchedBy(AthenzIdentity configServerAthenzIdentity, AthenzService athenzService) {
        if ( ! ((AthenzFacade) accessControl).canLaunch(configServerAthenzIdentity, athenzService))
            throw new IllegalArgumentException("Not allowed to launch Athenz service " + athenzService.getFullName());
    }

    /** Returns the latest known version within the given major, which is not newer than the system version. */
    public Optional<Version> lastCompatibleVersion(int targetMajorVersion) {
        VersionStatus versions = controller.readVersionStatus();
        Version systemVersion = controller.systemVersion(versions);
        return versions.versions().stream()
                       .map(VespaVersion::versionNumber)
                       .filter(version -> version.getMajor() == targetMajorVersion)
                       .filter(version -> ! version.isAfter(systemVersion))
                       .max(naturalOrder());
    }

    /** Extract deployment warnings metric from deployment result */
    private static Map<DeploymentMetrics.Warning, Integer> warningsFrom(ActivateResult result) {
        if (result.prepareResponse().log == null) return Map.of();
        Map<DeploymentMetrics.Warning, Integer> warnings = new HashMap<>();
        for (Log log : result.prepareResponse().log) {
            // Only "warn"/"warning" levels are counted; all are currently bucketed under Warning.all.
            if (!"warn".equalsIgnoreCase(log.level) && !"warning".equalsIgnoreCase(log.level)) continue;
            warnings.merge(DeploymentMetrics.Warning.all, 1, Integer::sum);
        }
        return Map.copyOf(warnings);
    }

}
/**
 * Owned by a {@link Controller}; manages the applications known to it: creation, deletion,
 * deployment/deactivation, and identity/access verification.
 */
class ApplicationController {

    private static final Logger log = Logger.getLogger(ApplicationController.class.getName());

    /** The controller owning this */
    private final Controller controller;

    /** For persistence */
    private final CuratorDb curator;

    private final ArtifactRepository artifactRepository;
    private final ApplicationStore applicationStore;
    private final AccessControl accessControl;
    private final ConfigServer configServer;
    private final Clock clock;
    private final DeploymentTrigger deploymentTrigger;
    private final ApplicationPackageValidator applicationPackageValidator;
    private final EndpointCertificates endpointCertificates;
    private final StringFlag dockerImageRepoFlag;
    private final BillingController billingController;

    ApplicationController(Controller controller, CuratorDb curator, AccessControl accessControl, Clock clock,
                          FlagSource flagSource, BillingController billingController) {
        this.controller = Objects.requireNonNull(controller);
        this.curator = Objects.requireNonNull(curator);
        this.accessControl = Objects.requireNonNull(accessControl);
        this.configServer = controller.serviceRegistry().configServer();
        this.clock = Objects.requireNonNull(clock);
        this.billingController = Objects.requireNonNull(billingController);

        artifactRepository = controller.serviceRegistry().artifactRepository();
        applicationStore = controller.serviceRegistry().applicationStore();
        dockerImageRepoFlag = PermanentFlags.DOCKER_IMAGE_REPO.bindTo(flagSource);
        deploymentTrigger = new DeploymentTrigger(controller, clock);
        applicationPackageValidator = new ApplicationPackageValidator(controller);
        endpointCertificates = new EndpointCertificates(controller,
                                                        controller.serviceRegistry().endpointCertificateProvider(),
                                                        controller.serviceRegistry().endpointCertificateValidator());

        // One-off (delayed) rewrite of all stored applications, creating any instances declared in
        // deployment.xml which are missing from the stored application.
        Once.after(Duration.ofMinutes(1), () -> {
            Instant start = clock.instant();
            int count = 0;
            for (TenantAndApplicationId id : curator.readApplicationIds()) {
                lockApplicationIfPresent(id, application -> {
                    for (InstanceName instance : application.get().deploymentSpec().instanceNames())
                        if (!application.get().instances().containsKey(instance))
                            application = withNewInstance(application, id.instance(instance));
                    store(application);
                });
                count++;
            }
            log.log(Level.INFO, Text.format("Wrote %d applications in %s", count, Duration.between(start, clock.instant())));
        });
    }

    /** Returns the application with the given id, or null if it is not present */
    public Optional<Application> getApplication(TenantAndApplicationId id) {
        return curator.readApplication(id);
    }

    /** Returns the instance with the given id, or null if it is not present */
    public Optional<Instance> getInstance(ApplicationId id) {
        return getApplication(TenantAndApplicationId.from(id)).flatMap(application -> application.get(id.instance()));
    }

    /**
     * Triggers reindexing for the given document types in the given clusters, for the given application.
     *
     * If no clusters are given, reindexing is triggered for the entire application; otherwise
     * if no documents types are given, reindexing is triggered for all given clusters; otherwise
     * reindexing is triggered for the cartesian product of the given clusters and document types.
     */
    public void reindex(ApplicationId id, ZoneId zoneId, List<String> clusterNames, List<String> documentTypes, boolean indexedOnly, Double speed) {
        configServer.reindex(new DeploymentId(id, zoneId), clusterNames, documentTypes, indexedOnly, speed);
    }

    /** Returns the reindexing status for the given application in the given zone. */
    public ApplicationReindexing applicationReindexing(ApplicationId id, ZoneId zoneId) {
        return configServer.getReindexing(new DeploymentId(id, zoneId));
    }

    /** Enables reindexing for the given application in the given zone. */
    public void enableReindexing(ApplicationId id, ZoneId zoneId) {
        configServer.enableReindexing(new DeploymentId(id, zoneId));
    }

    /** Disables reindexing for the given application in the given zone. */
    public void disableReindexing(ApplicationId id, ZoneId zoneId) {
        configServer.disableReindexing(new DeploymentId(id, zoneId));
    }

    /**
     * Returns the application with the given id
     *
     * @throws IllegalArgumentException if it does not exist
     */
    public Application requireApplication(TenantAndApplicationId id) {
        return getApplication(id).orElseThrow(() -> new IllegalArgumentException(id + " not found"));
    }

    /**
     * Returns the instance with the given id
     *
     * @throws IllegalArgumentException if it does not exist
     */
    public Instance requireInstance(ApplicationId id) {
        return getInstance(id).orElseThrow(() -> new IllegalArgumentException(id + " not found"));
    }

    /** Returns a snapshot of all applications */
    public List<Application> asList() {
        return curator.readApplications(false);
    }

    /**
     * Returns a snapshot of all readable applications. Unlike {@link ApplicationController#asList()}, this skips
     * applications that cannot currently be read (e.g. due to serialization issues) and may return an incomplete
     * snapshot.
     *
     * This should only be used in cases where acting on a subset of applications is better than none.
     */
    public List<Application> readable() {
        return curator.readApplications(true);
    }

    /** Returns the ID of all known applications. */
    public List<TenantAndApplicationId> idList() {
        return curator.readApplicationIds();
    }

    /** Returns a snapshot of all applications of a tenant */
    public List<Application> asList(TenantName tenant) {
        return curator.readApplications(tenant);
    }

    public ArtifactRepository artifacts() { return artifactRepository; }

    public ApplicationStore applicationStore() { return applicationStore; }

    /** Returns all currently reachable content clusters among the given deployments. */
    public Map<ZoneId, List<String>> reachableContentClustersByZone(Collection<DeploymentId> ids) {
        Map<ZoneId, List<String>> clusters = new TreeMap<>(Comparator.comparing(ZoneId::value));
        for (DeploymentId id : ids)
            if (isHealthy(id))
                clusters.put(id.zoneId(), List.copyOf(configServer.getContentClusters(id)));
        return Collections.unmodifiableMap(clusters);
    }

    /** Reads the oldest installed platform for the given application and zone from job history, or a node repo. */
    private Optional<Version> oldestInstalledPlatform(JobStatus job) {
        Version oldest = null;
        for (Run run : job.runs().descendingMap().values()) {
            Version version = run.versions().targetPlatform();
            if (oldest == null || version.isBefore(oldest))
                oldest = version;
            // The oldest target platform of runs back to (and including) the last success is the answer;
            // if no success is found, fall back to asking the node repository.
            if (run.status() == RunStatus.success)
                return Optional.of(oldest);
        }
        return oldestInstalledPlatform(job.id());
    }

    /** Reads the oldest installed platform for the given application and zone from the node repo of that zone. */
    private Optional<Version> oldestInstalledPlatform(JobId job) {
        return configServer.nodeRepository().list(job.type().zone(controller.system()),
                                                  NodeFilter.all()
                                                            .applications(job.application())
                                                            .states(active, reserved))
                           .stream()
                           .map(Node::currentVersion)
                           .filter(version -> ! version.isEmpty())
                           .min(naturalOrder());
    }

    /** Returns the oldest Vespa version installed on any active or reserved production node for the given application. */
    public Version oldestInstalledPlatform(TenantAndApplicationId id) {
        return controller.jobController().deploymentStatus(requireApplication(id)).jobs()
                         .production().asList().stream()
                         .map(this::oldestInstalledPlatform)
                         .flatMap(Optional::stream)
                         .min(naturalOrder())
                         .orElse(controller.readSystemVersion());
    }

    /**
     * Creates a new application for an existing tenant.
     *
     * @throws IllegalArgumentException if the application already exists
     */
    public Application createApplication(TenantAndApplicationId id, Credentials credentials) {
        try (Lock lock = lock(id)) {
            if (getApplication(id).isPresent())
                throw new IllegalArgumentException("Could not create '" + id + "': Application already exists");
            // Dashes and underscores are interchangeable in some contexts, so both spellings must be free.
            if (getApplication(dashToUnderscore(id)).isPresent()) // VESPA-1945
                throw new IllegalArgumentException("Could not create '" + id + "': Application " + dashToUnderscore(id) + " already exists");

            com.yahoo.vespa.hosted.controller.api.identifiers.ApplicationId.validate(id.application().value());

            if (controller.tenants().get(id.tenant()).isEmpty())
                throw new IllegalArgumentException("Could not create '" + id + "': This tenant does not exist");
            accessControl.createApplication(id, credentials);

            LockedApplication locked = new LockedApplication(new Application(id, clock.instant()), lock);
            store(locked);
            log.info("Created " + locked);
            return locked.get();
        }
    }

    /**
     * Creates a new instance for an existing application.
     *
     * @throws IllegalArgumentException if the instance already exists, or has an invalid instance name.
     */
    public void createInstance(ApplicationId id) {
        lockApplicationOrThrow(TenantAndApplicationId.from(id), application -> {
            store(withNewInstance(application, id));
        });
    }

    /** Returns given application with a new instance */
    public LockedApplication withNewInstance(LockedApplication application, ApplicationId instance) {
        if (instance.instance().isTester())
            throw new IllegalArgumentException("'" + instance + "' is a tester application!");
        InstanceId.validate(instance.instance().value());

        if (getInstance(instance).isPresent())
            throw new IllegalArgumentException("Could not create '" + instance + "': Instance already exists");
        if (getInstance(dashToUnderscore(instance)).isPresent())
            throw new IllegalArgumentException("Could not create '" + instance + "': Instance " + dashToUnderscore(instance) + " already exists");

        log.info("Created " + instance);
        return application.withNewInstance(instance.instance());
    }

    /** Deploys an application package for an existing application instance. */
    public ActivateResult deploy(JobId job, boolean deploySourceVersions) {
        if (job.application().instance().isTester())
            throw new IllegalArgumentException("'" + job.application() + "' is a tester application!");

        TenantAndApplicationId applicationId = TenantAndApplicationId.from(job.application());
        ZoneId zone = job.type().zone(controller.system());
        DeploymentId deployment = new DeploymentId(job.application(), zone);

        try (Lock deploymentLock = lockForDeployment(job.application(), zone)) {
            Set<ContainerEndpoint> containerEndpoints;
            Optional<EndpointCertificateMetadata> endpointCertificateMetadata;

            Run run = controller.jobController().last(job)
                                .orElseThrow(() -> new IllegalStateException("No known run of '" + job + "'"));

            if (run.hasEnded())
                throw new IllegalStateException("No deployment expected for " + job + " now, as no job is running");

            // Source versions are used for the initial deployment of a revision change, when requested.
            Version platform = run.versions().sourcePlatform().filter(__ -> deploySourceVersions).orElse(run.versions().targetPlatform());
            ApplicationVersion revision = run.versions().sourceApplication().filter(__ -> deploySourceVersions).orElse(run.versions().targetApplication());
            ApplicationPackage applicationPackage = new ApplicationPackage(applicationStore.get(deployment, revision));

            try (Lock lock = lock(applicationId)) {
                LockedApplication application = new LockedApplication(requireApplication(applicationId), lock);
                Instance instance = application.get().require(job.application().instance());

                if ( ! applicationPackage.trustedCertificates().isEmpty()
                     &&   run.testerCertificate().isPresent())
                    applicationPackage = applicationPackage.withTrustedCertificate(run.testerCertificate().get());

                endpointCertificateMetadata = endpointCertificates.getMetadata(instance, zone, applicationPackage.deploymentSpec());

                containerEndpoints = controller.routing().of(deployment).prepare(application);

            } // Release application lock while doing the deployment, which is slow.

            ActivateResult result = deploy(job.application(), applicationPackage, zone, platform, containerEndpoints,
                                           endpointCertificateMetadata, run.isDryRun());

            // Record deployment warnings as notifications, keyed per deployment for manual zones,
            // per application otherwise.
            var quotaUsage = deploymentQuotaUsage(zone, job.application());

            NotificationSource source = zone.environment().isManuallyDeployed() ?
                    NotificationSource.from(deployment) : NotificationSource.from(applicationId);
            @SuppressWarnings("deprecation")
            List<String> warnings = Optional.ofNullable(result.prepareResponse().log)
                                            .map(logs -> logs.stream()
                                                             .filter(log -> log.applicationPackage)
                                                             .filter(log -> LogLevel.parse(log.level).intValue() >= Level.WARNING.intValue())
                                                             .map(log -> log.message)
                                                             .sorted()
                                                             .distinct()
                                                             .collect(Collectors.toList()))
                                            .orElseGet(List::of);
            if (warnings.isEmpty())
                controller.notificationsDb().removeNotification(source, Notification.Type.applicationPackage);
            else
                controller.notificationsDb().setNotification(source, Notification.Type.applicationPackage, Notification.Level.warning, warnings);

            lockApplicationOrThrow(applicationId, application ->
                    store(application.with(job.application().instance(),
                                           instance -> instance.withNewDeployment(zone, revision, platform,
                                                                                  clock.instant(), warningsFrom(result),
                                                                                  quotaUsage))));
            return result;
        }
    }

    // NOTE(review): orphaned javadoc — "Stores the deployment spec and validation overrides from the application
    // package, and runs cleanup." has no method here; presumably left behind by a refactor. Verify and remove.

    /** Deploy a system application to given zone */
    public void deploy(SystemApplication application, ZoneId zone, Version version, boolean allowDowngrade) {
        if (application.hasApplicationPackage()) {
            deploySystemApplicationPackage(application, zone, version);
        } else {
            // Deploy by calling node repository directly
            configServer.nodeRepository().upgrade(zone, application.nodeType(), version, allowDowngrade);
        }
    }

    /** Deploy a system application to given zone */
    public ActivateResult deploySystemApplicationPackage(SystemApplication application, ZoneId zone, Version version) {
        if (application.hasApplicationPackage()) {
            ApplicationPackage applicationPackage = new ApplicationPackage(
                    artifactRepository.getSystemApplicationPackage(application.id(), zone, version)
            );
            return deploy(application.id(), applicationPackage, zone, version, Set.of(), /* No application cert */ Optional.empty(), false);
        } else {
           throw new RuntimeException("This system application does not have an application package: " + application.id().toShortString());
        }
    }

    /** Deploys the given tester application to the given zone. */
    public ActivateResult deployTester(TesterId tester, ApplicationPackage applicationPackage, ZoneId zone, Version platform) {
        return deploy(tester.id(), applicationPackage, zone, platform, Set.of(), /* No application cert for tester*/ Optional.empty(), false);
    }

    private ActivateResult deploy(ApplicationId application, ApplicationPackage applicationPackage, ZoneId zone,
                                  Version platform, Set<ContainerEndpoint> endpoints,
                                  Optional<EndpointCertificateMetadata> endpointCertificateMetadata,
                                  boolean dryRun) {
        DeploymentId deployment = new DeploymentId(application, zone);
        try {
            // Feature-flagged override of the docker image repository, per zone/application.
            Optional<DockerImage> dockerImageRepo = Optional.ofNullable(
                    dockerImageRepoFlag
                            .with(FetchVector.Dimension.ZONE_ID, zone.value())
                            .with(FetchVector.Dimension.APPLICATION_ID, application.serializedForm())
                            .value())
                    .filter(s -> !s.isBlank())
                    .map(DockerImage::fromString);

            Optional<AthenzDomain> domain = controller.tenants().get(application.tenant())
                    .filter(tenant-> tenant instanceof AthenzTenant)
                    .map(tenant -> ((AthenzTenant)tenant).domain());

            if (zone.environment().isManuallyDeployed())
                controller.applications().applicationStore().putMeta(deployment, clock.instant(), applicationPackage.metaDataZip());

            Quota deploymentQuota = DeploymentQuotaCalculator.calculate(billingController.getQuota(application.tenant()),
                    asList(application.tenant()), application, zone, applicationPackage.deploymentSpec());

            List<TenantSecretStore> tenantSecretStores = controller.tenants()
                    .get(application.tenant())
                    .filter(tenant-> tenant instanceof CloudTenant)
                    .map(tenant -> ((CloudTenant) tenant).tenantSecretStores())
                    .orElse(List.of());
            List<X509Certificate> operatorCertificates = controller.supportAccess().activeGrantsFor(deployment).stream()
                                                                   .map(SupportAccessGrant::certificate)
                                                                   .collect(toList());

            ConfigServer.PreparedApplication preparedApplication =
                    configServer.deploy(new DeploymentData(application, zone, applicationPackage.zippedContent(), platform,
                                                           endpoints, endpointCertificateMetadata, dockerImageRepo, domain,
                                                           deploymentQuota, tenantSecretStores, operatorCertificates,
                                                           dryRun));

            return new ActivateResult(new RevisionId(applicationPackage.hash()), preparedApplication.prepareResponse(),
                                      applicationPackage.zippedContent().length);
        } finally {
            // Even on failure, routing must be (re)configured for this deployment — except for testers.
            if ( ! application.instance().isTester()) {
                controller.routing().of(deployment).configure(applicationPackage.deploymentSpec());
            }
        }
    }

    private LockedApplication withoutDeletedDeployments(LockedApplication application, InstanceName instance) {
        DeploymentSpec deploymentSpec = application.get().deploymentSpec();
        // Production deployments no longer covered by deployment.xml are candidates for removal.
        List<ZoneId> deploymentsToRemove = application.get().require(instance).productionDeployments().values().stream()
                                                      .map(Deployment::zone)
                                                      .filter(zone ->      deploymentSpec.instance(instance).isEmpty()
                                                                      || ! deploymentSpec.requireInstance(instance).deploysTo(zone.environment(), zone.region()))
                                                      .collect(toList());

        if (deploymentsToRemove.isEmpty()) return application;

        // Removal must be explicitly allowed via a validation override, to guard against accidents.
        if ( ! application.get().validationOverrides().allows(ValidationId.deploymentRemoval, clock.instant()))
            throw new IllegalArgumentException(ValidationId.deploymentRemoval.value() + ": " +
                                               application.get().require(instance) + " is deployed in " +
                                               deploymentsToRemove.stream()
                                                                  .map(zone -> zone.region().value())
                                                                  .collect(joining(", ")) +
                                               ", but does not include " +
                                               (deploymentsToRemove.size() > 1 ? "these zones" : "this zone") +
                                               " in deployment.xml. " +
                                               ValidationOverrides.toAllowMessage(ValidationId.deploymentRemoval));

        // Remove the instance as well if it is no longer declared and all its deployments go away.
        boolean removeInstance =    ! deploymentSpec.instanceNames().contains(instance)
                                 &&   application.get().require(instance).deployments().size() == deploymentsToRemove.size();

        for (ZoneId zone : deploymentsToRemove)
            application = deactivate(application, instance, zone);

        if (removeInstance)
            application = application.without(instance);

        return application;
    }

    /**
     * Deletes the given application. All known instances of the applications will be deleted.
     *
     * @throws IllegalArgumentException if the application has deployments or the caller is not authorized
     */
    public void deleteApplication(TenantAndApplicationId id, Credentials credentials) {
        deleteApplication(id, Optional.of(credentials));
    }

    public void deleteApplication(TenantAndApplicationId id, Optional<Credentials> credentials) {
        lockApplicationOrThrow(id, application -> {
            var deployments = application.get().instances().values().stream()
                                         .filter(instance -> ! instance.deployments().isEmpty())
                                         .collect(toMap(instance -> instance.name(),
                                                        instance -> instance.deployments().keySet().stream()
                                                                            .map(ZoneId::toString)
                                                                            .collect(joining(", "))));
            if ( ! deployments.isEmpty())
                throw new IllegalArgumentException("Could not delete '" + application + "': It has active deployments: " + deployments);

            for (Instance instance : application.get().instances().values()) {
                controller.routing().removeEndpointsInDns(application.get(), instance.name());
                application = application.without(instance.name());
            }

            applicationStore.removeAll(id.tenant(), id.application());
            applicationStore.removeAllTesters(id.tenant(), id.application());
            applicationStore.putMetaTombstone(id.tenant(), id.application(), clock.instant());

            credentials.ifPresent(creds -> accessControl.deleteApplication(id, creds));
            curator.removeApplication(id);

            controller.jobController().collectGarbage();
            controller.notificationsDb().removeNotifications(NotificationSource.from(id));
            log.info("Deleted " + id);
        });
    }

    /**
     * Deletes the given application instance.
     *
     * @throws IllegalArgumentException if the application has deployments or the caller is not authorized
     * @throws NotExistsException if the instance does not exist
     */
    public void deleteInstance(ApplicationId instanceId) {
        if (getInstance(instanceId).isEmpty())
            throw new NotExistsException("Could not delete instance '" + instanceId + "': Instance not found");

        lockApplicationOrThrow(TenantAndApplicationId.from(instanceId), application -> {
            if ( ! application.get().require(instanceId.instance()).deployments().isEmpty())
                throw new IllegalArgumentException("Could not delete '" + application + "': It has active deployments in: " +
                                                   application.get().require(instanceId.instance()).deployments().keySet().stream().map(ZoneId::toString)
                                                              .sorted().collect(joining(", ")));

            if (   ! application.get().deploymentSpec().equals(DeploymentSpec.empty)
                &&   application.get().deploymentSpec().instanceNames().contains(instanceId.instance()))
                throw new IllegalArgumentException("Can not delete '" + instanceId + "', which is specified in 'deployment.xml'; remove it there first");

            controller.routing().removeEndpointsInDns(application.get(), instanceId.instance());
            curator.writeApplication(application.without(instanceId.instance()).get());
            controller.jobController().collectGarbage();
            controller.notificationsDb().removeNotifications(NotificationSource.from(instanceId));
            log.info("Deleted " + instanceId);
        });
    }

    /**
     * Replace any previous version of this application by this instance
     *
     * @param application a locked application to store
     */
    public void store(LockedApplication application) {
        curator.writeApplication(application.get());
    }

    /**
     * Acquire a locked application to modify and store, if there is an application with the given id.
     *
     * @param applicationId ID of the application to lock and get.
     * @param action Function which acts on the locked application.
     */
    public void lockApplicationIfPresent(TenantAndApplicationId applicationId, Consumer<LockedApplication> action) {
        try (Lock lock = lock(applicationId)) {
            getApplication(applicationId).map(application -> new LockedApplication(application, lock)).ifPresent(action);
        }
    }

    /**
     * Acquire a locked application to modify and store, or throw an exception if no application has the given id.
     *
     * @param applicationId ID of the application to lock and require.
     * @param action Function which acts on the locked application.
     * @throws IllegalArgumentException when application does not exist.
     */
    public void lockApplicationOrThrow(TenantAndApplicationId applicationId, Consumer<LockedApplication> action) {
        try (Lock lock = lock(applicationId)) {
            action.accept(new LockedApplication(requireApplication(applicationId), lock));
        }
    }

    /**
     * Tells config server to schedule a restart of all nodes in this deployment
     *
     * @param restartFilter Variables to filter which nodes to restart.
     */
    public void restart(DeploymentId deploymentId, RestartFilter restartFilter) {
        configServer.restart(deploymentId, restartFilter);
    }

    /**
     * Asks the config server whether this deployment is currently healthy, i.e., serving traffic as usual.
     * If this cannot be ascertained, we must assumed it is not.
     */
    public boolean isHealthy(DeploymentId deploymentId) {
        try {
            return ! isSuspended(deploymentId);
        } catch (RuntimeException e) {
            // Conservative: failure to determine status means "not healthy".
            log.log(Level.WARNING, "Failed getting suspension status of " + deploymentId + ": " + Exceptions.toMessageString(e));
            return false;
        }
    }

    /**
     * Asks the config server whether this deployment is currently <i>suspended</i>:
     * Not in a state where it should receive traffic.
     */
    public boolean isSuspended(DeploymentId deploymentId) {
        return configServer.isSuspended(deploymentId);
    }

    /** Sets suspension status of the given deployment in its zone. */
    public void setSuspension(DeploymentId deploymentId, boolean suspend) {
        configServer.setSuspension(deploymentId, suspend);
    }

    /** Deactivate application in the given zone */
    public void deactivate(ApplicationId id, ZoneId zone) {
        lockApplicationOrThrow(TenantAndApplicationId.from(id),
                               application -> store(deactivate(application, id.instance(), zone)));
    }

    /**
     * Deactivates a locked application without storing it
     *
     * @return the application with the deployment in the given zone removed
     */
    private LockedApplication deactivate(LockedApplication application, InstanceName instanceName, ZoneId zone) {
        DeploymentId id = new DeploymentId(application.get().id().instance(instanceName), zone);
        try {
            configServer.deactivate(id);
        } finally {
            // Routing, metadata tombstone and notifications are cleaned up even if deactivation throws.
            controller.routing().of(id).configure(application.get().deploymentSpec());
            if (zone.environment().isManuallyDeployed())
                applicationStore.putMetaTombstone(id, clock.instant());
            if (!zone.environment().isTest())
                controller.notificationsDb().removeNotifications(NotificationSource.from(id));
        }
        return application.with(instanceName, instance -> instance.withoutDeploymentIn(zone));
    }

    public DeploymentTrigger deploymentTrigger() { return deploymentTrigger; }

    /**
     * Returns a lock which provides exclusive rights to changing this application.
     * Any operation which stores an application need to first acquire this lock, then read, modify
     * and store the application, and finally release (close) the lock.
     */
    Lock lock(TenantAndApplicationId application) {
        return curator.lock(application);
    }

    /**
     * Returns a lock which provides exclusive rights to deploying this application to the given zone.
     */
    private Lock lockForDeployment(ApplicationId application, ZoneId zone) {
        return curator.lockForDeployment(application, zone);
    }

    /**
     * Verifies that the application can be deployed to the tenant, following these rules:
     *
     * 1. Verify that the Athenz service can be launched by the config server
     * 2. If the principal is given, verify that the principal is tenant admin or admin of the tenant domain
     * 3. If the principal is not given, verify that the Athenz domain of the tenant equals Athenz domain given in deployment.xml
     *
     * @param tenantName tenant where application should be deployed
     * @param applicationPackage application package
     * @param deployer principal initiating the deployment, possibly empty
     */
    public void verifyApplicationIdentityConfiguration(TenantName tenantName, Optional<InstanceName> instanceName,
                                                       Optional<ZoneId> zoneId, ApplicationPackage applicationPackage,
                                                       Optional<Principal> deployer) {
        Optional<AthenzDomain> identityDomain = applicationPackage.deploymentSpec().athenzDomain()
                                                                  .map(domain -> new AthenzDomain(domain.value()));
        if (identityDomain.isEmpty()) {
            // If there is no domain configured in deployment.xml there is nothing to do.
            return;
        }

        // Verify that the system supports launching services.
        // Consider adding a capability to the system.
        if (! (accessControl instanceof AthenzFacade)) {
            throw new IllegalArgumentException("Athenz domain and service specified in deployment.xml, but not supported by system.");
        }

        // Verify that the config server is allowed to launch the service specified
        verifyAllowedLaunchAthenzService(applicationPackage.deploymentSpec());

        // If a user principal is initiating the deployment, verify that the user is allowed to launch the service.
        // Either the user is member of the domain admin role, or is given the "launch" privilege on the service.
        Optional<AthenzUser> athenzUser = getUser(deployer);
        if (athenzUser.isPresent()) {
            // The deployment zone is required to resolve the right instance-level service override.
            var zone = zoneId.orElseThrow(() -> new IllegalArgumentException("Unable to evaluate access, no zone provided in deployment"));
            var serviceToLaunch = instanceName
                    .flatMap(instance -> applicationPackage.deploymentSpec().instance(instance))
                    .flatMap(instanceSpec -> instanceSpec.athenzService(zone.environment(), zone.region()))
                    .or(() -> applicationPackage.deploymentSpec().athenzService())
                    .map(service -> new AthenzService(identityDomain.get(), service.value()));

            if (serviceToLaunch.isPresent()) {
                if (
                        ! ((AthenzFacade) accessControl).canLaunch(athenzUser.get(), serviceToLaunch.get()) && // launch privilege
                        ! ((AthenzFacade) accessControl).hasTenantAdminAccess(athenzUser.get(), identityDomain.get()) // tenant admin
                ) {
                    throw new IllegalArgumentException("User " + athenzUser.get().getFullName() + " is not allowed to launch " +
                                                       "service " + serviceToLaunch.get().getFullName() + ". " +
                                                       "Please reach out to the domain admin.");
                }
            } else {
                // This is a rare edge case where domain is configured but no service is configured.
                throw new IllegalArgumentException("Athenz domain configured, but no service defined for deployment to " + zone.value());
            }
        } else {
            // If this is a deployment pipeline, verify that the domain in deployment.xml matches the tenant domain.
            Tenant tenant = controller.tenants().require(tenantName);
            AthenzDomain tenantDomain = ((AthenzTenant) tenant).domain();
            if ( ! Objects.equals(tenantDomain, identityDomain.get()))
                throw new IllegalArgumentException("Athenz domain in deployment.xml: [" + identityDomain.get().getName() + "] " +
                                                   "must match tenant domain: [" + tenantDomain.getName() + "]");
        }
    }

    private TenantAndApplicationId dashToUnderscore(TenantAndApplicationId id) {
        return TenantAndApplicationId.from(id.tenant().value(), id.application().value().replaceAll("-", "_"));
    }

    private ApplicationId dashToUnderscore(ApplicationId id) {
        return dashToUnderscore(TenantAndApplicationId.from(id)).instance(id.instance());
    }

    private QuotaUsage deploymentQuotaUsage(ZoneId zoneId, ApplicationId applicationId) {
        var application = configServer.nodeRepository().getApplication(zoneId, applicationId);
        return DeploymentQuotaCalculator.calculateQuotaUsage(application);
    }

    /*
     * Get the AthenzUser from this principal or Optional.empty if this does not represent a user.
     */
    private Optional<AthenzUser> getUser(Optional<Principal> deployer) {
        return deployer
                .filter(AthenzPrincipal.class::isInstance)
                .map(AthenzPrincipal.class::cast)
                .map(AthenzPrincipal::getIdentity)
                .filter(AthenzUser.class::isInstance)
                .map(AthenzUser.class::cast);
    }

    /*
     * Verifies that the configured athenz service (if any) can be launched.
     */
    private void verifyAllowedLaunchAthenzService(DeploymentSpec deploymentSpec) {
        deploymentSpec.athenzDomain().ifPresent(domain -> {
            controller.zoneRegistry().zones().reachable().ids().forEach(zone -> {
                AthenzIdentity configServerAthenzIdentity = controller.zoneRegistry().getConfigServerHttpsIdentity(zone);
                deploymentSpec.athenzService().ifPresent(service -> {
                    verifyAthenzServiceCanBeLaunchedBy(configServerAthenzIdentity, new AthenzService(domain.value(), service.value()));
                });
                deploymentSpec.instances().forEach(spec -> {
                    spec.athenzService(zone.environment(), zone.region()).ifPresent(service -> {
                        verifyAthenzServiceCanBeLaunchedBy(configServerAthenzIdentity, new AthenzService(domain.value(), service.value()));
                    });
                });
            });
        });
    }

    private void verifyAthenzServiceCanBeLaunchedBy(AthenzIdentity configServerAthenzIdentity, AthenzService athenzService) {
        if ( ! ((AthenzFacade) accessControl).canLaunch(configServerAthenzIdentity, athenzService))
            throw new IllegalArgumentException("Not allowed to launch Athenz service " + athenzService.getFullName());
    }

    /** Returns the latest known version within the given major, which is not newer than the system version. */
    public Optional<Version> lastCompatibleVersion(int targetMajorVersion) {
        VersionStatus versions = controller.readVersionStatus();
        Version systemVersion = controller.systemVersion(versions);
        return versions.versions().stream()
                       .map(VespaVersion::versionNumber)
                       .filter(version -> version.getMajor() == targetMajorVersion)
                       .filter(version -> ! version.isAfter(systemVersion))
                       .max(naturalOrder());
    }

    /** Extract deployment warnings metric from deployment result */
    private static Map<DeploymentMetrics.Warning, Integer> warningsFrom(ActivateResult result) {
        if (result.prepareResponse().log == null) return Map.of();
        Map<DeploymentMetrics.Warning, Integer> warnings = new HashMap<>();
        for (Log log : result.prepareResponse().log) {
            if (!"warn".equalsIgnoreCase(log.level) && !"warning".equalsIgnoreCase(log.level)) continue;
            warnings.merge(DeploymentMetrics.Warning.all, 1, Integer::sum);
        }
        return Map.copyOf(warnings);
    }

}
And this will be removed later, once we switch to using only the build number + directly-deployed bit as deployment keys :)
private void versionsToSlime(Application application, Cursor object) { application.instances() .values() .stream() .flatMap(instance -> instance.deployments().values().stream()) .map(Deployment::applicationVersion) .forEach(application.versions()::add); application.versions().forEach(version -> toSlime(version, object.addObject())); }
private void versionsToSlime(Application application, Cursor object) { application.instances() .values() .stream() .flatMap(instance -> instance.deployments().values().stream()) .map(Deployment::applicationVersion) .forEach(application.versions()::add); application.versions().forEach(version -> toSlime(version, object.addObject())); }
/**
 * Serializes {@link Application} instances to/from JSON stored in the controller's persistent store,
 * using the Slime data API ({@code toSlime} for writing, {@code fromSlime} for reading).
 *
 * IMPORTANT: the string constants below are persisted field names. Several of them deliberately keep
 * historical, slightly odd values (e.g. "deploymentSpecField", "repositoryField", "rotationStatus2")
 * — renaming a constant is fine, but changing its string VALUE breaks reading of already-stored data.
 */
class ApplicationSerializer {

    // ---- Application-level field names ----
    private static final String idField = "id";
    private static final String createdAtField = "createdAt";
    private static final String deploymentSpecField = "deploymentSpecField";
    private static final String validationOverridesField = "validationOverrides";
    private static final String instancesField = "instances";
    private static final String deployingField = "deployingField";
    private static final String projectIdField = "projectId";
    private static final String latestVersionField = "latestVersion";
    private static final String versionsField = "versions";
    private static final String pinnedField = "pinned";
    private static final String deploymentIssueField = "deploymentIssueId";
    private static final String ownershipIssueIdField = "ownershipIssueId";
    private static final String ownerField = "confirmedOwner";
    private static final String majorVersionField = "majorVersion";
    private static final String writeQualityField = "writeQuality";
    private static final String queryQualityField = "queryQuality";
    private static final String pemDeployKeysField = "pemDeployKeys";
    private static final String assignedRotationClusterField = "clusterId";
    private static final String assignedRotationRotationField = "rotationId";
    private static final String assignedRotationRegionsField = "regions";

    // ---- Instance-level field names ----
    private static final String versionField = "version";
    private static final String instanceNameField = "instanceName";
    private static final String deploymentsField = "deployments";
    private static final String deploymentJobsField = "deploymentJobs";
    private static final String assignedRotationsField = "assignedRotations";
    private static final String assignedRotationEndpointField = "endpointId";
    private static final String latestDeployedField = "latestDeployed";

    // ---- Deployment-level field names ----
    private static final String zoneField = "zone";
    private static final String environmentField = "environment";
    private static final String regionField = "region";
    private static final String deployTimeField = "deployTime";
    private static final String applicationBuildNumberField = "applicationBuildNumber";
    private static final String applicationPackageRevisionField = "applicationPackageRevision";
    private static final String sourceRevisionField = "sourceRevision";
    private static final String repositoryField = "repositoryField";
    private static final String branchField = "branchField";
    private static final String commitField = "commitField";
    private static final String authorEmailField = "authorEmailField";
    private static final String deployedDirectlyField = "deployedDirectly";
    private static final String compileVersionField = "compileVersion";
    private static final String buildTimeField = "buildTime";
    private static final String sourceUrlField = "sourceUrl";
    // NOTE(review): bundleHash is read in applicationVersionFromSlime but not written by the
    // corresponding toSlime(ApplicationVersion, ...) — presumably written elsewhere or legacy; verify.
    private static final String bundleHashField = "bundleHash";
    private static final String lastQueriedField = "lastQueried";
    private static final String lastWrittenField = "lastWritten";
    private static final String lastQueriesPerSecondField = "lastQueriesPerSecond";
    private static final String lastWritesPerSecondField = "lastWritesPerSecond";

    // ---- Job/pause field names ----
    private static final String jobStatusField = "jobStatus";
    private static final String jobTypeField = "jobType";
    private static final String pausedUntilField = "pausedUntil";

    // ---- Deployment metrics field names ----
    private static final String deploymentMetricsField = "metrics";
    private static final String deploymentMetricsQPSField = "queriesPerSecond";
    private static final String deploymentMetricsWPSField = "writesPerSecond";
    private static final String deploymentMetricsDocsField = "documentCount";
    private static final String deploymentMetricsQueryLatencyField = "queryLatencyMillis";
    private static final String deploymentMetricsWriteLatencyField = "writeLatencyMillis";
    private static final String deploymentMetricsUpdateTime = "lastUpdated";
    private static final String deploymentMetricsWarningsField = "warnings";

    // ---- Rotation status field names ("rotationStatus2" is a versioned key — keep as-is) ----
    private static final String rotationStatusField = "rotationStatus2";
    private static final String rotationIdField = "rotationId";
    private static final String lastUpdatedField = "lastUpdated";
    private static final String rotationStateField = "state";
    private static final String statusField = "status";

    // ---- Quota / cost field names ----
    private static final String quotaUsageRateField = "quotaUsageRate";
    private static final String deploymentCostField = "cost";

    /** Serializes the given application, including all of its instances, to a Slime tree. */
    public Slime toSlime(Application application) {
        Slime slime = new Slime();
        Cursor root = slime.setObject();
        root.setString(idField, application.id().serialized());
        root.setLong(createdAtField, application.createdAt().toEpochMilli());
        root.setString(deploymentSpecField, application.deploymentSpec().xmlForm());
        root.setString(validationOverridesField, application.validationOverrides().xmlForm());
        // Optional scalar fields are written only when present; readers treat absence as empty
        application.projectId().ifPresent(projectId -> root.setLong(projectIdField, projectId));
        application.deploymentIssueId().ifPresent(jiraIssueId -> root.setString(deploymentIssueField, jiraIssueId.value()));
        application.ownershipIssueId().ifPresent(issueId -> root.setString(ownershipIssueIdField, issueId.value()));
        application.owner().ifPresent(owner -> root.setString(ownerField, owner.username()));
        application.majorVersion().ifPresent(majorVersion -> root.setLong(majorVersionField, majorVersion));
        root.setDouble(queryQualityField, application.metrics().queryServiceQuality());
        root.setDouble(writeQualityField, application.metrics().writeServiceQuality());
        deployKeysToSlime(application.deployKeys(), root.setArray(pemDeployKeysField));
        application.latestVersion().ifPresent(version -> toSlime(version, root.setObject(latestVersionField)));
        versionsToSlime(application, root.setArray(versionsField));
        instancesToSlime(application, root.setArray(instancesField));
        return slime;
    }

    /** Writes one object per instance: deployments, job pauses, rotations, rotation status and pending change. */
    private void instancesToSlime(Application application, Cursor array) {
        for (Instance instance : application.instances().values()) {
            Cursor instanceObject = array.addObject();
            instanceObject.setString(instanceNameField, instance.name().value());
            deploymentsToSlime(instance.deployments().values(), instanceObject.setArray(deploymentsField));
            toSlime(instance.jobPauses(), instanceObject.setObject(deploymentJobsField));
            assignedRotationsToSlime(instance.rotations(), instanceObject);
            toSlime(instance.rotationStatus(), instanceObject.setArray(rotationStatusField));
            toSlime(instance.change(), instanceObject, deployingField);
            instance.latestDeployed().ifPresent(version -> toSlime(version, instanceObject.setObject(latestDeployedField)));
        }
    }

    /** Writes each deploy key as a PEM-encoded string array entry. */
    private void deployKeysToSlime(Set<PublicKey> deployKeys, Cursor array) {
        deployKeys.forEach(key -> array.addString(KeyUtils.toPem(key)));
    }

    private void deploymentsToSlime(Collection<Deployment> deployments, Cursor array) {
        for (Deployment deployment : deployments)
            deploymentToSlime(deployment, array.addObject());
    }

    /** Writes a single deployment: zone, versions, metrics, activity, quota usage and optional cost. */
    private void deploymentToSlime(Deployment deployment, Cursor object) {
        zoneIdToSlime(deployment.zone(), object.setObject(zoneField));
        object.setString(versionField, deployment.version().toString());
        object.setLong(deployTimeField, deployment.at().toEpochMilli());
        toSlime(deployment.applicationVersion(), object.setObject(applicationPackageRevisionField));
        deploymentMetricsToSlime(deployment.metrics(), object);
        // Activity timestamps/rates are optional and omitted when unknown
        deployment.activity().lastQueried().ifPresent(instant -> object.setLong(lastQueriedField, instant.toEpochMilli()));
        deployment.activity().lastWritten().ifPresent(instant -> object.setLong(lastWrittenField, instant.toEpochMilli()));
        deployment.activity().lastQueriesPerSecond().ifPresent(value -> object.setDouble(lastQueriesPerSecondField, value));
        deployment.activity().lastWritesPerSecond().ifPresent(value -> object.setDouble(lastWritesPerSecondField, value));
        object.setDouble(quotaUsageRateField, deployment.quota().rate());
        deployment.cost().ifPresent(cost -> object.setDouble(deploymentCostField, cost));
    }

    /** Writes deployment metrics under a nested "metrics" object; warnings only when non-empty. */
    private void deploymentMetricsToSlime(DeploymentMetrics metrics, Cursor object) {
        Cursor root = object.setObject(deploymentMetricsField);
        root.setDouble(deploymentMetricsQPSField, metrics.queriesPerSecond());
        root.setDouble(deploymentMetricsWPSField, metrics.writesPerSecond());
        root.setDouble(deploymentMetricsDocsField, metrics.documentCount());
        root.setDouble(deploymentMetricsQueryLatencyField, metrics.queryLatencyMillis());
        root.setDouble(deploymentMetricsWriteLatencyField, metrics.writeLatencyMillis());
        metrics.instant().ifPresent(instant -> root.setLong(deploymentMetricsUpdateTime, instant.toEpochMilli()));
        if (!metrics.warnings().isEmpty()) {
            Cursor warningsObject = root.setObject(deploymentMetricsWarningsField);
            // Warning enum name is the object key, the count is the value
            metrics.warnings().forEach((warning, count) -> warningsObject.setLong(warning.name(), count));
        }
    }

    private void zoneIdToSlime(ZoneId zone, Cursor object) {
        object.setString(environmentField, zone.environment().value());
        object.setString(regionField, zone.region().value());
    }

    /** Writes an application (package) version; all components are optional except the deployed-directly flag. */
    private void toSlime(ApplicationVersion applicationVersion, Cursor object) {
        applicationVersion.buildNumber().ifPresent(number -> object.setLong(applicationBuildNumberField, number));
        applicationVersion.source().ifPresent(source -> toSlime(source, object.setObject(sourceRevisionField)));
        applicationVersion.authorEmail().ifPresent(email -> object.setString(authorEmailField, email));
        applicationVersion.compileVersion().ifPresent(version -> object.setString(compileVersionField, version.toString()));
        applicationVersion.buildTime().ifPresent(time -> object.setLong(buildTimeField, time.toEpochMilli()));
        applicationVersion.sourceUrl().ifPresent(url -> object.setString(sourceUrlField, url));
        applicationVersion.commit().ifPresent(commit -> object.setString(commitField, commit));
        object.setBool(deployedDirectlyField, applicationVersion.isDeployedDirectly());
    }

    private void toSlime(SourceRevision sourceRevision, Cursor object) {
        object.setString(repositoryField, sourceRevision.repository());
        object.setString(branchField, sourceRevision.branch());
        object.setString(commitField, sourceRevision.commit());
    }

    /** Writes job pauses as an array of { jobType, pausedUntil } objects under "jobStatus". */
    private void toSlime(Map<JobType, Instant> jobPauses, Cursor cursor) {
        Cursor jobStatusArray = cursor.setArray(jobStatusField);
        jobPauses.forEach((type, until) -> {
            Cursor jobPauseObject = jobStatusArray.addObject();
            jobPauseObject.setString(jobTypeField, type.jobName());
            jobPauseObject.setLong(pausedUntilField, until.toEpochMilli());
        });
    }

    /** Writes a pending change; an empty change writes nothing at all (no object under fieldName). */
    private void toSlime(Change deploying, Cursor parentObject, String fieldName) {
        if (deploying.isEmpty()) return;
        Cursor object = parentObject.setObject(fieldName);
        if (deploying.platform().isPresent())
            object.setString(versionField, deploying.platform().get().toString());
        if (deploying.application().isPresent())
            toSlime(deploying.application().get(), object);
        if (deploying.isPinned())
            object.setBool(pinnedField, true);
    }

    /** Writes rotation status: per rotation id, a last-updated time and per-zone state entries. */
    private void toSlime(RotationStatus status, Cursor array) {
        status.asMap().forEach((rotationId, targets) -> {
            Cursor rotationObject = array.addObject();
            rotationObject.setString(rotationIdField, rotationId.asString());
            rotationObject.setLong(lastUpdatedField, targets.lastUpdated().toEpochMilli());
            Cursor statusArray = rotationObject.setArray(statusField);
            targets.asMap().forEach((zone, state) -> {
                Cursor statusObject = statusArray.addObject();
                zoneIdToSlime(zone, statusObject);
                statusObject.setString(rotationStateField, state.name());
            });
        });
    }

    /** Writes assigned rotations: endpoint id, rotation id, cluster and the regions it covers. */
    private void assignedRotationsToSlime(List<AssignedRotation> rotations, Cursor parent) {
        var rotationsArray = parent.setArray(assignedRotationsField);
        for (var rotation : rotations) {
            var object = rotationsArray.addObject();
            object.setString(assignedRotationEndpointField, rotation.endpointId().id());
            object.setString(assignedRotationRotationField, rotation.rotationId().asString());
            object.setString(assignedRotationClusterField, rotation.clusterId().value());
            var regionsArray = object.setArray(assignedRotationRegionsField);
            for (var region : rotation.regions()) {
                regionsArray.addString(region.value());
            }
        }
    }

    /** Deserializes an application from its stored JSON byte representation. */
    public Application fromSlime(byte[] data) {
        return fromSlime(SlimeUtils.jsonToSlime(data));
    }

    /** Reads back everything written by {@link #toSlime(Application)}; absent fields become empty Optionals. */
    private Application fromSlime(Slime slime) {
        Inspector root = slime.get();
        TenantAndApplicationId id = TenantAndApplicationId.fromSerialized(root.field(idField).asString());
        Instant createdAt = SlimeUtils.instant(root.field(createdAtField));
        DeploymentSpec deploymentSpec = DeploymentSpec.fromXml(root.field(deploymentSpecField).asString(), false);
        ValidationOverrides validationOverrides = ValidationOverrides.fromXml(root.field(validationOverridesField).asString());
        Optional<IssueId> deploymentIssueId = SlimeUtils.optionalString(root.field(deploymentIssueField)).map(IssueId::from);
        Optional<IssueId> ownershipIssueId = SlimeUtils.optionalString(root.field(ownershipIssueIdField)).map(IssueId::from);
        Optional<User> owner = SlimeUtils.optionalString(root.field(ownerField)).map(User::from);
        OptionalInt majorVersion = SlimeUtils.optionalInteger(root.field(majorVersionField));
        ApplicationMetrics metrics = new ApplicationMetrics(root.field(queryQualityField).asDouble(),
                                                            root.field(writeQualityField).asDouble());
        Set<PublicKey> deployKeys = deployKeysFromSlime(root.field(pemDeployKeysField));
        List<Instance> instances = instancesFromSlime(id, root.field(instancesField));
        OptionalLong projectId = SlimeUtils.optionalLong(root.field(projectIdField));
        Optional<ApplicationVersion> latestVersion = latestVersionFromSlime(root.field(latestVersionField));
        SortedSet<ApplicationVersion> versions = versionsFromSlime(root.field(versionsField));
        return new Application(id, createdAt, deploymentSpec, validationOverrides,
                               deploymentIssueId, ownershipIssueId, owner, majorVersion, metrics,
                               deployKeys, projectId, latestVersion, versions, instances);
    }

    /** Returns the stored version, or empty if the object is missing/unparseable (maps to the "unknown" version). */
    private Optional<ApplicationVersion> latestVersionFromSlime(Inspector latestVersionObject) {
        return Optional.of(applicationVersionFromSlime(latestVersionObject))
                       .filter(version -> ! version.isUnknown());
    }

    private SortedSet<ApplicationVersion> versionsFromSlime(Inspector versionsObject) {
        SortedSet<ApplicationVersion> versions = new TreeSet<>();
        versionsObject.traverse((ArrayTraverser) (name, object) -> versions.add(applicationVersionFromSlime(object)));
        return versions;
    }

    /** Reads all instances, reconstructing each from its stored sub-objects. */
    private List<Instance> instancesFromSlime(TenantAndApplicationId id, Inspector field) {
        List<Instance> instances = new ArrayList<>();
        field.traverse((ArrayTraverser) (name, object) -> {
            InstanceName instanceName = InstanceName.from(object.field(instanceNameField).asString());
            List<Deployment> deployments = deploymentsFromSlime(object.field(deploymentsField));
            Map<JobType, Instant> jobPauses = jobPausesFromSlime(object.field(deploymentJobsField));
            List<AssignedRotation> assignedRotations = assignedRotationsFromSlime(object);
            RotationStatus rotationStatus = rotationStatusFromSlime(object);
            Change change = changeFromSlime(object.field(deployingField));
            Optional<ApplicationVersion> latestDeployed = latestVersionFromSlime(object.field(latestDeployedField));
            instances.add(new Instance(id.instance(instanceName),
                                       deployments,
                                       jobPauses,
                                       assignedRotations,
                                       rotationStatus,
                                       change,
                                       latestDeployed));
        });
        return instances;
    }

    /** Parses each PEM string back into a public key; LinkedHashSet preserves stored order. */
    private Set<PublicKey> deployKeysFromSlime(Inspector array) {
        Set<PublicKey> keys = new LinkedHashSet<>();
        array.traverse((ArrayTraverser) (__, key) -> keys.add(KeyUtils.fromPemEncodedPublicKey(key.asString())));
        return keys;
    }

    private List<Deployment> deploymentsFromSlime(Inspector array) {
        List<Deployment> deployments = new ArrayList<>();
        array.traverse((ArrayTraverser) (int i, Inspector item) -> deployments.add(deploymentFromSlime(item)));
        return deployments;
    }

    /** Reads a single deployment; optional activity/quota/cost fields default to empty when absent. */
    private Deployment deploymentFromSlime(Inspector deploymentObject) {
        return new Deployment(zoneIdFromSlime(deploymentObject.field(zoneField)),
                              applicationVersionFromSlime(deploymentObject.field(applicationPackageRevisionField)),
                              Version.fromString(deploymentObject.field(versionField).asString()),
                              SlimeUtils.instant(deploymentObject.field(deployTimeField)),
                              deploymentMetricsFromSlime(deploymentObject.field(deploymentMetricsField)),
                              DeploymentActivity.create(SlimeUtils.optionalInstant(deploymentObject.field(lastQueriedField)),
                                                        SlimeUtils.optionalInstant(deploymentObject.field(lastWrittenField)),
                                                        SlimeUtils.optionalDouble(deploymentObject.field(lastQueriesPerSecondField)),
                                                        SlimeUtils.optionalDouble(deploymentObject.field(lastWritesPerSecondField))),
                              QuotaUsage.create(SlimeUtils.optionalDouble(deploymentObject.field(quotaUsageRateField))),
                              SlimeUtils.optionalDouble(deploymentObject.field(deploymentCostField)));
    }

    private DeploymentMetrics deploymentMetricsFromSlime(Inspector object) {
        Optional<Instant> instant = SlimeUtils.optionalInstant(object.field(deploymentMetricsUpdateTime));
        return new DeploymentMetrics(object.field(deploymentMetricsQPSField).asDouble(),
                                     object.field(deploymentMetricsWPSField).asDouble(),
                                     object.field(deploymentMetricsDocsField).asDouble(),
                                     object.field(deploymentMetricsQueryLatencyField).asDouble(),
                                     object.field(deploymentMetricsWriteLatencyField).asDouble(),
                                     instant,
                                     deploymentWarningsFrom(object.field(deploymentMetricsWarningsField)));
    }

    /** Reads the warnings map; keys are Warning enum names, values are counts. */
    private Map<DeploymentMetrics.Warning, Integer> deploymentWarningsFrom(Inspector object) {
        Map<DeploymentMetrics.Warning, Integer> warnings = new HashMap<>();
        object.traverse((ObjectTraverser) (name, value) -> warnings.put(DeploymentMetrics.Warning.valueOf(name),
                                                                        (int) value.asLong()));
        return Collections.unmodifiableMap(warnings);
    }

    /** Reads rotation status from the versioned "rotationStatus2" array on the instance object. */
    private RotationStatus rotationStatusFromSlime(Inspector parentObject) {
        var object = parentObject.field(rotationStatusField);
        var statusMap = new LinkedHashMap<RotationId, RotationStatus.Targets>();
        object.traverse((ArrayTraverser) (idx, statusObject) -> statusMap.put(new RotationId(statusObject.field(rotationIdField).asString()),
                                                                              new RotationStatus.Targets(
                                                                                      singleRotationStatusFromSlime(statusObject.field(statusField)),
                                                                                      SlimeUtils.instant(statusObject.field(lastUpdatedField)))));
        return RotationStatus.from(statusMap);
    }

    /** Reads per-zone rotation state; an invalid (absent) field yields an empty map. */
    private Map<ZoneId, RotationState> singleRotationStatusFromSlime(Inspector object) {
        if (!object.valid()) {
            return Collections.emptyMap();
        }
        Map<ZoneId, RotationState> rotationStatus = new LinkedHashMap<>();
        object.traverse((ArrayTraverser) (idx, statusObject) -> {
            var zone = zoneIdFromSlime(statusObject);
            var status = RotationState.valueOf(statusObject.field(rotationStateField).asString());
            rotationStatus.put(zone, status);
        });
        return Collections.unmodifiableMap(rotationStatus);
    }

    private ZoneId zoneIdFromSlime(Inspector object) {
        return ZoneId.from(object.field(environmentField).asString(), object.field(regionField).asString());
    }

    /** Reads an application version; missing object or missing build number yields ApplicationVersion.unknown. */
    private ApplicationVersion applicationVersionFromSlime(Inspector object) {
        if ( ! object.valid()) return ApplicationVersion.unknown;
        OptionalLong applicationBuildNumber = SlimeUtils.optionalLong(object.field(applicationBuildNumberField));
        if (applicationBuildNumber.isEmpty())
            return ApplicationVersion.unknown;
        Optional<SourceRevision> sourceRevision = sourceRevisionFromSlime(object.field(sourceRevisionField));
        Optional<String> authorEmail = SlimeUtils.optionalString(object.field(authorEmailField));
        Optional<Version> compileVersion = SlimeUtils.optionalString(object.field(compileVersionField)).map(Version::fromString);
        Optional<Instant> buildTime = SlimeUtils.optionalInstant(object.field(buildTimeField));
        Optional<String> sourceUrl = SlimeUtils.optionalString(object.field(sourceUrlField));
        Optional<String> commit = SlimeUtils.optionalString(object.field(commitField));
        boolean deployedDirectly = object.field(deployedDirectlyField).asBool();
        Optional<String> bundleHash = SlimeUtils.optionalString(object.field(bundleHashField));
        return new ApplicationVersion(sourceRevision, applicationBuildNumber, authorEmail, compileVersion,
                                      buildTime, sourceUrl, commit, deployedDirectly, bundleHash);
    }

    private Optional<SourceRevision> sourceRevisionFromSlime(Inspector object) {
        if ( ! object.valid()) return Optional.empty();
        return Optional.of(new SourceRevision(object.field(repositoryField).asString(),
                                              object.field(branchField).asString(),
                                              object.field(commitField).asString()));
    }

    /** Reads job pauses; job types no longer known to this controller version are silently skipped. */
    private Map<JobType, Instant> jobPausesFromSlime(Inspector object) {
        Map<JobType, Instant> jobPauses = new HashMap<>();
        object.field(jobStatusField).traverse((ArrayTraverser) (__, jobPauseObject) ->
                JobType.fromOptionalJobName(jobPauseObject.field(jobTypeField).asString())
                       .ifPresent(jobType -> jobPauses.put(jobType,
                                                           SlimeUtils.instant(jobPauseObject.field(pausedUntilField)))));
        return jobPauses;
    }

    /** Reads a pending change; an absent object means no change. Platform and/or application parts may be set. */
    private Change changeFromSlime(Inspector object) {
        if ( ! object.valid()) return Change.empty();
        Inspector versionFieldValue = object.field(versionField);
        Change change = Change.empty();
        if (versionFieldValue.valid())
            change = Change.of(Version.fromString(versionFieldValue.asString()));
        if (object.field(applicationBuildNumberField).valid())
            change = change.with(applicationVersionFromSlime(object));
        if (object.field(pinnedField).asBool())
            change = change.withPin();
        return change;
    }

    /** Reads assigned rotations, keeping only the first occurrence of each endpoint id (putIfAbsent). */
    private List<AssignedRotation> assignedRotationsFromSlime(Inspector root) {
        var assignedRotations = new LinkedHashMap<EndpointId, AssignedRotation>();
        root.field(assignedRotationsField).traverse((ArrayTraverser) (i, inspector) -> {
            var clusterId = new ClusterSpec.Id(inspector.field(assignedRotationClusterField).asString());
            var endpointId = EndpointId.of(inspector.field(assignedRotationEndpointField).asString());
            var rotationId = new RotationId(inspector.field(assignedRotationRotationField).asString());
            var regions = new LinkedHashSet<RegionName>();
            inspector.field(assignedRotationRegionsField).traverse((ArrayTraverser) (j, regionInspector) -> {
                regions.add(RegionName.from(regionInspector.asString()));
            });
            assignedRotations.putIfAbsent(endpointId, new AssignedRotation(clusterId, endpointId, rotationId, regions));
        });
        return List.copyOf(assignedRotations.values());
    }

}
class ApplicationSerializer { private static final String idField = "id"; private static final String createdAtField = "createdAt"; private static final String deploymentSpecField = "deploymentSpecField"; private static final String validationOverridesField = "validationOverrides"; private static final String instancesField = "instances"; private static final String deployingField = "deployingField"; private static final String projectIdField = "projectId"; private static final String latestVersionField = "latestVersion"; private static final String versionsField = "versions"; private static final String pinnedField = "pinned"; private static final String deploymentIssueField = "deploymentIssueId"; private static final String ownershipIssueIdField = "ownershipIssueId"; private static final String ownerField = "confirmedOwner"; private static final String majorVersionField = "majorVersion"; private static final String writeQualityField = "writeQuality"; private static final String queryQualityField = "queryQuality"; private static final String pemDeployKeysField = "pemDeployKeys"; private static final String assignedRotationClusterField = "clusterId"; private static final String assignedRotationRotationField = "rotationId"; private static final String assignedRotationRegionsField = "regions"; private static final String versionField = "version"; private static final String instanceNameField = "instanceName"; private static final String deploymentsField = "deployments"; private static final String deploymentJobsField = "deploymentJobs"; private static final String assignedRotationsField = "assignedRotations"; private static final String assignedRotationEndpointField = "endpointId"; private static final String latestDeployedField = "latestDeployed"; private static final String zoneField = "zone"; private static final String environmentField = "environment"; private static final String regionField = "region"; private static final String deployTimeField = "deployTime"; 
private static final String applicationBuildNumberField = "applicationBuildNumber"; private static final String applicationPackageRevisionField = "applicationPackageRevision"; private static final String sourceRevisionField = "sourceRevision"; private static final String repositoryField = "repositoryField"; private static final String branchField = "branchField"; private static final String commitField = "commitField"; private static final String authorEmailField = "authorEmailField"; private static final String deployedDirectlyField = "deployedDirectly"; private static final String compileVersionField = "compileVersion"; private static final String buildTimeField = "buildTime"; private static final String sourceUrlField = "sourceUrl"; private static final String bundleHashField = "bundleHash"; private static final String lastQueriedField = "lastQueried"; private static final String lastWrittenField = "lastWritten"; private static final String lastQueriesPerSecondField = "lastQueriesPerSecond"; private static final String lastWritesPerSecondField = "lastWritesPerSecond"; private static final String jobStatusField = "jobStatus"; private static final String jobTypeField = "jobType"; private static final String pausedUntilField = "pausedUntil"; private static final String deploymentMetricsField = "metrics"; private static final String deploymentMetricsQPSField = "queriesPerSecond"; private static final String deploymentMetricsWPSField = "writesPerSecond"; private static final String deploymentMetricsDocsField = "documentCount"; private static final String deploymentMetricsQueryLatencyField = "queryLatencyMillis"; private static final String deploymentMetricsWriteLatencyField = "writeLatencyMillis"; private static final String deploymentMetricsUpdateTime = "lastUpdated"; private static final String deploymentMetricsWarningsField = "warnings"; private static final String rotationStatusField = "rotationStatus2"; private static final String rotationIdField = "rotationId"; 
private static final String lastUpdatedField = "lastUpdated"; private static final String rotationStateField = "state"; private static final String statusField = "status"; private static final String quotaUsageRateField = "quotaUsageRate"; private static final String deploymentCostField = "cost"; public Slime toSlime(Application application) { Slime slime = new Slime(); Cursor root = slime.setObject(); root.setString(idField, application.id().serialized()); root.setLong(createdAtField, application.createdAt().toEpochMilli()); root.setString(deploymentSpecField, application.deploymentSpec().xmlForm()); root.setString(validationOverridesField, application.validationOverrides().xmlForm()); application.projectId().ifPresent(projectId -> root.setLong(projectIdField, projectId)); application.deploymentIssueId().ifPresent(jiraIssueId -> root.setString(deploymentIssueField, jiraIssueId.value())); application.ownershipIssueId().ifPresent(issueId -> root.setString(ownershipIssueIdField, issueId.value())); application.owner().ifPresent(owner -> root.setString(ownerField, owner.username())); application.majorVersion().ifPresent(majorVersion -> root.setLong(majorVersionField, majorVersion)); root.setDouble(queryQualityField, application.metrics().queryServiceQuality()); root.setDouble(writeQualityField, application.metrics().writeServiceQuality()); deployKeysToSlime(application.deployKeys(), root.setArray(pemDeployKeysField)); application.latestVersion().ifPresent(version -> toSlime(version, root.setObject(latestVersionField))); versionsToSlime(application, root.setArray(versionsField)); instancesToSlime(application, root.setArray(instancesField)); return slime; } private void instancesToSlime(Application application, Cursor array) { for (Instance instance : application.instances().values()) { Cursor instanceObject = array.addObject(); instanceObject.setString(instanceNameField, instance.name().value()); deploymentsToSlime(instance.deployments().values(), 
instanceObject.setArray(deploymentsField)); toSlime(instance.jobPauses(), instanceObject.setObject(deploymentJobsField)); assignedRotationsToSlime(instance.rotations(), instanceObject); toSlime(instance.rotationStatus(), instanceObject.setArray(rotationStatusField)); toSlime(instance.change(), instanceObject, deployingField); instance.latestDeployed().ifPresent(version -> toSlime(version, instanceObject.setObject(latestDeployedField))); } } private void deployKeysToSlime(Set<PublicKey> deployKeys, Cursor array) { deployKeys.forEach(key -> array.addString(KeyUtils.toPem(key))); } private void deploymentsToSlime(Collection<Deployment> deployments, Cursor array) { for (Deployment deployment : deployments) deploymentToSlime(deployment, array.addObject()); } private void deploymentToSlime(Deployment deployment, Cursor object) { zoneIdToSlime(deployment.zone(), object.setObject(zoneField)); object.setString(versionField, deployment.version().toString()); object.setLong(deployTimeField, deployment.at().toEpochMilli()); toSlime(deployment.applicationVersion(), object.setObject(applicationPackageRevisionField)); deploymentMetricsToSlime(deployment.metrics(), object); deployment.activity().lastQueried().ifPresent(instant -> object.setLong(lastQueriedField, instant.toEpochMilli())); deployment.activity().lastWritten().ifPresent(instant -> object.setLong(lastWrittenField, instant.toEpochMilli())); deployment.activity().lastQueriesPerSecond().ifPresent(value -> object.setDouble(lastQueriesPerSecondField, value)); deployment.activity().lastWritesPerSecond().ifPresent(value -> object.setDouble(lastWritesPerSecondField, value)); object.setDouble(quotaUsageRateField, deployment.quota().rate()); deployment.cost().ifPresent(cost -> object.setDouble(deploymentCostField, cost)); } private void deploymentMetricsToSlime(DeploymentMetrics metrics, Cursor object) { Cursor root = object.setObject(deploymentMetricsField); root.setDouble(deploymentMetricsQPSField, metrics.queriesPerSecond()); 
root.setDouble(deploymentMetricsWPSField, metrics.writesPerSecond()); root.setDouble(deploymentMetricsDocsField, metrics.documentCount()); root.setDouble(deploymentMetricsQueryLatencyField, metrics.queryLatencyMillis()); root.setDouble(deploymentMetricsWriteLatencyField, metrics.writeLatencyMillis()); metrics.instant().ifPresent(instant -> root.setLong(deploymentMetricsUpdateTime, instant.toEpochMilli())); if (!metrics.warnings().isEmpty()) { Cursor warningsObject = root.setObject(deploymentMetricsWarningsField); metrics.warnings().forEach((warning, count) -> warningsObject.setLong(warning.name(), count)); } } private void zoneIdToSlime(ZoneId zone, Cursor object) { object.setString(environmentField, zone.environment().value()); object.setString(regionField, zone.region().value()); } private void toSlime(ApplicationVersion applicationVersion, Cursor object) { applicationVersion.buildNumber().ifPresent(number -> object.setLong(applicationBuildNumberField, number)); applicationVersion.source().ifPresent(source -> toSlime(source, object.setObject(sourceRevisionField))); applicationVersion.authorEmail().ifPresent(email -> object.setString(authorEmailField, email)); applicationVersion.compileVersion().ifPresent(version -> object.setString(compileVersionField, version.toString())); applicationVersion.buildTime().ifPresent(time -> object.setLong(buildTimeField, time.toEpochMilli())); applicationVersion.sourceUrl().ifPresent(url -> object.setString(sourceUrlField, url)); applicationVersion.commit().ifPresent(commit -> object.setString(commitField, commit)); object.setBool(deployedDirectlyField, applicationVersion.isDeployedDirectly()); } private void toSlime(SourceRevision sourceRevision, Cursor object) { object.setString(repositoryField, sourceRevision.repository()); object.setString(branchField, sourceRevision.branch()); object.setString(commitField, sourceRevision.commit()); } private void toSlime(Map<JobType, Instant> jobPauses, Cursor cursor) { Cursor jobStatusArray = 
cursor.setArray(jobStatusField); jobPauses.forEach((type, until) -> { Cursor jobPauseObject = jobStatusArray.addObject(); jobPauseObject.setString(jobTypeField, type.jobName()); jobPauseObject.setLong(pausedUntilField, until.toEpochMilli()); }); } private void toSlime(Change deploying, Cursor parentObject, String fieldName) { if (deploying.isEmpty()) return; Cursor object = parentObject.setObject(fieldName); if (deploying.platform().isPresent()) object.setString(versionField, deploying.platform().get().toString()); if (deploying.application().isPresent()) toSlime(deploying.application().get(), object); if (deploying.isPinned()) object.setBool(pinnedField, true); } private void toSlime(RotationStatus status, Cursor array) { status.asMap().forEach((rotationId, targets) -> { Cursor rotationObject = array.addObject(); rotationObject.setString(rotationIdField, rotationId.asString()); rotationObject.setLong(lastUpdatedField, targets.lastUpdated().toEpochMilli()); Cursor statusArray = rotationObject.setArray(statusField); targets.asMap().forEach((zone, state) -> { Cursor statusObject = statusArray.addObject(); zoneIdToSlime(zone, statusObject); statusObject.setString(rotationStateField, state.name()); }); }); } private void assignedRotationsToSlime(List<AssignedRotation> rotations, Cursor parent) { var rotationsArray = parent.setArray(assignedRotationsField); for (var rotation : rotations) { var object = rotationsArray.addObject(); object.setString(assignedRotationEndpointField, rotation.endpointId().id()); object.setString(assignedRotationRotationField, rotation.rotationId().asString()); object.setString(assignedRotationClusterField, rotation.clusterId().value()); var regionsArray = object.setArray(assignedRotationRegionsField); for (var region : rotation.regions()) { regionsArray.addString(region.value()); } } } public Application fromSlime(byte[] data) { return fromSlime(SlimeUtils.jsonToSlime(data)); } private Application fromSlime(Slime slime) { Inspector root = 
slime.get(); TenantAndApplicationId id = TenantAndApplicationId.fromSerialized(root.field(idField).asString()); Instant createdAt = SlimeUtils.instant(root.field(createdAtField)); DeploymentSpec deploymentSpec = DeploymentSpec.fromXml(root.field(deploymentSpecField).asString(), false); ValidationOverrides validationOverrides = ValidationOverrides.fromXml(root.field(validationOverridesField).asString()); Optional<IssueId> deploymentIssueId = SlimeUtils.optionalString(root.field(deploymentIssueField)).map(IssueId::from); Optional<IssueId> ownershipIssueId = SlimeUtils.optionalString(root.field(ownershipIssueIdField)).map(IssueId::from); Optional<User> owner = SlimeUtils.optionalString(root.field(ownerField)).map(User::from); OptionalInt majorVersion = SlimeUtils.optionalInteger(root.field(majorVersionField)); ApplicationMetrics metrics = new ApplicationMetrics(root.field(queryQualityField).asDouble(), root.field(writeQualityField).asDouble()); Set<PublicKey> deployKeys = deployKeysFromSlime(root.field(pemDeployKeysField)); List<Instance> instances = instancesFromSlime(id, root.field(instancesField)); OptionalLong projectId = SlimeUtils.optionalLong(root.field(projectIdField)); Optional<ApplicationVersion> latestVersion = latestVersionFromSlime(root.field(latestVersionField)); SortedSet<ApplicationVersion> versions = versionsFromSlime(root.field(versionsField)); return new Application(id, createdAt, deploymentSpec, validationOverrides, deploymentIssueId, ownershipIssueId, owner, majorVersion, metrics, deployKeys, projectId, latestVersion, versions, instances); } private Optional<ApplicationVersion> latestVersionFromSlime(Inspector latestVersionObject) { return Optional.of(applicationVersionFromSlime(latestVersionObject)) .filter(version -> ! 
version.isUnknown()); } private SortedSet<ApplicationVersion> versionsFromSlime(Inspector versionsObject) { SortedSet<ApplicationVersion> versions = new TreeSet<>(); versionsObject.traverse((ArrayTraverser) (name, object) -> versions.add(applicationVersionFromSlime(object))); return versions; } private List<Instance> instancesFromSlime(TenantAndApplicationId id, Inspector field) { List<Instance> instances = new ArrayList<>(); field.traverse((ArrayTraverser) (name, object) -> { InstanceName instanceName = InstanceName.from(object.field(instanceNameField).asString()); List<Deployment> deployments = deploymentsFromSlime(object.field(deploymentsField)); Map<JobType, Instant> jobPauses = jobPausesFromSlime(object.field(deploymentJobsField)); List<AssignedRotation> assignedRotations = assignedRotationsFromSlime(object); RotationStatus rotationStatus = rotationStatusFromSlime(object); Change change = changeFromSlime(object.field(deployingField)); Optional<ApplicationVersion> latestDeployed = latestVersionFromSlime(object.field(latestDeployedField)); instances.add(new Instance(id.instance(instanceName), deployments, jobPauses, assignedRotations, rotationStatus, change, latestDeployed)); }); return instances; } private Set<PublicKey> deployKeysFromSlime(Inspector array) { Set<PublicKey> keys = new LinkedHashSet<>(); array.traverse((ArrayTraverser) (__, key) -> keys.add(KeyUtils.fromPemEncodedPublicKey(key.asString()))); return keys; } private List<Deployment> deploymentsFromSlime(Inspector array) { List<Deployment> deployments = new ArrayList<>(); array.traverse((ArrayTraverser) (int i, Inspector item) -> deployments.add(deploymentFromSlime(item))); return deployments; } private Deployment deploymentFromSlime(Inspector deploymentObject) { return new Deployment(zoneIdFromSlime(deploymentObject.field(zoneField)), applicationVersionFromSlime(deploymentObject.field(applicationPackageRevisionField)), Version.fromString(deploymentObject.field(versionField).asString()), 
SlimeUtils.instant(deploymentObject.field(deployTimeField)), deploymentMetricsFromSlime(deploymentObject.field(deploymentMetricsField)), DeploymentActivity.create(SlimeUtils.optionalInstant(deploymentObject.field(lastQueriedField)), SlimeUtils.optionalInstant(deploymentObject.field(lastWrittenField)), SlimeUtils.optionalDouble(deploymentObject.field(lastQueriesPerSecondField)), SlimeUtils.optionalDouble(deploymentObject.field(lastWritesPerSecondField))), QuotaUsage.create(SlimeUtils.optionalDouble(deploymentObject.field(quotaUsageRateField))), SlimeUtils.optionalDouble(deploymentObject.field(deploymentCostField))); } private DeploymentMetrics deploymentMetricsFromSlime(Inspector object) { Optional<Instant> instant = SlimeUtils.optionalInstant(object.field(deploymentMetricsUpdateTime)); return new DeploymentMetrics(object.field(deploymentMetricsQPSField).asDouble(), object.field(deploymentMetricsWPSField).asDouble(), object.field(deploymentMetricsDocsField).asDouble(), object.field(deploymentMetricsQueryLatencyField).asDouble(), object.field(deploymentMetricsWriteLatencyField).asDouble(), instant, deploymentWarningsFrom(object.field(deploymentMetricsWarningsField))); } private Map<DeploymentMetrics.Warning, Integer> deploymentWarningsFrom(Inspector object) { Map<DeploymentMetrics.Warning, Integer> warnings = new HashMap<>(); object.traverse((ObjectTraverser) (name, value) -> warnings.put(DeploymentMetrics.Warning.valueOf(name), (int) value.asLong())); return Collections.unmodifiableMap(warnings); } private RotationStatus rotationStatusFromSlime(Inspector parentObject) { var object = parentObject.field(rotationStatusField); var statusMap = new LinkedHashMap<RotationId, RotationStatus.Targets>(); object.traverse((ArrayTraverser) (idx, statusObject) -> statusMap.put(new RotationId(statusObject.field(rotationIdField).asString()), new RotationStatus.Targets( singleRotationStatusFromSlime(statusObject.field(statusField)), 
SlimeUtils.instant(statusObject.field(lastUpdatedField))))); return RotationStatus.from(statusMap); } private Map<ZoneId, RotationState> singleRotationStatusFromSlime(Inspector object) { if (!object.valid()) { return Collections.emptyMap(); } Map<ZoneId, RotationState> rotationStatus = new LinkedHashMap<>(); object.traverse((ArrayTraverser) (idx, statusObject) -> { var zone = zoneIdFromSlime(statusObject); var status = RotationState.valueOf(statusObject.field(rotationStateField).asString()); rotationStatus.put(zone, status); }); return Collections.unmodifiableMap(rotationStatus); } private ZoneId zoneIdFromSlime(Inspector object) { return ZoneId.from(object.field(environmentField).asString(), object.field(regionField).asString()); } private ApplicationVersion applicationVersionFromSlime(Inspector object) { if ( ! object.valid()) return ApplicationVersion.unknown; OptionalLong applicationBuildNumber = SlimeUtils.optionalLong(object.field(applicationBuildNumberField)); if (applicationBuildNumber.isEmpty()) return ApplicationVersion.unknown; Optional<SourceRevision> sourceRevision = sourceRevisionFromSlime(object.field(sourceRevisionField)); Optional<String> authorEmail = SlimeUtils.optionalString(object.field(authorEmailField)); Optional<Version> compileVersion = SlimeUtils.optionalString(object.field(compileVersionField)).map(Version::fromString); Optional<Instant> buildTime = SlimeUtils.optionalInstant(object.field(buildTimeField)); Optional<String> sourceUrl = SlimeUtils.optionalString(object.field(sourceUrlField)); Optional<String> commit = SlimeUtils.optionalString(object.field(commitField)); boolean deployedDirectly = object.field(deployedDirectlyField).asBool(); Optional<String> bundleHash = SlimeUtils.optionalString(object.field(bundleHashField)); return new ApplicationVersion(sourceRevision, applicationBuildNumber, authorEmail, compileVersion, buildTime, sourceUrl, commit, deployedDirectly, bundleHash); } private Optional<SourceRevision> 
sourceRevisionFromSlime(Inspector object) { if ( ! object.valid()) return Optional.empty(); return Optional.of(new SourceRevision(object.field(repositoryField).asString(), object.field(branchField).asString(), object.field(commitField).asString())); } private Map<JobType, Instant> jobPausesFromSlime(Inspector object) { Map<JobType, Instant> jobPauses = new HashMap<>(); object.field(jobStatusField).traverse((ArrayTraverser) (__, jobPauseObject) -> JobType.fromOptionalJobName(jobPauseObject.field(jobTypeField).asString()) .ifPresent(jobType -> jobPauses.put(jobType, SlimeUtils.instant(jobPauseObject.field(pausedUntilField))))); return jobPauses; } private Change changeFromSlime(Inspector object) { if ( ! object.valid()) return Change.empty(); Inspector versionFieldValue = object.field(versionField); Change change = Change.empty(); if (versionFieldValue.valid()) change = Change.of(Version.fromString(versionFieldValue.asString())); if (object.field(applicationBuildNumberField).valid()) change = change.with(applicationVersionFromSlime(object)); if (object.field(pinnedField).asBool()) change = change.withPin(); return change; } private List<AssignedRotation> assignedRotationsFromSlime(Inspector root) { var assignedRotations = new LinkedHashMap<EndpointId, AssignedRotation>(); root.field(assignedRotationsField).traverse((ArrayTraverser) (i, inspector) -> { var clusterId = new ClusterSpec.Id(inspector.field(assignedRotationClusterField).asString()); var endpointId = EndpointId.of(inspector.field(assignedRotationEndpointField).asString()); var rotationId = new RotationId(inspector.field(assignedRotationRotationField).asString()); var regions = new LinkedHashSet<RegionName>(); inspector.field(assignedRotationRegionsField).traverse((ArrayTraverser) (j, regionInspector) -> { regions.add(RegionName.from(regionInspector.asString())); }); assignedRotations.putIfAbsent(endpointId, new AssignedRotation(clusterId, endpointId, rotationId, regions)); }); return 
List.copyOf(assignedRotations.values()); } }
It's probably not good to fail with an exception for an issue like this. Should we log an error and return null instead? Or make the fingerprint be the error message? :)
private static String certificateFingerprint(byte[] derEncoded) { try { return HexDump.toHexString(MessageDigest.getInstance("SHA-1").digest(derEncoded)); } catch (NoSuchAlgorithmException e) { throw new RuntimeException(e); } }
throw new RuntimeException(e);
private static String certificateFingerprint(byte[] derEncoded) { try { return HexDump.toHexString(MessageDigest.getInstance("SHA-1").digest(derEncoded)); } catch (NoSuchAlgorithmException e) { throw new RuntimeException(e); } }
class ConnectionInfo { private final UUID uuid; private final long createdAt; private final InetSocketAddress localAddress; private final InetSocketAddress peerAddress; private long closedAt = 0; private long httpBytesReceived = 0; private long httpBytesSent = 0; private long requests = 0; private long responses = 0; private InetSocketAddress remoteAddress; private byte[] sslSessionId; private String sslProtocol; private String sslCipherSuite; private String sslPeerSubject; private Date sslPeerNotBefore; private Date sslPeerNotAfter; private List<SNIServerName> sslSniServerNames; private String sslPeerIssuerSubject; private byte[] sslPeerEncodedCertificate; private SSLHandshakeException sslHandshakeException; private List<String> sslSubjectAlternativeNames; private String proxyProtocolVersion; private String httpProtocol; private ConnectionInfo(UUID uuid, long createdAt, InetSocketAddress localAddress, InetSocketAddress peerAddress) { this.uuid = uuid; this.createdAt = createdAt; this.localAddress = localAddress; this.peerAddress = peerAddress; } static ConnectionInfo from(SocketChannelEndPoint endpoint) { return new ConnectionInfo( UUID.randomUUID(), endpoint.getCreatedTimeStamp(), endpoint.getLocalAddress(), endpoint.getRemoteAddress()); } synchronized UUID uuid() { return uuid; } synchronized ConnectionInfo setClosedAt(long closedAt) { this.closedAt = closedAt; return this; } synchronized ConnectionInfo setHttpBytes(long received, long sent) { this.httpBytesReceived = received; this.httpBytesSent = sent; return this; } synchronized ConnectionInfo incrementRequests() { ++this.requests; return this; } synchronized ConnectionInfo incrementResponses() { ++this.responses; return this; } synchronized ConnectionInfo setRemoteAddress(InetSocketAddress remoteAddress) { this.remoteAddress = remoteAddress; return this; } synchronized ConnectionInfo setSslSessionDetails(SSLSession session) { this.sslCipherSuite = session.getCipherSuite(); this.sslProtocol = 
session.getProtocol(); this.sslSessionId = session.getId(); if (session instanceof ExtendedSSLSession) { ExtendedSSLSession extendedSession = (ExtendedSSLSession) session; this.sslSniServerNames = extendedSession.getRequestedServerNames(); } try { this.sslPeerSubject = session.getPeerPrincipal().getName(); X509Certificate peerCertificate = (X509Certificate) session.getPeerCertificates()[0]; this.sslPeerNotBefore = peerCertificate.getNotBefore(); this.sslPeerNotAfter = peerCertificate.getNotAfter(); this.sslSubjectAlternativeNames = X509CertificateUtils.getSubjectAlternativeNames(peerCertificate).stream() .map(SubjectAlternativeName::getValue) .collect(Collectors.toList()); this.sslPeerIssuerSubject = peerCertificate.getIssuerDN().getName(); this.sslPeerEncodedCertificate = peerCertificate.getEncoded(); } catch (SSLPeerUnverifiedException | CertificateEncodingException e) { } return this; } synchronized ConnectionInfo setSslHandshakeFailure(SSLHandshakeException exception) { this.sslHandshakeException = exception; return this; } synchronized ConnectionInfo setHttpProtocol(String protocol) { this.httpProtocol = protocol; return this; } synchronized ConnectionInfo setProxyProtocolVersion(String version) { this.proxyProtocolVersion = version; return this; } synchronized ConnectionLogEntry toLogEntry() { ConnectionLogEntry.Builder builder = ConnectionLogEntry.builder(uuid, Instant.ofEpochMilli(createdAt)); if (closedAt > 0) { builder.withDuration((closedAt - createdAt) / 1000D); } if (httpBytesReceived > 0) { builder.withHttpBytesReceived(httpBytesReceived); } if (httpBytesSent > 0) { builder.withHttpBytesSent(httpBytesSent); } if (requests > 0) { builder.withRequests(requests); } if (responses > 0) { builder.withResponses(responses); } if (peerAddress != null) { builder.withPeerAddress(peerAddress.getHostString()) .withPeerPort(peerAddress.getPort()); } if (localAddress != null) { builder.withLocalAddress(localAddress.getHostString()) 
.withLocalPort(localAddress.getPort()); } if (remoteAddress != null) { builder.withRemoteAddress(remoteAddress.getHostString()) .withRemotePort(remoteAddress.getPort()); } if (sslProtocol != null && sslCipherSuite != null && sslSessionId != null) { builder.withSslProtocol(sslProtocol) .withSslCipherSuite(sslCipherSuite) .withSslSessionId(HexDump.toHexString(sslSessionId)); } if (sslSniServerNames != null) { sslSniServerNames.stream() .filter(name -> name instanceof SNIHostName && name.getType() == StandardConstants.SNI_HOST_NAME) .map(name -> ((SNIHostName) name).getAsciiName()) .findAny() .ifPresent(builder::withSslSniServerName); } if (sslPeerSubject != null && sslPeerNotAfter != null && sslPeerNotBefore != null && sslPeerIssuerSubject != null && sslPeerEncodedCertificate != null) { builder.withSslPeerSubject(sslPeerSubject) .withSslPeerIssuerSubject(sslPeerIssuerSubject) .withSslPeerNotAfter(sslPeerNotAfter.toInstant()) .withSslPeerNotBefore(sslPeerNotBefore.toInstant()) .withSslPeerFingerprint(certificateFingerprint(sslPeerEncodedCertificate)); } if (sslSubjectAlternativeNames != null && !sslSubjectAlternativeNames.isEmpty()) { builder.withSslSubjectAlternativeNames(sslSubjectAlternativeNames); } if (sslHandshakeException != null) { List<ExceptionEntry> exceptionChain = new ArrayList<>(); Throwable cause = sslHandshakeException; while (cause != null) { exceptionChain.add(new ExceptionEntry(cause.getClass().getName(), cause.getMessage())); cause = cause.getCause(); } String type = SslHandshakeFailure.fromSslHandshakeException(sslHandshakeException) .map(SslHandshakeFailure::failureType) .orElse("UNKNOWN"); builder.withSslHandshakeFailure(new ConnectionLogEntry.SslHandshakeFailure(type, exceptionChain)); } if (httpProtocol != null) { builder.withHttpProtocol(httpProtocol); } if (proxyProtocolVersion != null) { builder.withProxyProtocolVersion(proxyProtocolVersion); } return builder.build(); } }
class ConnectionInfo { private final UUID uuid; private final long createdAt; private final InetSocketAddress localAddress; private final InetSocketAddress peerAddress; private long closedAt = 0; private long httpBytesReceived = 0; private long httpBytesSent = 0; private long requests = 0; private long responses = 0; private InetSocketAddress remoteAddress; private byte[] sslSessionId; private String sslProtocol; private String sslCipherSuite; private String sslPeerSubject; private Date sslPeerNotBefore; private Date sslPeerNotAfter; private List<SNIServerName> sslSniServerNames; private String sslPeerIssuerSubject; private byte[] sslPeerEncodedCertificate; private SSLHandshakeException sslHandshakeException; private List<String> sslSubjectAlternativeNames; private String proxyProtocolVersion; private String httpProtocol; private ConnectionInfo(UUID uuid, long createdAt, InetSocketAddress localAddress, InetSocketAddress peerAddress) { this.uuid = uuid; this.createdAt = createdAt; this.localAddress = localAddress; this.peerAddress = peerAddress; } static ConnectionInfo from(SocketChannelEndPoint endpoint) { return new ConnectionInfo( UUID.randomUUID(), endpoint.getCreatedTimeStamp(), endpoint.getLocalAddress(), endpoint.getRemoteAddress()); } synchronized UUID uuid() { return uuid; } synchronized ConnectionInfo setClosedAt(long closedAt) { this.closedAt = closedAt; return this; } synchronized ConnectionInfo setHttpBytes(long received, long sent) { this.httpBytesReceived = received; this.httpBytesSent = sent; return this; } synchronized ConnectionInfo incrementRequests() { ++this.requests; return this; } synchronized ConnectionInfo incrementResponses() { ++this.responses; return this; } synchronized ConnectionInfo setRemoteAddress(InetSocketAddress remoteAddress) { this.remoteAddress = remoteAddress; return this; } synchronized ConnectionInfo setSslSessionDetails(SSLSession session) { this.sslCipherSuite = session.getCipherSuite(); this.sslProtocol = 
session.getProtocol(); this.sslSessionId = session.getId(); if (session instanceof ExtendedSSLSession) { ExtendedSSLSession extendedSession = (ExtendedSSLSession) session; this.sslSniServerNames = extendedSession.getRequestedServerNames(); } try { this.sslPeerSubject = session.getPeerPrincipal().getName(); X509Certificate peerCertificate = (X509Certificate) session.getPeerCertificates()[0]; this.sslPeerNotBefore = peerCertificate.getNotBefore(); this.sslPeerNotAfter = peerCertificate.getNotAfter(); this.sslSubjectAlternativeNames = X509CertificateUtils.getSubjectAlternativeNames(peerCertificate).stream() .map(SubjectAlternativeName::getValue) .collect(Collectors.toList()); this.sslPeerIssuerSubject = peerCertificate.getIssuerDN().getName(); this.sslPeerEncodedCertificate = peerCertificate.getEncoded(); } catch (SSLPeerUnverifiedException | CertificateEncodingException e) { } return this; } synchronized ConnectionInfo setSslHandshakeFailure(SSLHandshakeException exception) { this.sslHandshakeException = exception; return this; } synchronized ConnectionInfo setHttpProtocol(String protocol) { this.httpProtocol = protocol; return this; } synchronized ConnectionInfo setProxyProtocolVersion(String version) { this.proxyProtocolVersion = version; return this; } synchronized ConnectionLogEntry toLogEntry() { ConnectionLogEntry.Builder builder = ConnectionLogEntry.builder(uuid, Instant.ofEpochMilli(createdAt)); if (closedAt > 0) { builder.withDuration((closedAt - createdAt) / 1000D); } if (httpBytesReceived > 0) { builder.withHttpBytesReceived(httpBytesReceived); } if (httpBytesSent > 0) { builder.withHttpBytesSent(httpBytesSent); } if (requests > 0) { builder.withRequests(requests); } if (responses > 0) { builder.withResponses(responses); } if (peerAddress != null) { builder.withPeerAddress(peerAddress.getHostString()) .withPeerPort(peerAddress.getPort()); } if (localAddress != null) { builder.withLocalAddress(localAddress.getHostString()) 
.withLocalPort(localAddress.getPort()); } if (remoteAddress != null) { builder.withRemoteAddress(remoteAddress.getHostString()) .withRemotePort(remoteAddress.getPort()); } if (sslProtocol != null && sslCipherSuite != null && sslSessionId != null) { builder.withSslProtocol(sslProtocol) .withSslCipherSuite(sslCipherSuite) .withSslSessionId(HexDump.toHexString(sslSessionId)); } if (sslSniServerNames != null) { sslSniServerNames.stream() .filter(name -> name instanceof SNIHostName && name.getType() == StandardConstants.SNI_HOST_NAME) .map(name -> ((SNIHostName) name).getAsciiName()) .findAny() .ifPresent(builder::withSslSniServerName); } if (sslPeerSubject != null && sslPeerNotAfter != null && sslPeerNotBefore != null && sslPeerIssuerSubject != null && sslPeerEncodedCertificate != null) { builder.withSslPeerSubject(sslPeerSubject) .withSslPeerIssuerSubject(sslPeerIssuerSubject) .withSslPeerNotAfter(sslPeerNotAfter.toInstant()) .withSslPeerNotBefore(sslPeerNotBefore.toInstant()) .withSslPeerFingerprint(certificateFingerprint(sslPeerEncodedCertificate)); } if (sslSubjectAlternativeNames != null && !sslSubjectAlternativeNames.isEmpty()) { builder.withSslSubjectAlternativeNames(sslSubjectAlternativeNames); } if (sslHandshakeException != null) { List<ExceptionEntry> exceptionChain = new ArrayList<>(); Throwable cause = sslHandshakeException; while (cause != null) { exceptionChain.add(new ExceptionEntry(cause.getClass().getName(), cause.getMessage())); cause = cause.getCause(); } String type = SslHandshakeFailure.fromSslHandshakeException(sslHandshakeException) .map(SslHandshakeFailure::failureType) .orElse("UNKNOWN"); builder.withSslHandshakeFailure(new ConnectionLogEntry.SslHandshakeFailure(type, exceptionChain)); } if (httpProtocol != null) { builder.withHttpProtocol(httpProtocol); } if (proxyProtocolVersion != null) { builder.withProxyProtocolVersion(proxyProtocolVersion); } return builder.build(); } }
The exception is thrown if the JVM implementation does not provide the SHA-1 digest algorithm. That should never happen.
private static String certificateFingerprint(byte[] derEncoded) { try { return HexDump.toHexString(MessageDigest.getInstance("SHA-1").digest(derEncoded)); } catch (NoSuchAlgorithmException e) { throw new RuntimeException(e); } }
throw new RuntimeException(e);
private static String certificateFingerprint(byte[] derEncoded) { try { return HexDump.toHexString(MessageDigest.getInstance("SHA-1").digest(derEncoded)); } catch (NoSuchAlgorithmException e) { throw new RuntimeException(e); } }
class ConnectionInfo { private final UUID uuid; private final long createdAt; private final InetSocketAddress localAddress; private final InetSocketAddress peerAddress; private long closedAt = 0; private long httpBytesReceived = 0; private long httpBytesSent = 0; private long requests = 0; private long responses = 0; private InetSocketAddress remoteAddress; private byte[] sslSessionId; private String sslProtocol; private String sslCipherSuite; private String sslPeerSubject; private Date sslPeerNotBefore; private Date sslPeerNotAfter; private List<SNIServerName> sslSniServerNames; private String sslPeerIssuerSubject; private byte[] sslPeerEncodedCertificate; private SSLHandshakeException sslHandshakeException; private List<String> sslSubjectAlternativeNames; private String proxyProtocolVersion; private String httpProtocol; private ConnectionInfo(UUID uuid, long createdAt, InetSocketAddress localAddress, InetSocketAddress peerAddress) { this.uuid = uuid; this.createdAt = createdAt; this.localAddress = localAddress; this.peerAddress = peerAddress; } static ConnectionInfo from(SocketChannelEndPoint endpoint) { return new ConnectionInfo( UUID.randomUUID(), endpoint.getCreatedTimeStamp(), endpoint.getLocalAddress(), endpoint.getRemoteAddress()); } synchronized UUID uuid() { return uuid; } synchronized ConnectionInfo setClosedAt(long closedAt) { this.closedAt = closedAt; return this; } synchronized ConnectionInfo setHttpBytes(long received, long sent) { this.httpBytesReceived = received; this.httpBytesSent = sent; return this; } synchronized ConnectionInfo incrementRequests() { ++this.requests; return this; } synchronized ConnectionInfo incrementResponses() { ++this.responses; return this; } synchronized ConnectionInfo setRemoteAddress(InetSocketAddress remoteAddress) { this.remoteAddress = remoteAddress; return this; } synchronized ConnectionInfo setSslSessionDetails(SSLSession session) { this.sslCipherSuite = session.getCipherSuite(); this.sslProtocol = 
session.getProtocol(); this.sslSessionId = session.getId(); if (session instanceof ExtendedSSLSession) { ExtendedSSLSession extendedSession = (ExtendedSSLSession) session; this.sslSniServerNames = extendedSession.getRequestedServerNames(); } try { this.sslPeerSubject = session.getPeerPrincipal().getName(); X509Certificate peerCertificate = (X509Certificate) session.getPeerCertificates()[0]; this.sslPeerNotBefore = peerCertificate.getNotBefore(); this.sslPeerNotAfter = peerCertificate.getNotAfter(); this.sslSubjectAlternativeNames = X509CertificateUtils.getSubjectAlternativeNames(peerCertificate).stream() .map(SubjectAlternativeName::getValue) .collect(Collectors.toList()); this.sslPeerIssuerSubject = peerCertificate.getIssuerDN().getName(); this.sslPeerEncodedCertificate = peerCertificate.getEncoded(); } catch (SSLPeerUnverifiedException | CertificateEncodingException e) { } return this; } synchronized ConnectionInfo setSslHandshakeFailure(SSLHandshakeException exception) { this.sslHandshakeException = exception; return this; } synchronized ConnectionInfo setHttpProtocol(String protocol) { this.httpProtocol = protocol; return this; } synchronized ConnectionInfo setProxyProtocolVersion(String version) { this.proxyProtocolVersion = version; return this; } synchronized ConnectionLogEntry toLogEntry() { ConnectionLogEntry.Builder builder = ConnectionLogEntry.builder(uuid, Instant.ofEpochMilli(createdAt)); if (closedAt > 0) { builder.withDuration((closedAt - createdAt) / 1000D); } if (httpBytesReceived > 0) { builder.withHttpBytesReceived(httpBytesReceived); } if (httpBytesSent > 0) { builder.withHttpBytesSent(httpBytesSent); } if (requests > 0) { builder.withRequests(requests); } if (responses > 0) { builder.withResponses(responses); } if (peerAddress != null) { builder.withPeerAddress(peerAddress.getHostString()) .withPeerPort(peerAddress.getPort()); } if (localAddress != null) { builder.withLocalAddress(localAddress.getHostString()) 
.withLocalPort(localAddress.getPort()); } if (remoteAddress != null) { builder.withRemoteAddress(remoteAddress.getHostString()) .withRemotePort(remoteAddress.getPort()); } if (sslProtocol != null && sslCipherSuite != null && sslSessionId != null) { builder.withSslProtocol(sslProtocol) .withSslCipherSuite(sslCipherSuite) .withSslSessionId(HexDump.toHexString(sslSessionId)); } if (sslSniServerNames != null) { sslSniServerNames.stream() .filter(name -> name instanceof SNIHostName && name.getType() == StandardConstants.SNI_HOST_NAME) .map(name -> ((SNIHostName) name).getAsciiName()) .findAny() .ifPresent(builder::withSslSniServerName); } if (sslPeerSubject != null && sslPeerNotAfter != null && sslPeerNotBefore != null && sslPeerIssuerSubject != null && sslPeerEncodedCertificate != null) { builder.withSslPeerSubject(sslPeerSubject) .withSslPeerIssuerSubject(sslPeerIssuerSubject) .withSslPeerNotAfter(sslPeerNotAfter.toInstant()) .withSslPeerNotBefore(sslPeerNotBefore.toInstant()) .withSslPeerFingerprint(certificateFingerprint(sslPeerEncodedCertificate)); } if (sslSubjectAlternativeNames != null && !sslSubjectAlternativeNames.isEmpty()) { builder.withSslSubjectAlternativeNames(sslSubjectAlternativeNames); } if (sslHandshakeException != null) { List<ExceptionEntry> exceptionChain = new ArrayList<>(); Throwable cause = sslHandshakeException; while (cause != null) { exceptionChain.add(new ExceptionEntry(cause.getClass().getName(), cause.getMessage())); cause = cause.getCause(); } String type = SslHandshakeFailure.fromSslHandshakeException(sslHandshakeException) .map(SslHandshakeFailure::failureType) .orElse("UNKNOWN"); builder.withSslHandshakeFailure(new ConnectionLogEntry.SslHandshakeFailure(type, exceptionChain)); } if (httpProtocol != null) { builder.withHttpProtocol(httpProtocol); } if (proxyProtocolVersion != null) { builder.withProxyProtocolVersion(proxyProtocolVersion); } return builder.build(); } }
class ConnectionInfo { private final UUID uuid; private final long createdAt; private final InetSocketAddress localAddress; private final InetSocketAddress peerAddress; private long closedAt = 0; private long httpBytesReceived = 0; private long httpBytesSent = 0; private long requests = 0; private long responses = 0; private InetSocketAddress remoteAddress; private byte[] sslSessionId; private String sslProtocol; private String sslCipherSuite; private String sslPeerSubject; private Date sslPeerNotBefore; private Date sslPeerNotAfter; private List<SNIServerName> sslSniServerNames; private String sslPeerIssuerSubject; private byte[] sslPeerEncodedCertificate; private SSLHandshakeException sslHandshakeException; private List<String> sslSubjectAlternativeNames; private String proxyProtocolVersion; private String httpProtocol; private ConnectionInfo(UUID uuid, long createdAt, InetSocketAddress localAddress, InetSocketAddress peerAddress) { this.uuid = uuid; this.createdAt = createdAt; this.localAddress = localAddress; this.peerAddress = peerAddress; } static ConnectionInfo from(SocketChannelEndPoint endpoint) { return new ConnectionInfo( UUID.randomUUID(), endpoint.getCreatedTimeStamp(), endpoint.getLocalAddress(), endpoint.getRemoteAddress()); } synchronized UUID uuid() { return uuid; } synchronized ConnectionInfo setClosedAt(long closedAt) { this.closedAt = closedAt; return this; } synchronized ConnectionInfo setHttpBytes(long received, long sent) { this.httpBytesReceived = received; this.httpBytesSent = sent; return this; } synchronized ConnectionInfo incrementRequests() { ++this.requests; return this; } synchronized ConnectionInfo incrementResponses() { ++this.responses; return this; } synchronized ConnectionInfo setRemoteAddress(InetSocketAddress remoteAddress) { this.remoteAddress = remoteAddress; return this; } synchronized ConnectionInfo setSslSessionDetails(SSLSession session) { this.sslCipherSuite = session.getCipherSuite(); this.sslProtocol = 
session.getProtocol(); this.sslSessionId = session.getId(); if (session instanceof ExtendedSSLSession) { ExtendedSSLSession extendedSession = (ExtendedSSLSession) session; this.sslSniServerNames = extendedSession.getRequestedServerNames(); } try { this.sslPeerSubject = session.getPeerPrincipal().getName(); X509Certificate peerCertificate = (X509Certificate) session.getPeerCertificates()[0]; this.sslPeerNotBefore = peerCertificate.getNotBefore(); this.sslPeerNotAfter = peerCertificate.getNotAfter(); this.sslSubjectAlternativeNames = X509CertificateUtils.getSubjectAlternativeNames(peerCertificate).stream() .map(SubjectAlternativeName::getValue) .collect(Collectors.toList()); this.sslPeerIssuerSubject = peerCertificate.getIssuerDN().getName(); this.sslPeerEncodedCertificate = peerCertificate.getEncoded(); } catch (SSLPeerUnverifiedException | CertificateEncodingException e) { } return this; } synchronized ConnectionInfo setSslHandshakeFailure(SSLHandshakeException exception) { this.sslHandshakeException = exception; return this; } synchronized ConnectionInfo setHttpProtocol(String protocol) { this.httpProtocol = protocol; return this; } synchronized ConnectionInfo setProxyProtocolVersion(String version) { this.proxyProtocolVersion = version; return this; } synchronized ConnectionLogEntry toLogEntry() { ConnectionLogEntry.Builder builder = ConnectionLogEntry.builder(uuid, Instant.ofEpochMilli(createdAt)); if (closedAt > 0) { builder.withDuration((closedAt - createdAt) / 1000D); } if (httpBytesReceived > 0) { builder.withHttpBytesReceived(httpBytesReceived); } if (httpBytesSent > 0) { builder.withHttpBytesSent(httpBytesSent); } if (requests > 0) { builder.withRequests(requests); } if (responses > 0) { builder.withResponses(responses); } if (peerAddress != null) { builder.withPeerAddress(peerAddress.getHostString()) .withPeerPort(peerAddress.getPort()); } if (localAddress != null) { builder.withLocalAddress(localAddress.getHostString()) 
.withLocalPort(localAddress.getPort()); } if (remoteAddress != null) { builder.withRemoteAddress(remoteAddress.getHostString()) .withRemotePort(remoteAddress.getPort()); } if (sslProtocol != null && sslCipherSuite != null && sslSessionId != null) { builder.withSslProtocol(sslProtocol) .withSslCipherSuite(sslCipherSuite) .withSslSessionId(HexDump.toHexString(sslSessionId)); } if (sslSniServerNames != null) { sslSniServerNames.stream() .filter(name -> name instanceof SNIHostName && name.getType() == StandardConstants.SNI_HOST_NAME) .map(name -> ((SNIHostName) name).getAsciiName()) .findAny() .ifPresent(builder::withSslSniServerName); } if (sslPeerSubject != null && sslPeerNotAfter != null && sslPeerNotBefore != null && sslPeerIssuerSubject != null && sslPeerEncodedCertificate != null) { builder.withSslPeerSubject(sslPeerSubject) .withSslPeerIssuerSubject(sslPeerIssuerSubject) .withSslPeerNotAfter(sslPeerNotAfter.toInstant()) .withSslPeerNotBefore(sslPeerNotBefore.toInstant()) .withSslPeerFingerprint(certificateFingerprint(sslPeerEncodedCertificate)); } if (sslSubjectAlternativeNames != null && !sslSubjectAlternativeNames.isEmpty()) { builder.withSslSubjectAlternativeNames(sslSubjectAlternativeNames); } if (sslHandshakeException != null) { List<ExceptionEntry> exceptionChain = new ArrayList<>(); Throwable cause = sslHandshakeException; while (cause != null) { exceptionChain.add(new ExceptionEntry(cause.getClass().getName(), cause.getMessage())); cause = cause.getCause(); } String type = SslHandshakeFailure.fromSslHandshakeException(sslHandshakeException) .map(SslHandshakeFailure::failureType) .orElse("UNKNOWN"); builder.withSslHandshakeFailure(new ConnectionLogEntry.SslHandshakeFailure(type, exceptionChain)); } if (httpProtocol != null) { builder.withHttpProtocol(httpProtocol); } if (proxyProtocolVersion != null) { builder.withProxyProtocolVersion(proxyProtocolVersion); } return builder.build(); } }
It would be better to store the readyAt where we compute the job versions, so that all data is available.
private List<Job> computeReadyJobs(DeploymentStatus status) { List<Job> jobs = new ArrayList<>(); Map<JobId, List<DeploymentStatus.Job>> jobsToRun = status.jobsToRun(); jobsToRun.forEach((job, versionsList) -> { versionsList.get(0).readyAt() .filter(readyAt -> ! clock.instant().isBefore(readyAt)) .filter(__ -> ! (job.type().isProduction() && isUnhealthyInAnotherZone(status.application(), job))) .filter(__ -> abortIfRunning(status, jobsToRun, job)) .map(readyAt -> deploymentJob(status.application().require(job.application().instance()), versionsList.get(0).versions(), job.type(), status.instanceJobs(job.application().instance()).get(job.type()), readyAt)) .ifPresent(jobs::add); }); return Collections.unmodifiableList(jobs); }
versionsList.get(0).readyAt()
private List<Job> computeReadyJobs(DeploymentStatus status) { List<Job> jobs = new ArrayList<>(); Map<JobId, List<DeploymentStatus.Job>> jobsToRun = status.jobsToRun(); jobsToRun.forEach((job, versionsList) -> { versionsList.get(0).readyAt() .filter(readyAt -> ! clock.instant().isBefore(readyAt)) .filter(__ -> ! (job.type().isProduction() && isUnhealthyInAnotherZone(status.application(), job))) .filter(__ -> abortIfRunning(status, jobsToRun, job)) .map(readyAt -> deploymentJob(status.application().require(job.application().instance()), versionsList.get(0).versions(), job.type(), status.instanceJobs(job.application().instance()).get(job.type()), readyAt)) .ifPresent(jobs::add); }); return Collections.unmodifiableList(jobs); }
class DeploymentTrigger { public static final Duration maxPause = Duration.ofDays(3); private final static Logger log = Logger.getLogger(DeploymentTrigger.class.getName()); private final Controller controller; private final Clock clock; private final JobController jobs; public DeploymentTrigger(Controller controller, Clock clock) { this.controller = Objects.requireNonNull(controller, "controller cannot be null"); this.clock = Objects.requireNonNull(clock, "clock cannot be null"); this.jobs = controller.jobController(); } public DeploymentSteps steps(DeploymentInstanceSpec spec) { return new DeploymentSteps(spec, controller::system); } public void notifyOfSubmission(TenantAndApplicationId id, ApplicationVersion version, long projectId) { if (applications().getApplication(id).isEmpty()) { log.log(Level.WARNING, "Ignoring submission from project '" + projectId + "': Unknown application '" + id + "'"); return; } applications().lockApplicationOrThrow(id, application -> { application = application.withProjectId(OptionalLong.of(projectId)); application = application.withNewSubmission(version); applications().store(application); }); triggerNewRevision(id); } /** * Propagates the latest revision to ready instances. * Ready instances are those whose dependencies are complete, and which aren't blocked, and, additionally, * which aren't upgrading, or are already deploying an application change, or failing upgrade. */ public void triggerNewRevision(TenantAndApplicationId id) { applications().lockApplicationIfPresent(id, application -> { DeploymentStatus status = jobs.deploymentStatus(application.get()); for (InstanceName instanceName : application.get().deploymentSpec().instanceNames()) { Change outstanding = status.outstandingChange(instanceName); if ( outstanding.hasTargets() && status.instanceSteps().get(instanceName) .readyAt(outstanding) .map(readyAt -> ! 
readyAt.isAfter(clock.instant())).orElse(false) && acceptNewApplicationVersion(status, instanceName)) { application = application.with(instanceName, instance -> withRemainingChange(instance, instance.change().with(outstanding.application().get()), status)); } } applications().store(application); }); } /** * Records information when a job completes (successfully or not). This information is used when deciding what to * trigger next. */ public void notifyOfCompletion(ApplicationId id) { if (applications().getInstance(id).isEmpty()) { log.log(Level.WARNING, "Ignoring completion of job of unknown application '" + id + "'"); return; } applications().lockApplicationOrThrow(TenantAndApplicationId.from(id), application -> applications().store(application.with(id.instance(), instance -> withRemainingChange(instance, instance.change(), jobs.deploymentStatus(application.get()))))); } /** * Finds and triggers jobs that can and should run but are currently not, and returns the number of triggered jobs. * * Only one job per type is triggered each run for test jobs, since their environments have limited capacity. 
*/ public long triggerReadyJobs() { List<Job> readyJobs = computeReadyJobs(); var prodJobs = new ArrayList<Job>(); var testJobs = new ArrayList<Job>(); for (Job job : readyJobs) { if (job.jobType.isTest()) testJobs.add(job); else prodJobs.add(job); } List<Job> sortedProdJobs = prodJobs.stream() .collect(groupingBy(Job::applicationId)) .values().stream() .flatMap(List::stream) .collect(Collectors.toUnmodifiableList()); Map<JobType, List<Job>> sortedTestJobsByType = testJobs.stream() .sorted(comparing(Job::isRetry) .thenComparing(Job::applicationUpgrade) .reversed() .thenComparing(Job::availableSince)) .collect(groupingBy(Job::jobType)); sortedProdJobs.forEach(this::trigger); long triggeredJobs = sortedProdJobs.size(); for (var jobs : sortedTestJobsByType.values()) { if (jobs.size() > 0) { trigger(jobs.get(0)); triggeredJobs++; } } return triggeredJobs; } /** Attempts to trigger the given job. */ public void trigger(Job job) { log.log(Level.FINE, () -> "Triggering " + job); applications().lockApplicationOrThrow(TenantAndApplicationId.from(job.applicationId()), application -> { jobs.start(job.applicationId(), job.jobType, job.versions); applications().store(application.with(job.applicationId().instance(), instance -> instance.withJobPause(job.jobType, OptionalLong.empty()))); }); } /** Force triggering of a job for given instance, with same versions as last run. 
*/ public JobId reTrigger(ApplicationId applicationId, JobType jobType) { Application application = applications().requireApplication(TenantAndApplicationId.from(applicationId)); Instance instance = application.require(applicationId.instance()); JobId job = new JobId(instance.id(), jobType); JobStatus jobStatus = jobs.jobStatus(new JobId(applicationId, jobType)); Versions versions = jobStatus.lastTriggered() .orElseThrow(() -> new IllegalArgumentException(job + " has never been triggered")) .versions(); trigger(deploymentJob(instance, versions, jobType, jobStatus, clock.instant())); return job; } /** Force triggering of a job for given instance. */ public List<JobId> forceTrigger(ApplicationId applicationId, JobType jobType, String user, boolean requireTests) { Application application = applications().requireApplication(TenantAndApplicationId.from(applicationId)); Instance instance = application.require(applicationId.instance()); JobId job = new JobId(instance.id(), jobType); if (job.type().environment().isManuallyDeployed()) return forceTriggerManualJob(job); DeploymentStatus status = jobs.deploymentStatus(application); Versions versions = Versions.from(instance.change(), application, status.deploymentFor(job), controller.readSystemVersion()); DeploymentStatus.Job toTrigger = new DeploymentStatus.Job(versions, Optional.of(controller.clock().instant()), instance.change()); Map<JobId, List<DeploymentStatus.Job>> jobs = status.testJobs(Map.of(job, List.of(toTrigger))); if (jobs.isEmpty() || ! 
requireTests) jobs = Map.of(job, List.of(toTrigger)); jobs.forEach((jobId, versionsList) -> { trigger(deploymentJob(instance, versionsList.get(0).versions(), jobId.type(), status.jobs().get(jobId).get(), clock.instant())); }); return List.copyOf(jobs.keySet()); } private List<JobId> forceTriggerManualJob(JobId job) { Run last = jobs.last(job).orElseThrow(() -> new IllegalArgumentException(job + " has never been run")); Versions target = new Versions(controller.readSystemVersion(), last.versions().targetApplication(), Optional.of(last.versions().targetPlatform()), Optional.of(last.versions().targetApplication())); jobs.start(job.application(), job.type(), target, true); return List.of(job); } /** Retrigger job. If the job is already running, it will be canceled, and retrigger enqueued. */ public Optional<JobId> reTriggerOrAddToQueue(DeploymentId deployment) { JobType jobType = JobType.from(controller.system(), deployment.zoneId()) .orElseThrow(() -> new IllegalArgumentException(Text.format("No job to trigger for (system/zone): %s/%s", controller.system().value(), deployment.zoneId().value()))); Optional<Run> existingRun = controller.jobController().active(deployment.applicationId()).stream() .filter(run -> run.id().type().equals(jobType)) .findFirst(); if (existingRun.isPresent()) { Run run = existingRun.get(); try (Lock lock = controller.curator().lockDeploymentRetriggerQueue()) { List<RetriggerEntry> retriggerEntries = controller.curator().readRetriggerEntries(); List<RetriggerEntry> newList = new ArrayList<>(retriggerEntries); RetriggerEntry requiredEntry = new RetriggerEntry(new JobId(deployment.applicationId(), jobType), run.id().number() + 1); if(newList.stream().noneMatch(entry -> entry.jobId().equals(requiredEntry.jobId()) && entry.requiredRun()>=requiredEntry.requiredRun())) { newList.add(requiredEntry); } newList = newList.stream() .filter(entry -> !(entry.jobId().equals(requiredEntry.jobId()) && entry.requiredRun() < requiredEntry.requiredRun())) 
.collect(toList()); controller.curator().writeRetriggerEntries(newList); } controller.jobController().abort(run.id()); return Optional.empty(); } else { return Optional.of(reTrigger(deployment.applicationId(), jobType)); } } /** Prevents jobs of the given type from starting, until the given time. */ public void pauseJob(ApplicationId id, JobType jobType, Instant until) { if (until.isAfter(clock.instant().plus(maxPause))) throw new IllegalArgumentException("Pause only allowed for up to " + maxPause); applications().lockApplicationOrThrow(TenantAndApplicationId.from(id), application -> applications().store(application.with(id.instance(), instance -> instance.withJobPause(jobType, OptionalLong.of(until.toEpochMilli()))))); } /** Resumes a previously paused job, letting it be triggered normally. */ public void resumeJob(ApplicationId id, JobType jobType) { applications().lockApplicationOrThrow(TenantAndApplicationId.from(id), application -> applications().store(application.with(id.instance(), instance -> instance.withJobPause(jobType, OptionalLong.empty())))); } /** Triggers a change of this application, unless it already has a change. */ public void triggerChange(ApplicationId instanceId, Change change) { applications().lockApplicationOrThrow(TenantAndApplicationId.from(instanceId), application -> { if ( ! application.get().require(instanceId.instance()).change().hasTargets()) forceChange(instanceId, change); }); } /** Overrides the given instance's platform and application changes with any contained in the given change. 
*/ public void forceChange(ApplicationId instanceId, Change change) { applications().lockApplicationOrThrow(TenantAndApplicationId.from(instanceId), application -> { applications().store(application.with(instanceId.instance(), instance -> withRemainingChange(instance, change.onTopOf(application.get().require(instanceId.instance()).change()), jobs.deploymentStatus(application.get())))); }); } /** Cancels the indicated part of the given application's change. */ public void cancelChange(ApplicationId instanceId, ChangesToCancel cancellation) { applications().lockApplicationOrThrow(TenantAndApplicationId.from(instanceId), application -> { Change change; switch (cancellation) { case ALL: change = Change.empty(); break; case VERSIONS: change = Change.empty().withPin(); break; case PLATFORM: change = application.get().require(instanceId.instance()).change().withoutPlatform(); break; case APPLICATION: change = application.get().require(instanceId.instance()).change().withoutApplication(); break; case PIN: change = application.get().require(instanceId.instance()).change().withoutPin(); break; default: throw new IllegalArgumentException("Unknown cancellation choice '" + cancellation + "'!"); } applications().store(application.with(instanceId.instance(), instance -> withRemainingChange(instance, change, jobs.deploymentStatus(application.get())))); }); } public enum ChangesToCancel { ALL, PLATFORM, APPLICATION, VERSIONS, PIN } private ApplicationController applications() { return controller.applications(); } /** Returns the set of all jobs which have changes to propagate from the upstream steps. */ private List<Job> computeReadyJobs() { return jobs.deploymentStatuses(ApplicationList.from(applications().readable()) .withProjectId() .withDeploymentSpec()) .withChanges() .asList().stream() .map(this::computeReadyJobs) .flatMap(Collection::stream) .collect(toList()); } /** Finds the next step to trigger for the given application, if any, and returns these as a list. 
*/ /** Returns whether the application is healthy in all other production zones. */ private boolean isUnhealthyInAnotherZone(Application application, JobId job) { for (Deployment deployment : application.require(job.application().instance()).productionDeployments().values()) { if ( ! deployment.zone().equals(job.type().zone(controller.system())) && ! controller.applications().isHealthy(new DeploymentId(job.application(), deployment.zone()))) return true; } return false; } private void abortIfOutdated(DeploymentStatus status, Map<JobId, List<DeploymentStatus.Job>> jobs, JobId job) { status.jobs().get(job) .flatMap(JobStatus::lastTriggered) .filter(last -> ! last.hasEnded()) .ifPresent(last -> { if (jobs.get(job).stream().noneMatch(versions -> versions.versions().targetsMatch(last.versions()) && versions.versions().sourcesMatchIfPresent(last.versions()))) { log.log(Level.INFO, "Aborting outdated run " + last); controller.jobController().abort(last.id()); } }); } /** Returns whether the job is free to start, and also aborts it if it's running with outdated versions. */ private boolean abortIfRunning(DeploymentStatus status, Map<JobId, List<DeploymentStatus.Job>> jobs, JobId job) { abortIfOutdated(status, jobs, job); boolean blocked = status.jobs().get(job).get().isRunning(); if ( ! job.type().isTest()) { Optional<JobStatus> productionTest = JobType.testFrom(controller.system(), job.type().zone(controller.system()).region()) .map(type -> new JobId(job.application(), type)) .flatMap(status.jobs()::get); if (productionTest.isPresent()) { abortIfOutdated(status, jobs, productionTest.get().id()); if (productionTest.map(JobStatus::id).map(jobs::get) .map(versions -> ! versions.get(0).versions().targetsMatch(jobs.get(job).get(0).versions())) .orElse(false)) blocked = true; } } return ! 
blocked; } private boolean acceptNewApplicationVersion(DeploymentStatus status, InstanceName instance) { if (status.application().deploymentSpec().instance(instance).isEmpty()) return false; if (status.hasFailures(instance)) return true; DeploymentInstanceSpec spec = status.application().deploymentSpec().requireInstance(instance); Change change = status.application().require(instance).change(); if (change.application().isPresent() && spec.upgradeRevision() == DeploymentSpec.UpgradeRevision.separate) return false; return true; } private Instance withRemainingChange(Instance instance, Change change, DeploymentStatus status) { Change remaining = change; if (status.jobsToRun(Map.of(instance.name(), change.withoutApplication())).isEmpty()) remaining = remaining.withoutPlatform(); if (status.jobsToRun(Map.of(instance.name(), change.withoutPlatform())).isEmpty()) { remaining = remaining.withoutApplication(); if (change.application().isPresent()) instance = instance.withLatestDeployed(change.application().get()); } return instance.withChange(remaining); } private Job deploymentJob(Instance instance, Versions versions, JobType jobType, JobStatus jobStatus, Instant availableSince) { return new Job(instance, versions, jobType, availableSince, jobStatus.isOutOfCapacity(), instance.change().application().isPresent()); } private static class Job { private final ApplicationId instanceId; private final JobType jobType; private final Versions versions; private final Instant availableSince; private final boolean isRetry; private final boolean isApplicationUpgrade; private Job(Instance instance, Versions versions, JobType jobType, Instant availableSince, boolean isRetry, boolean isApplicationUpgrade) { this.instanceId = instance.id(); this.jobType = jobType; this.versions = versions; this.availableSince = availableSince; this.isRetry = isRetry; this.isApplicationUpgrade = isApplicationUpgrade; } ApplicationId applicationId() { return instanceId; } JobType jobType() { return jobType; 
} Instant availableSince() { return availableSince; } boolean isRetry() { return isRetry; } boolean applicationUpgrade() { return isApplicationUpgrade; } @Override public String toString() { return jobType + " for " + instanceId + " on (" + versions.targetPlatform() + versions.sourcePlatform().map(version -> " <-- " + version).orElse("") + ", " + versions.targetApplication().id() + versions.sourceApplication().map(version -> " <-- " + version.id()).orElse("") + "), ready since " + availableSince; } } }
class DeploymentTrigger { public static final Duration maxPause = Duration.ofDays(3); private final static Logger log = Logger.getLogger(DeploymentTrigger.class.getName()); private final Controller controller; private final Clock clock; private final JobController jobs; public DeploymentTrigger(Controller controller, Clock clock) { this.controller = Objects.requireNonNull(controller, "controller cannot be null"); this.clock = Objects.requireNonNull(clock, "clock cannot be null"); this.jobs = controller.jobController(); } public DeploymentSteps steps(DeploymentInstanceSpec spec) { return new DeploymentSteps(spec, controller::system); } public void notifyOfSubmission(TenantAndApplicationId id, ApplicationVersion version, long projectId) { if (applications().getApplication(id).isEmpty()) { log.log(Level.WARNING, "Ignoring submission from project '" + projectId + "': Unknown application '" + id + "'"); return; } applications().lockApplicationOrThrow(id, application -> { application = application.withProjectId(OptionalLong.of(projectId)); application = application.withNewSubmission(version); applications().store(application); }); triggerNewRevision(id); } /** * Propagates the latest revision to ready instances. * Ready instances are those whose dependencies are complete, and which aren't blocked, and, additionally, * which aren't upgrading, or are already deploying an application change, or failing upgrade. */ public void triggerNewRevision(TenantAndApplicationId id) { applications().lockApplicationIfPresent(id, application -> { DeploymentStatus status = jobs.deploymentStatus(application.get()); for (InstanceName instanceName : application.get().deploymentSpec().instanceNames()) { Change outstanding = status.outstandingChange(instanceName); if ( outstanding.hasTargets() && status.instanceSteps().get(instanceName) .readyAt(outstanding) .map(readyAt -> ! 
readyAt.isAfter(clock.instant())).orElse(false) && acceptNewApplicationVersion(status, instanceName, outstanding.application().get())) { application = application.with(instanceName, instance -> withRemainingChange(instance, instance.change().with(outstanding.application().get()), status)); } } applications().store(application); }); } /** * Records information when a job completes (successfully or not). This information is used when deciding what to * trigger next. */ public void notifyOfCompletion(ApplicationId id) { if (applications().getInstance(id).isEmpty()) { log.log(Level.WARNING, "Ignoring completion of job of unknown application '" + id + "'"); return; } applications().lockApplicationOrThrow(TenantAndApplicationId.from(id), application -> applications().store(application.with(id.instance(), instance -> withRemainingChange(instance, instance.change(), jobs.deploymentStatus(application.get()))))); } /** * Finds and triggers jobs that can and should run but are currently not, and returns the number of triggered jobs. * * Only one job per type is triggered each run for test jobs, since their environments have limited capacity. 
*/ public long triggerReadyJobs() { List<Job> readyJobs = computeReadyJobs(); var prodJobs = new ArrayList<Job>(); var testJobs = new ArrayList<Job>(); for (Job job : readyJobs) { if (job.jobType.isTest()) testJobs.add(job); else prodJobs.add(job); } List<Job> sortedProdJobs = prodJobs.stream() .collect(groupingBy(Job::applicationId)) .values().stream() .flatMap(List::stream) .collect(Collectors.toUnmodifiableList()); Map<JobType, List<Job>> sortedTestJobsByType = testJobs.stream() .sorted(comparing(Job::isRetry) .thenComparing(Job::applicationUpgrade) .reversed() .thenComparing(Job::availableSince)) .collect(groupingBy(Job::jobType)); sortedProdJobs.forEach(this::trigger); long triggeredJobs = sortedProdJobs.size(); for (var jobs : sortedTestJobsByType.values()) { if (jobs.size() > 0) { trigger(jobs.get(0)); triggeredJobs++; } } return triggeredJobs; } /** Attempts to trigger the given job. */ public void trigger(Job job) { log.log(Level.FINE, () -> "Triggering " + job); applications().lockApplicationOrThrow(TenantAndApplicationId.from(job.applicationId()), application -> { jobs.start(job.applicationId(), job.jobType, job.versions); applications().store(application.with(job.applicationId().instance(), instance -> instance.withJobPause(job.jobType, OptionalLong.empty()))); }); } /** Force triggering of a job for given instance, with same versions as last run. 
*/ public JobId reTrigger(ApplicationId applicationId, JobType jobType) { Application application = applications().requireApplication(TenantAndApplicationId.from(applicationId)); Instance instance = application.require(applicationId.instance()); JobId job = new JobId(instance.id(), jobType); JobStatus jobStatus = jobs.jobStatus(new JobId(applicationId, jobType)); Versions versions = jobStatus.lastTriggered() .orElseThrow(() -> new IllegalArgumentException(job + " has never been triggered")) .versions(); trigger(deploymentJob(instance, versions, jobType, jobStatus, clock.instant())); return job; } /** Force triggering of a job for given instance. */ public List<JobId> forceTrigger(ApplicationId applicationId, JobType jobType, String user, boolean requireTests) { Application application = applications().requireApplication(TenantAndApplicationId.from(applicationId)); Instance instance = application.require(applicationId.instance()); JobId job = new JobId(instance.id(), jobType); if (job.type().environment().isManuallyDeployed()) return forceTriggerManualJob(job); DeploymentStatus status = jobs.deploymentStatus(application); Versions versions = Versions.from(instance.change(), application, status.deploymentFor(job), controller.readSystemVersion()); DeploymentStatus.Job toTrigger = new DeploymentStatus.Job(versions, Optional.of(controller.clock().instant()), instance.change()); Map<JobId, List<DeploymentStatus.Job>> jobs = status.testJobs(Map.of(job, List.of(toTrigger))); if (jobs.isEmpty() || ! 
requireTests) jobs = Map.of(job, List.of(toTrigger)); jobs.forEach((jobId, versionsList) -> { trigger(deploymentJob(instance, versionsList.get(0).versions(), jobId.type(), status.jobs().get(jobId).get(), clock.instant())); }); return List.copyOf(jobs.keySet()); } private List<JobId> forceTriggerManualJob(JobId job) { Run last = jobs.last(job).orElseThrow(() -> new IllegalArgumentException(job + " has never been run")); Versions target = new Versions(controller.readSystemVersion(), last.versions().targetApplication(), Optional.of(last.versions().targetPlatform()), Optional.of(last.versions().targetApplication())); jobs.start(job.application(), job.type(), target, true); return List.of(job); } /** Retrigger job. If the job is already running, it will be canceled, and retrigger enqueued. */ public Optional<JobId> reTriggerOrAddToQueue(DeploymentId deployment) { JobType jobType = JobType.from(controller.system(), deployment.zoneId()) .orElseThrow(() -> new IllegalArgumentException(Text.format("No job to trigger for (system/zone): %s/%s", controller.system().value(), deployment.zoneId().value()))); Optional<Run> existingRun = controller.jobController().active(deployment.applicationId()).stream() .filter(run -> run.id().type().equals(jobType)) .findFirst(); if (existingRun.isPresent()) { Run run = existingRun.get(); try (Lock lock = controller.curator().lockDeploymentRetriggerQueue()) { List<RetriggerEntry> retriggerEntries = controller.curator().readRetriggerEntries(); List<RetriggerEntry> newList = new ArrayList<>(retriggerEntries); RetriggerEntry requiredEntry = new RetriggerEntry(new JobId(deployment.applicationId(), jobType), run.id().number() + 1); if(newList.stream().noneMatch(entry -> entry.jobId().equals(requiredEntry.jobId()) && entry.requiredRun()>=requiredEntry.requiredRun())) { newList.add(requiredEntry); } newList = newList.stream() .filter(entry -> !(entry.jobId().equals(requiredEntry.jobId()) && entry.requiredRun() < requiredEntry.requiredRun())) 
.collect(toList()); controller.curator().writeRetriggerEntries(newList); } controller.jobController().abort(run.id()); return Optional.empty(); } else { return Optional.of(reTrigger(deployment.applicationId(), jobType)); } } /** Prevents jobs of the given type from starting, until the given time. */ public void pauseJob(ApplicationId id, JobType jobType, Instant until) { if (until.isAfter(clock.instant().plus(maxPause))) throw new IllegalArgumentException("Pause only allowed for up to " + maxPause); applications().lockApplicationOrThrow(TenantAndApplicationId.from(id), application -> applications().store(application.with(id.instance(), instance -> instance.withJobPause(jobType, OptionalLong.of(until.toEpochMilli()))))); } /** Resumes a previously paused job, letting it be triggered normally. */ public void resumeJob(ApplicationId id, JobType jobType) { applications().lockApplicationOrThrow(TenantAndApplicationId.from(id), application -> applications().store(application.with(id.instance(), instance -> instance.withJobPause(jobType, OptionalLong.empty())))); } /** Triggers a change of this application, unless it already has a change. */ public void triggerChange(ApplicationId instanceId, Change change) { applications().lockApplicationOrThrow(TenantAndApplicationId.from(instanceId), application -> { if ( ! application.get().require(instanceId.instance()).change().hasTargets()) forceChange(instanceId, change); }); } /** Overrides the given instance's platform and application changes with any contained in the given change. 
*/ public void forceChange(ApplicationId instanceId, Change change) { applications().lockApplicationOrThrow(TenantAndApplicationId.from(instanceId), application -> { applications().store(application.with(instanceId.instance(), instance -> withRemainingChange(instance, change.onTopOf(application.get().require(instanceId.instance()).change()), jobs.deploymentStatus(application.get())))); }); } /** Cancels the indicated part of the given application's change. */ public void cancelChange(ApplicationId instanceId, ChangesToCancel cancellation) { applications().lockApplicationOrThrow(TenantAndApplicationId.from(instanceId), application -> { Change change; switch (cancellation) { case ALL: change = Change.empty(); break; case VERSIONS: change = Change.empty().withPin(); break; case PLATFORM: change = application.get().require(instanceId.instance()).change().withoutPlatform(); break; case APPLICATION: change = application.get().require(instanceId.instance()).change().withoutApplication(); break; case PIN: change = application.get().require(instanceId.instance()).change().withoutPin(); break; default: throw new IllegalArgumentException("Unknown cancellation choice '" + cancellation + "'!"); } applications().store(application.with(instanceId.instance(), instance -> withRemainingChange(instance, change, jobs.deploymentStatus(application.get())))); }); } public enum ChangesToCancel { ALL, PLATFORM, APPLICATION, VERSIONS, PIN } private ApplicationController applications() { return controller.applications(); } /** Returns the set of all jobs which have changes to propagate from the upstream steps. */ private List<Job> computeReadyJobs() { return jobs.deploymentStatuses(ApplicationList.from(applications().readable()) .withProjectId() .withDeploymentSpec()) .withChanges() .asList().stream() .map(this::computeReadyJobs) .flatMap(Collection::stream) .collect(toList()); } /** Finds the next step to trigger for the given application, if any, and returns these as a list. 
*/ /** Returns whether the application is healthy in all other production zones. */ private boolean isUnhealthyInAnotherZone(Application application, JobId job) { for (Deployment deployment : application.require(job.application().instance()).productionDeployments().values()) { if ( ! deployment.zone().equals(job.type().zone(controller.system())) && ! controller.applications().isHealthy(new DeploymentId(job.application(), deployment.zone()))) return true; } return false; } private void abortIfOutdated(DeploymentStatus status, Map<JobId, List<DeploymentStatus.Job>> jobs, JobId job) { status.jobs().get(job) .flatMap(JobStatus::lastTriggered) .filter(last -> ! last.hasEnded()) .ifPresent(last -> { if (jobs.get(job).stream().noneMatch(versions -> versions.versions().targetsMatch(last.versions()) && versions.versions().sourcesMatchIfPresent(last.versions()))) { log.log(Level.INFO, "Aborting outdated run " + last); controller.jobController().abort(last.id()); } }); } /** Returns whether the job is free to start, and also aborts it if it's running with outdated versions. */ private boolean abortIfRunning(DeploymentStatus status, Map<JobId, List<DeploymentStatus.Job>> jobs, JobId job) { abortIfOutdated(status, jobs, job); boolean blocked = status.jobs().get(job).get().isRunning(); if ( ! job.type().isTest()) { Optional<JobStatus> productionTest = JobType.testFrom(controller.system(), job.type().zone(controller.system()).region()) .map(type -> new JobId(job.application(), type)) .flatMap(status.jobs()::get); if (productionTest.isPresent()) { abortIfOutdated(status, jobs, productionTest.get().id()); if (productionTest.map(JobStatus::id).map(jobs::get) .map(versions -> ! versions.get(0).versions().targetsMatch(jobs.get(job).get(0).versions())) .orElse(false)) blocked = true; } } return ! 
blocked; } private boolean acceptNewApplicationVersion(DeploymentStatus status, InstanceName instance, ApplicationVersion version) { if (status.application().deploymentSpec().instance(instance).isEmpty()) return false; if (status.hasFailures(version)) return true; DeploymentInstanceSpec spec = status.application().deploymentSpec().requireInstance(instance); Change change = status.application().require(instance).change(); return change.application().isEmpty() || spec.upgradeRevision() != DeploymentSpec.UpgradeRevision.separate; } private Instance withRemainingChange(Instance instance, Change change, DeploymentStatus status) { Change remaining = change; if (status.jobsToRun(Map.of(instance.name(), change.withoutApplication())).isEmpty()) remaining = remaining.withoutPlatform(); if (status.jobsToRun(Map.of(instance.name(), change.withoutPlatform())).isEmpty()) { remaining = remaining.withoutApplication(); if (change.application().isPresent()) instance = instance.withLatestDeployed(change.application().get()); } return instance.withChange(remaining); } private Job deploymentJob(Instance instance, Versions versions, JobType jobType, JobStatus jobStatus, Instant availableSince) { return new Job(instance, versions, jobType, availableSince, jobStatus.isOutOfCapacity(), instance.change().application().isPresent()); } private static class Job { private final ApplicationId instanceId; private final JobType jobType; private final Versions versions; private final Instant availableSince; private final boolean isRetry; private final boolean isApplicationUpgrade; private Job(Instance instance, Versions versions, JobType jobType, Instant availableSince, boolean isRetry, boolean isApplicationUpgrade) { this.instanceId = instance.id(); this.jobType = jobType; this.versions = versions; this.availableSince = availableSince; this.isRetry = isRetry; this.isApplicationUpgrade = isApplicationUpgrade; } ApplicationId applicationId() { return instanceId; } JobType jobType() { return jobType; 
} Instant availableSince() { return availableSince; } boolean isRetry() { return isRetry; } boolean applicationUpgrade() { return isApplicationUpgrade; } @Override public String toString() { return jobType + " for " + instanceId + " on (" + versions.targetPlatform() + versions.sourcePlatform().map(version -> " <-- " + version).orElse("") + ", " + versions.targetApplication().id() + versions.sourceApplication().map(version -> " <-- " + version.id()).orElse("") + "), ready since " + availableSince; } } }
Woop.
private boolean abortIfRunning(DeploymentStatus status, Map<JobId, List<DeploymentStatus.Job>> jobs, JobId job) { abortIfOutdated(status, jobs, job); boolean blocked = status.jobs().get(job).get().isRunning(); if ( ! job.type().isTest()) { Optional<JobStatus> productionTest = JobType.testFrom(controller.system(), job.type().zone(controller.system()).region()) .map(type -> new JobId(job.application(), type)) .flatMap(status.jobs()::get); if (productionTest.isPresent()) { abortIfOutdated(status, jobs, productionTest.get().id()); if (productionTest.map(JobStatus::id).map(jobs::get) .map(versions -> ! versions.get(0).versions().targetsMatch(jobs.get(job).get(0).versions())) .orElse(false)) blocked = true; } } return ! blocked; }
.map(type -> new JobId(job.application(), type))
private boolean abortIfRunning(DeploymentStatus status, Map<JobId, List<DeploymentStatus.Job>> jobs, JobId job) { abortIfOutdated(status, jobs, job); boolean blocked = status.jobs().get(job).get().isRunning(); if ( ! job.type().isTest()) { Optional<JobStatus> productionTest = JobType.testFrom(controller.system(), job.type().zone(controller.system()).region()) .map(type -> new JobId(job.application(), type)) .flatMap(status.jobs()::get); if (productionTest.isPresent()) { abortIfOutdated(status, jobs, productionTest.get().id()); if (productionTest.map(JobStatus::id).map(jobs::get) .map(versions -> ! versions.get(0).versions().targetsMatch(jobs.get(job).get(0).versions())) .orElse(false)) blocked = true; } } return ! blocked; }
class DeploymentTrigger { public static final Duration maxPause = Duration.ofDays(3); private final static Logger log = Logger.getLogger(DeploymentTrigger.class.getName()); private final Controller controller; private final Clock clock; private final JobController jobs; public DeploymentTrigger(Controller controller, Clock clock) { this.controller = Objects.requireNonNull(controller, "controller cannot be null"); this.clock = Objects.requireNonNull(clock, "clock cannot be null"); this.jobs = controller.jobController(); } public DeploymentSteps steps(DeploymentInstanceSpec spec) { return new DeploymentSteps(spec, controller::system); } public void notifyOfSubmission(TenantAndApplicationId id, ApplicationVersion version, long projectId) { if (applications().getApplication(id).isEmpty()) { log.log(Level.WARNING, "Ignoring submission from project '" + projectId + "': Unknown application '" + id + "'"); return; } applications().lockApplicationOrThrow(id, application -> { application = application.withProjectId(OptionalLong.of(projectId)); application = application.withNewSubmission(version); applications().store(application); }); triggerNewRevision(id); } /** * Propagates the latest revision to ready instances. * Ready instances are those whose dependencies are complete, and which aren't blocked, and, additionally, * which aren't upgrading, or are already deploying an application change, or failing upgrade. */ public void triggerNewRevision(TenantAndApplicationId id) { applications().lockApplicationIfPresent(id, application -> { DeploymentStatus status = jobs.deploymentStatus(application.get()); for (InstanceName instanceName : application.get().deploymentSpec().instanceNames()) { Change outstanding = status.outstandingChange(instanceName); if ( outstanding.hasTargets() && status.instanceSteps().get(instanceName) .readyAt(outstanding) .map(readyAt -> ! 
readyAt.isAfter(clock.instant())).orElse(false) && acceptNewApplicationVersion(status, instanceName)) { application = application.with(instanceName, instance -> withRemainingChange(instance, instance.change().with(outstanding.application().get()), status)); } } applications().store(application); }); } /** * Records information when a job completes (successfully or not). This information is used when deciding what to * trigger next. */ public void notifyOfCompletion(ApplicationId id) { if (applications().getInstance(id).isEmpty()) { log.log(Level.WARNING, "Ignoring completion of job of unknown application '" + id + "'"); return; } applications().lockApplicationOrThrow(TenantAndApplicationId.from(id), application -> applications().store(application.with(id.instance(), instance -> withRemainingChange(instance, instance.change(), jobs.deploymentStatus(application.get()))))); } /** * Finds and triggers jobs that can and should run but are currently not, and returns the number of triggered jobs. * * Only one job per type is triggered each run for test jobs, since their environments have limited capacity. 
*/ public long triggerReadyJobs() { List<Job> readyJobs = computeReadyJobs(); var prodJobs = new ArrayList<Job>(); var testJobs = new ArrayList<Job>(); for (Job job : readyJobs) { if (job.jobType.isTest()) testJobs.add(job); else prodJobs.add(job); } List<Job> sortedProdJobs = prodJobs.stream() .collect(groupingBy(Job::applicationId)) .values().stream() .flatMap(List::stream) .collect(Collectors.toUnmodifiableList()); Map<JobType, List<Job>> sortedTestJobsByType = testJobs.stream() .sorted(comparing(Job::isRetry) .thenComparing(Job::applicationUpgrade) .reversed() .thenComparing(Job::availableSince)) .collect(groupingBy(Job::jobType)); sortedProdJobs.forEach(this::trigger); long triggeredJobs = sortedProdJobs.size(); for (var jobs : sortedTestJobsByType.values()) { if (jobs.size() > 0) { trigger(jobs.get(0)); triggeredJobs++; } } return triggeredJobs; } /** Attempts to trigger the given job. */ public void trigger(Job job) { log.log(Level.FINE, () -> "Triggering " + job); applications().lockApplicationOrThrow(TenantAndApplicationId.from(job.applicationId()), application -> { jobs.start(job.applicationId(), job.jobType, job.versions); applications().store(application.with(job.applicationId().instance(), instance -> instance.withJobPause(job.jobType, OptionalLong.empty()))); }); } /** Force triggering of a job for given instance, with same versions as last run. 
*/ public JobId reTrigger(ApplicationId applicationId, JobType jobType) { Application application = applications().requireApplication(TenantAndApplicationId.from(applicationId)); Instance instance = application.require(applicationId.instance()); JobId job = new JobId(instance.id(), jobType); JobStatus jobStatus = jobs.jobStatus(new JobId(applicationId, jobType)); Versions versions = jobStatus.lastTriggered() .orElseThrow(() -> new IllegalArgumentException(job + " has never been triggered")) .versions(); trigger(deploymentJob(instance, versions, jobType, jobStatus, clock.instant())); return job; } /** Force triggering of a job for given instance. */ public List<JobId> forceTrigger(ApplicationId applicationId, JobType jobType, String user, boolean requireTests) { Application application = applications().requireApplication(TenantAndApplicationId.from(applicationId)); Instance instance = application.require(applicationId.instance()); JobId job = new JobId(instance.id(), jobType); if (job.type().environment().isManuallyDeployed()) return forceTriggerManualJob(job); DeploymentStatus status = jobs.deploymentStatus(application); Versions versions = Versions.from(instance.change(), application, status.deploymentFor(job), controller.readSystemVersion()); DeploymentStatus.Job toTrigger = new DeploymentStatus.Job(versions, Optional.of(controller.clock().instant()), instance.change()); Map<JobId, List<DeploymentStatus.Job>> jobs = status.testJobs(Map.of(job, List.of(toTrigger))); if (jobs.isEmpty() || ! 
requireTests) jobs = Map.of(job, List.of(toTrigger)); jobs.forEach((jobId, versionsList) -> { trigger(deploymentJob(instance, versionsList.get(0).versions(), jobId.type(), status.jobs().get(jobId).get(), clock.instant())); }); return List.copyOf(jobs.keySet()); } private List<JobId> forceTriggerManualJob(JobId job) { Run last = jobs.last(job).orElseThrow(() -> new IllegalArgumentException(job + " has never been run")); Versions target = new Versions(controller.readSystemVersion(), last.versions().targetApplication(), Optional.of(last.versions().targetPlatform()), Optional.of(last.versions().targetApplication())); jobs.start(job.application(), job.type(), target, true); return List.of(job); } /** Retrigger job. If the job is already running, it will be canceled, and retrigger enqueued. */ public Optional<JobId> reTriggerOrAddToQueue(DeploymentId deployment) { JobType jobType = JobType.from(controller.system(), deployment.zoneId()) .orElseThrow(() -> new IllegalArgumentException(Text.format("No job to trigger for (system/zone): %s/%s", controller.system().value(), deployment.zoneId().value()))); Optional<Run> existingRun = controller.jobController().active(deployment.applicationId()).stream() .filter(run -> run.id().type().equals(jobType)) .findFirst(); if (existingRun.isPresent()) { Run run = existingRun.get(); try (Lock lock = controller.curator().lockDeploymentRetriggerQueue()) { List<RetriggerEntry> retriggerEntries = controller.curator().readRetriggerEntries(); List<RetriggerEntry> newList = new ArrayList<>(retriggerEntries); RetriggerEntry requiredEntry = new RetriggerEntry(new JobId(deployment.applicationId(), jobType), run.id().number() + 1); if(newList.stream().noneMatch(entry -> entry.jobId().equals(requiredEntry.jobId()) && entry.requiredRun()>=requiredEntry.requiredRun())) { newList.add(requiredEntry); } newList = newList.stream() .filter(entry -> !(entry.jobId().equals(requiredEntry.jobId()) && entry.requiredRun() < requiredEntry.requiredRun())) 
.collect(toList()); controller.curator().writeRetriggerEntries(newList); } controller.jobController().abort(run.id()); return Optional.empty(); } else { return Optional.of(reTrigger(deployment.applicationId(), jobType)); } } /** Prevents jobs of the given type from starting, until the given time. */ public void pauseJob(ApplicationId id, JobType jobType, Instant until) { if (until.isAfter(clock.instant().plus(maxPause))) throw new IllegalArgumentException("Pause only allowed for up to " + maxPause); applications().lockApplicationOrThrow(TenantAndApplicationId.from(id), application -> applications().store(application.with(id.instance(), instance -> instance.withJobPause(jobType, OptionalLong.of(until.toEpochMilli()))))); } /** Resumes a previously paused job, letting it be triggered normally. */ public void resumeJob(ApplicationId id, JobType jobType) { applications().lockApplicationOrThrow(TenantAndApplicationId.from(id), application -> applications().store(application.with(id.instance(), instance -> instance.withJobPause(jobType, OptionalLong.empty())))); } /** Triggers a change of this application, unless it already has a change. */ public void triggerChange(ApplicationId instanceId, Change change) { applications().lockApplicationOrThrow(TenantAndApplicationId.from(instanceId), application -> { if ( ! application.get().require(instanceId.instance()).change().hasTargets()) forceChange(instanceId, change); }); } /** Overrides the given instance's platform and application changes with any contained in the given change. 
*/ public void forceChange(ApplicationId instanceId, Change change) { applications().lockApplicationOrThrow(TenantAndApplicationId.from(instanceId), application -> { applications().store(application.with(instanceId.instance(), instance -> withRemainingChange(instance, change.onTopOf(application.get().require(instanceId.instance()).change()), jobs.deploymentStatus(application.get())))); }); } /** Cancels the indicated part of the given application's change. */ public void cancelChange(ApplicationId instanceId, ChangesToCancel cancellation) { applications().lockApplicationOrThrow(TenantAndApplicationId.from(instanceId), application -> { Change change; switch (cancellation) { case ALL: change = Change.empty(); break; case VERSIONS: change = Change.empty().withPin(); break; case PLATFORM: change = application.get().require(instanceId.instance()).change().withoutPlatform(); break; case APPLICATION: change = application.get().require(instanceId.instance()).change().withoutApplication(); break; case PIN: change = application.get().require(instanceId.instance()).change().withoutPin(); break; default: throw new IllegalArgumentException("Unknown cancellation choice '" + cancellation + "'!"); } applications().store(application.with(instanceId.instance(), instance -> withRemainingChange(instance, change, jobs.deploymentStatus(application.get())))); }); } public enum ChangesToCancel { ALL, PLATFORM, APPLICATION, VERSIONS, PIN } private ApplicationController applications() { return controller.applications(); } /** Returns the set of all jobs which have changes to propagate from the upstream steps. */ private List<Job> computeReadyJobs() { return jobs.deploymentStatuses(ApplicationList.from(applications().readable()) .withProjectId() .withDeploymentSpec()) .withChanges() .asList().stream() .map(this::computeReadyJobs) .flatMap(Collection::stream) .collect(toList()); } /** Finds the next step to trigger for the given application, if any, and returns these as a list. 
*/ private List<Job> computeReadyJobs(DeploymentStatus status) { List<Job> jobs = new ArrayList<>(); Map<JobId, List<DeploymentStatus.Job>> jobsToRun = status.jobsToRun(); jobsToRun.forEach((job, versionsList) -> { versionsList.get(0).readyAt() .filter(readyAt -> ! clock.instant().isBefore(readyAt)) .filter(__ -> ! (job.type().isProduction() && isUnhealthyInAnotherZone(status.application(), job))) .filter(__ -> abortIfRunning(status, jobsToRun, job)) .map(readyAt -> deploymentJob(status.application().require(job.application().instance()), versionsList.get(0).versions(), job.type(), status.instanceJobs(job.application().instance()).get(job.type()), readyAt)) .ifPresent(jobs::add); }); return Collections.unmodifiableList(jobs); } /** Returns whether the application is healthy in all other production zones. */ private boolean isUnhealthyInAnotherZone(Application application, JobId job) { for (Deployment deployment : application.require(job.application().instance()).productionDeployments().values()) { if ( ! deployment.zone().equals(job.type().zone(controller.system())) && ! controller.applications().isHealthy(new DeploymentId(job.application(), deployment.zone()))) return true; } return false; } private void abortIfOutdated(DeploymentStatus status, Map<JobId, List<DeploymentStatus.Job>> jobs, JobId job) { status.jobs().get(job) .flatMap(JobStatus::lastTriggered) .filter(last -> ! last.hasEnded()) .ifPresent(last -> { if (jobs.get(job).stream().noneMatch(versions -> versions.versions().targetsMatch(last.versions()) && versions.versions().sourcesMatchIfPresent(last.versions()))) { log.log(Level.INFO, "Aborting outdated run " + last); controller.jobController().abort(last.id()); } }); } /** Returns whether the job is free to start, and also aborts it if it's running with outdated versions. 
*/ private boolean acceptNewApplicationVersion(DeploymentStatus status, InstanceName instance) { if (status.application().deploymentSpec().instance(instance).isEmpty()) return false; if (status.hasFailures(instance)) return true; DeploymentInstanceSpec spec = status.application().deploymentSpec().requireInstance(instance); Change change = status.application().require(instance).change(); if (change.application().isPresent() && spec.upgradeRevision() == DeploymentSpec.UpgradeRevision.separate) return false; return true; } private Instance withRemainingChange(Instance instance, Change change, DeploymentStatus status) { Change remaining = change; if (status.jobsToRun(Map.of(instance.name(), change.withoutApplication())).isEmpty()) remaining = remaining.withoutPlatform(); if (status.jobsToRun(Map.of(instance.name(), change.withoutPlatform())).isEmpty()) { remaining = remaining.withoutApplication(); if (change.application().isPresent()) instance = instance.withLatestDeployed(change.application().get()); } return instance.withChange(remaining); } private Job deploymentJob(Instance instance, Versions versions, JobType jobType, JobStatus jobStatus, Instant availableSince) { return new Job(instance, versions, jobType, availableSince, jobStatus.isOutOfCapacity(), instance.change().application().isPresent()); } private static class Job { private final ApplicationId instanceId; private final JobType jobType; private final Versions versions; private final Instant availableSince; private final boolean isRetry; private final boolean isApplicationUpgrade; private Job(Instance instance, Versions versions, JobType jobType, Instant availableSince, boolean isRetry, boolean isApplicationUpgrade) { this.instanceId = instance.id(); this.jobType = jobType; this.versions = versions; this.availableSince = availableSince; this.isRetry = isRetry; this.isApplicationUpgrade = isApplicationUpgrade; } ApplicationId applicationId() { return instanceId; } JobType jobType() { return jobType; } 
Instant availableSince() { return availableSince; } boolean isRetry() { return isRetry; } boolean applicationUpgrade() { return isApplicationUpgrade; } @Override public String toString() { return jobType + " for " + instanceId + " on (" + versions.targetPlatform() + versions.sourcePlatform().map(version -> " <-- " + version).orElse("") + ", " + versions.targetApplication().id() + versions.sourceApplication().map(version -> " <-- " + version.id()).orElse("") + "), ready since " + availableSince; } } }
class DeploymentTrigger { public static final Duration maxPause = Duration.ofDays(3); private final static Logger log = Logger.getLogger(DeploymentTrigger.class.getName()); private final Controller controller; private final Clock clock; private final JobController jobs; public DeploymentTrigger(Controller controller, Clock clock) { this.controller = Objects.requireNonNull(controller, "controller cannot be null"); this.clock = Objects.requireNonNull(clock, "clock cannot be null"); this.jobs = controller.jobController(); } public DeploymentSteps steps(DeploymentInstanceSpec spec) { return new DeploymentSteps(spec, controller::system); } public void notifyOfSubmission(TenantAndApplicationId id, ApplicationVersion version, long projectId) { if (applications().getApplication(id).isEmpty()) { log.log(Level.WARNING, "Ignoring submission from project '" + projectId + "': Unknown application '" + id + "'"); return; } applications().lockApplicationOrThrow(id, application -> { application = application.withProjectId(OptionalLong.of(projectId)); application = application.withNewSubmission(version); applications().store(application); }); triggerNewRevision(id); } /** * Propagates the latest revision to ready instances. * Ready instances are those whose dependencies are complete, and which aren't blocked, and, additionally, * which aren't upgrading, or are already deploying an application change, or failing upgrade. */ public void triggerNewRevision(TenantAndApplicationId id) { applications().lockApplicationIfPresent(id, application -> { DeploymentStatus status = jobs.deploymentStatus(application.get()); for (InstanceName instanceName : application.get().deploymentSpec().instanceNames()) { Change outstanding = status.outstandingChange(instanceName); if ( outstanding.hasTargets() && status.instanceSteps().get(instanceName) .readyAt(outstanding) .map(readyAt -> ! 
readyAt.isAfter(clock.instant())).orElse(false) && acceptNewApplicationVersion(status, instanceName, outstanding.application().get())) { application = application.with(instanceName, instance -> withRemainingChange(instance, instance.change().with(outstanding.application().get()), status)); } } applications().store(application); }); } /** * Records information when a job completes (successfully or not). This information is used when deciding what to * trigger next. */ public void notifyOfCompletion(ApplicationId id) { if (applications().getInstance(id).isEmpty()) { log.log(Level.WARNING, "Ignoring completion of job of unknown application '" + id + "'"); return; } applications().lockApplicationOrThrow(TenantAndApplicationId.from(id), application -> applications().store(application.with(id.instance(), instance -> withRemainingChange(instance, instance.change(), jobs.deploymentStatus(application.get()))))); } /** * Finds and triggers jobs that can and should run but are currently not, and returns the number of triggered jobs. * * Only one job per type is triggered each run for test jobs, since their environments have limited capacity. 
*/ public long triggerReadyJobs() { List<Job> readyJobs = computeReadyJobs(); var prodJobs = new ArrayList<Job>(); var testJobs = new ArrayList<Job>(); for (Job job : readyJobs) { if (job.jobType.isTest()) testJobs.add(job); else prodJobs.add(job); } List<Job> sortedProdJobs = prodJobs.stream() .collect(groupingBy(Job::applicationId)) .values().stream() .flatMap(List::stream) .collect(Collectors.toUnmodifiableList()); Map<JobType, List<Job>> sortedTestJobsByType = testJobs.stream() .sorted(comparing(Job::isRetry) .thenComparing(Job::applicationUpgrade) .reversed() .thenComparing(Job::availableSince)) .collect(groupingBy(Job::jobType)); sortedProdJobs.forEach(this::trigger); long triggeredJobs = sortedProdJobs.size(); for (var jobs : sortedTestJobsByType.values()) { if (jobs.size() > 0) { trigger(jobs.get(0)); triggeredJobs++; } } return triggeredJobs; } /** Attempts to trigger the given job. */ public void trigger(Job job) { log.log(Level.FINE, () -> "Triggering " + job); applications().lockApplicationOrThrow(TenantAndApplicationId.from(job.applicationId()), application -> { jobs.start(job.applicationId(), job.jobType, job.versions); applications().store(application.with(job.applicationId().instance(), instance -> instance.withJobPause(job.jobType, OptionalLong.empty()))); }); } /** Force triggering of a job for given instance, with same versions as last run. 
*/ public JobId reTrigger(ApplicationId applicationId, JobType jobType) { Application application = applications().requireApplication(TenantAndApplicationId.from(applicationId)); Instance instance = application.require(applicationId.instance()); JobId job = new JobId(instance.id(), jobType); JobStatus jobStatus = jobs.jobStatus(new JobId(applicationId, jobType)); Versions versions = jobStatus.lastTriggered() .orElseThrow(() -> new IllegalArgumentException(job + " has never been triggered")) .versions(); trigger(deploymentJob(instance, versions, jobType, jobStatus, clock.instant())); return job; } /** Force triggering of a job for given instance. */ public List<JobId> forceTrigger(ApplicationId applicationId, JobType jobType, String user, boolean requireTests) { Application application = applications().requireApplication(TenantAndApplicationId.from(applicationId)); Instance instance = application.require(applicationId.instance()); JobId job = new JobId(instance.id(), jobType); if (job.type().environment().isManuallyDeployed()) return forceTriggerManualJob(job); DeploymentStatus status = jobs.deploymentStatus(application); Versions versions = Versions.from(instance.change(), application, status.deploymentFor(job), controller.readSystemVersion()); DeploymentStatus.Job toTrigger = new DeploymentStatus.Job(versions, Optional.of(controller.clock().instant()), instance.change()); Map<JobId, List<DeploymentStatus.Job>> jobs = status.testJobs(Map.of(job, List.of(toTrigger))); if (jobs.isEmpty() || ! 
requireTests) jobs = Map.of(job, List.of(toTrigger)); jobs.forEach((jobId, versionsList) -> { trigger(deploymentJob(instance, versionsList.get(0).versions(), jobId.type(), status.jobs().get(jobId).get(), clock.instant())); }); return List.copyOf(jobs.keySet()); } private List<JobId> forceTriggerManualJob(JobId job) { Run last = jobs.last(job).orElseThrow(() -> new IllegalArgumentException(job + " has never been run")); Versions target = new Versions(controller.readSystemVersion(), last.versions().targetApplication(), Optional.of(last.versions().targetPlatform()), Optional.of(last.versions().targetApplication())); jobs.start(job.application(), job.type(), target, true); return List.of(job); } /** Retrigger job. If the job is already running, it will be canceled, and retrigger enqueued. */ public Optional<JobId> reTriggerOrAddToQueue(DeploymentId deployment) { JobType jobType = JobType.from(controller.system(), deployment.zoneId()) .orElseThrow(() -> new IllegalArgumentException(Text.format("No job to trigger for (system/zone): %s/%s", controller.system().value(), deployment.zoneId().value()))); Optional<Run> existingRun = controller.jobController().active(deployment.applicationId()).stream() .filter(run -> run.id().type().equals(jobType)) .findFirst(); if (existingRun.isPresent()) { Run run = existingRun.get(); try (Lock lock = controller.curator().lockDeploymentRetriggerQueue()) { List<RetriggerEntry> retriggerEntries = controller.curator().readRetriggerEntries(); List<RetriggerEntry> newList = new ArrayList<>(retriggerEntries); RetriggerEntry requiredEntry = new RetriggerEntry(new JobId(deployment.applicationId(), jobType), run.id().number() + 1); if(newList.stream().noneMatch(entry -> entry.jobId().equals(requiredEntry.jobId()) && entry.requiredRun()>=requiredEntry.requiredRun())) { newList.add(requiredEntry); } newList = newList.stream() .filter(entry -> !(entry.jobId().equals(requiredEntry.jobId()) && entry.requiredRun() < requiredEntry.requiredRun())) 
.collect(toList()); controller.curator().writeRetriggerEntries(newList); } controller.jobController().abort(run.id()); return Optional.empty(); } else { return Optional.of(reTrigger(deployment.applicationId(), jobType)); } } /** Prevents jobs of the given type from starting, until the given time. */ public void pauseJob(ApplicationId id, JobType jobType, Instant until) { if (until.isAfter(clock.instant().plus(maxPause))) throw new IllegalArgumentException("Pause only allowed for up to " + maxPause); applications().lockApplicationOrThrow(TenantAndApplicationId.from(id), application -> applications().store(application.with(id.instance(), instance -> instance.withJobPause(jobType, OptionalLong.of(until.toEpochMilli()))))); } /** Resumes a previously paused job, letting it be triggered normally. */ public void resumeJob(ApplicationId id, JobType jobType) { applications().lockApplicationOrThrow(TenantAndApplicationId.from(id), application -> applications().store(application.with(id.instance(), instance -> instance.withJobPause(jobType, OptionalLong.empty())))); } /** Triggers a change of this application, unless it already has a change. */ public void triggerChange(ApplicationId instanceId, Change change) { applications().lockApplicationOrThrow(TenantAndApplicationId.from(instanceId), application -> { if ( ! application.get().require(instanceId.instance()).change().hasTargets()) forceChange(instanceId, change); }); } /** Overrides the given instance's platform and application changes with any contained in the given change. 
*/ public void forceChange(ApplicationId instanceId, Change change) { applications().lockApplicationOrThrow(TenantAndApplicationId.from(instanceId), application -> { applications().store(application.with(instanceId.instance(), instance -> withRemainingChange(instance, change.onTopOf(application.get().require(instanceId.instance()).change()), jobs.deploymentStatus(application.get())))); }); } /** Cancels the indicated part of the given application's change. */ public void cancelChange(ApplicationId instanceId, ChangesToCancel cancellation) { applications().lockApplicationOrThrow(TenantAndApplicationId.from(instanceId), application -> { Change change; switch (cancellation) { case ALL: change = Change.empty(); break; case VERSIONS: change = Change.empty().withPin(); break; case PLATFORM: change = application.get().require(instanceId.instance()).change().withoutPlatform(); break; case APPLICATION: change = application.get().require(instanceId.instance()).change().withoutApplication(); break; case PIN: change = application.get().require(instanceId.instance()).change().withoutPin(); break; default: throw new IllegalArgumentException("Unknown cancellation choice '" + cancellation + "'!"); } applications().store(application.with(instanceId.instance(), instance -> withRemainingChange(instance, change, jobs.deploymentStatus(application.get())))); }); } public enum ChangesToCancel { ALL, PLATFORM, APPLICATION, VERSIONS, PIN } private ApplicationController applications() { return controller.applications(); } /** Returns the set of all jobs which have changes to propagate from the upstream steps. */ private List<Job> computeReadyJobs() { return jobs.deploymentStatuses(ApplicationList.from(applications().readable()) .withProjectId() .withDeploymentSpec()) .withChanges() .asList().stream() .map(this::computeReadyJobs) .flatMap(Collection::stream) .collect(toList()); } /** Finds the next step to trigger for the given application, if any, and returns these as a list. 
*/ private List<Job> computeReadyJobs(DeploymentStatus status) { List<Job> jobs = new ArrayList<>(); Map<JobId, List<DeploymentStatus.Job>> jobsToRun = status.jobsToRun(); jobsToRun.forEach((job, versionsList) -> { versionsList.get(0).readyAt() .filter(readyAt -> ! clock.instant().isBefore(readyAt)) .filter(__ -> ! (job.type().isProduction() && isUnhealthyInAnotherZone(status.application(), job))) .filter(__ -> abortIfRunning(status, jobsToRun, job)) .map(readyAt -> deploymentJob(status.application().require(job.application().instance()), versionsList.get(0).versions(), job.type(), status.instanceJobs(job.application().instance()).get(job.type()), readyAt)) .ifPresent(jobs::add); }); return Collections.unmodifiableList(jobs); } /** Returns whether the application is healthy in all other production zones. */ private boolean isUnhealthyInAnotherZone(Application application, JobId job) { for (Deployment deployment : application.require(job.application().instance()).productionDeployments().values()) { if ( ! deployment.zone().equals(job.type().zone(controller.system())) && ! controller.applications().isHealthy(new DeploymentId(job.application(), deployment.zone()))) return true; } return false; } private void abortIfOutdated(DeploymentStatus status, Map<JobId, List<DeploymentStatus.Job>> jobs, JobId job) { status.jobs().get(job) .flatMap(JobStatus::lastTriggered) .filter(last -> ! last.hasEnded()) .ifPresent(last -> { if (jobs.get(job).stream().noneMatch(versions -> versions.versions().targetsMatch(last.versions()) && versions.versions().sourcesMatchIfPresent(last.versions()))) { log.log(Level.INFO, "Aborting outdated run " + last); controller.jobController().abort(last.id()); } }); } /** Returns whether the job is free to start, and also aborts it if it's running with outdated versions. 
*/ private boolean acceptNewApplicationVersion(DeploymentStatus status, InstanceName instance, ApplicationVersion version) { if (status.application().deploymentSpec().instance(instance).isEmpty()) return false; if (status.hasFailures(version)) return true; DeploymentInstanceSpec spec = status.application().deploymentSpec().requireInstance(instance); Change change = status.application().require(instance).change(); return change.application().isEmpty() || spec.upgradeRevision() != DeploymentSpec.UpgradeRevision.separate; } private Instance withRemainingChange(Instance instance, Change change, DeploymentStatus status) { Change remaining = change; if (status.jobsToRun(Map.of(instance.name(), change.withoutApplication())).isEmpty()) remaining = remaining.withoutPlatform(); if (status.jobsToRun(Map.of(instance.name(), change.withoutPlatform())).isEmpty()) { remaining = remaining.withoutApplication(); if (change.application().isPresent()) instance = instance.withLatestDeployed(change.application().get()); } return instance.withChange(remaining); } private Job deploymentJob(Instance instance, Versions versions, JobType jobType, JobStatus jobStatus, Instant availableSince) { return new Job(instance, versions, jobType, availableSince, jobStatus.isOutOfCapacity(), instance.change().application().isPresent()); } private static class Job { private final ApplicationId instanceId; private final JobType jobType; private final Versions versions; private final Instant availableSince; private final boolean isRetry; private final boolean isApplicationUpgrade; private Job(Instance instance, Versions versions, JobType jobType, Instant availableSince, boolean isRetry, boolean isApplicationUpgrade) { this.instanceId = instance.id(); this.jobType = jobType; this.versions = versions; this.availableSince = availableSince; this.isRetry = isRetry; this.isApplicationUpgrade = isApplicationUpgrade; } ApplicationId applicationId() { return instanceId; } JobType jobType() { return jobType; } 
Instant availableSince() { return availableSince; } boolean isRetry() { return isRetry; } boolean applicationUpgrade() { return isApplicationUpgrade; } @Override public String toString() { return jobType + " for " + instanceId + " on (" + versions.targetPlatform() + versions.sourcePlatform().map(version -> " <-- " + version).orElse("") + ", " + versions.targetApplication().id() + versions.sourceApplication().map(version -> " <-- " + version.id()).orElse("") + "), ready since " + availableSince; } } }
Should probably make a job in between production-us-east-3 and this fail, so we see that the upgrade-only run for test-us-east-3 is abandoned, and the revision is deployed earlier, to potentially fix that upgrade failure.
// Drives a deployment spec with three instances — 'instance' and 'other' in parallel, then 'last' —
// through an initial rollout, then a platform upgrade to 8.1 combined with new revisions.
// The spec mixes parallel steps, delays, production tests, a revision block window and notifications.
// NOTE(review): assertion counts and clock advances below encode the exact trigger order — do not reorder.
public void testDeployComplicatedDeploymentSpec() {
    String complicatedDeploymentSpec =
            "<deployment version='1.0' athenz-domain='domain' athenz-service='service'>\n" +
            " <parallel>\n" +
            " <instance id='instance' athenz-service='in-service'>\n" +
            " <staging />\n" +
            " <prod>\n" +
            " <parallel>\n" +
            " <region active='true'>us-west-1</region>\n" +
            " <steps>\n" +
            " <region active='true'>us-east-3</region>\n" +
            " <delay hours='2' />\n" +
            " <region active='true'>eu-west-1</region>\n" +
            " <delay hours='2' />\n" +
            " </steps>\n" +
            " <steps>\n" +
            " <delay hours='3' />\n" +
            " <region active='true'>aws-us-east-1a</region>\n" +
            " <parallel>\n" +
            " <region active='true' athenz-service='no-service'>ap-northeast-1</region>\n" +
            " <region active='true'>ap-northeast-2</region>\n" +
            " <test>aws-us-east-1a</test>\n" +
            " </parallel>\n" +
            " </steps>\n" +
            " <delay hours='3' minutes='30' />\n" +
            " </parallel>\n" +
            " <parallel>\n" +
            " <test>ap-northeast-2</test>\n" +
            " <test>ap-northeast-1</test>\n" +
            " </parallel>\n" +
            " <test>us-east-3</test>\n" +
            " <region active='true'>ap-southeast-1</region>\n" +
            " </prod>\n" +
            " <endpoints>\n" +
            " <endpoint id='foo' container-id='bar'>\n" +
            " <region>us-east-3</region>\n" +
            " </endpoint>\n" +
            " <endpoint id='nalle' container-id='frosk' />\n" +
            " <endpoint container-id='quux' />\n" +
            " </endpoints>\n" +
            " </instance>\n" +
            " <instance id='other'>\n" +
            " <upgrade policy='conservative' />\n" +
            " <test />\n" +
            " <block-change revision='true' version='false' days='sat' hours='0-23' time-zone='CET' />\n" +
            " <prod>\n" +
            " <region active='true'>eu-west-1</region>\n" +
            " <test>eu-west-1</test>\n" +
            " </prod>\n" +
            " <notifications when='failing'>\n" +
            " <email role='author' />\n" +
            " <email address='john@dev' when='failing-commit' />\n" +
            " <email address='jane@dev' />\n" +
            " </notifications>\n" +
            " </instance>\n" +
            " </parallel>\n" +
            " <instance id='last'>\n" +
            " <upgrade policy='conservative' />\n" +
            " <prod>\n" +
            " <region active='true'>eu-west-1</region>\n" +
            " </prod>\n" +
            " </instance>\n" +
            "</deployment>\n";

    ApplicationPackage applicationPackage = ApplicationPackageBuilder.fromDeploymentXml(complicatedDeploymentSpec);
    var app1 = tester.newDeploymentContext("t", "a", "instance").submit(applicationPackage);
    var app2 = tester.newDeploymentContext("t", "a", "other");
    var app3 = tester.newDeploymentContext("t", "a", "last");

    // Initial rollout of the revision through all three instances, checking how many jobs are
    // triggerable at each point as parallel steps and delays open up.
    tester.triggerJobs();
    assertEquals(2, tester.jobs().active().size());
    app1.runJob(stagingTest);
    tester.triggerJobs();
    assertEquals(1, tester.jobs().active().size());
    app2.runJob(systemTest);
    app1.runJob(productionUsWest1);
    tester.triggerJobs();
    assertEquals(2, tester.jobs().active().size());
    app1.runJob(productionUsEast3);
    tester.triggerJobs();
    assertEquals(1, tester.jobs().active().size());
    tester.clock().advance(Duration.ofHours(2)); // Delay after us-east-3 before eu-west-1.
    app1.runJob(productionEuWest1);
    tester.triggerJobs();
    assertEquals(1, tester.jobs().active().size());
    app2.assertNotRunning(testEuWest1);
    app2.runJob(productionEuWest1);
    tester.triggerJobs();
    assertEquals(1, tester.jobs().active().size());
    app2.runJob(testEuWest1);
    tester.triggerJobs();
    assertEquals(List.of(), tester.jobs().active());
    tester.clock().advance(Duration.ofHours(1)); // Completes the 3h delay before aws-us-east-1a.
    app1.runJob(productionAwsUsEast1a);
    tester.triggerJobs();
    assertEquals(3, tester.jobs().active().size()); // ap-northeast-1, ap-northeast-2 and test aws-us-east-1a in parallel.
    app1.runJob(testAwsUsEast1a);
    tester.triggerJobs();
    assertEquals(2, tester.jobs().active().size());
    app1.runJob(productionApNortheast2);
    tester.triggerJobs();
    assertEquals(1, tester.jobs().active().size());
    app1.runJob(productionApNortheast1);
    tester.triggerJobs();
    assertEquals(List.of(), tester.jobs().active()); // 3h30m delay still running.
    tester.clock().advance(Duration.ofMinutes(30));
    tester.triggerJobs();
    assertEquals(List.of(), tester.jobs().active());
    tester.clock().advance(Duration.ofMinutes(30));
    app1.runJob(testApNortheast1);
    tester.triggerJobs();
    assertEquals(1, tester.jobs().active().size());
    app1.runJob(testApNortheast2);
    tester.triggerJobs();
    assertEquals(1, tester.jobs().active().size());
    app1.runJob(testUsEast3);
    tester.triggerJobs();
    assertEquals(1, tester.jobs().active().size());
    app1.runJob(productionApSoutheast1);
    tester.triggerJobs();
    assertEquals(1, tester.jobs().active().size());
    app3.runJob(productionEuWest1);
    tester.triggerJobs();
    assertEquals(List.of(), tester.jobs().active());

    // Platform upgrade to 8.1: only 'instance' (default policy) gets the change immediately;
    // the conservative instances wait.
    tester.atMondayMorning().clock().advance(Duration.ofDays(5));
    Version version = Version.fromString("8.1");
    tester.controllerTester().upgradeSystem(version);
    tester.upgrader().maintain();
    assertEquals(Change.of(version), app1.instance().change());
    assertEquals(Change.empty(), app2.instance().change());
    assertEquals(Change.empty(), app3.instance().change());

    // A new revision is submitted while the upgrade rolls out, and both roll out together in app1.
    app1.submit(applicationPackage);
    assertEquals(Change.of(version).with(app1.application().latestVersion().get()), app1.instance().change());
    app2.runJob(systemTest);
    app1.runJob(stagingTest)
        .runJob(productionUsWest1)
        .runJob(productionUsEast3);
    tester.clock().advance(Duration.ofSeconds(1));
    app2.runJob(systemTest);
    app1.runJob(stagingTest)
        .runJob(productionUsWest1);
    tester.clock().advance(Duration.ofHours(2));
    app1.runJob(productionEuWest1);
    tester.clock().advance(Duration.ofHours(1));
    app1.runJob(productionAwsUsEast1a);
    app1.runJob(testAwsUsEast1a);
    tester.clock().advance(Duration.ofSeconds(1));
    app1.runJob(productionAwsUsEast1a);
    app1.runJob(testAwsUsEast1a);
    app1.runJob(productionApNortheast2);
    app1.runJob(productionApNortheast1);
    tester.clock().advance(Duration.ofHours(1));
    app1.runJob(testApNortheast1);
    app1.runJob(testApNortheast2);
    app1.runJob(productionApNortheast2);
    app1.runJob(productionApNortheast1);
    app1.runJob(testUsEast3);
    app1.runJob(productionApSoutheast1);
    tester.clock().advance(Duration.ofSeconds(1));
    app1.runJob(productionUsEast3);
    tester.clock().advance(Duration.ofHours(2));
    app1.runJob(productionEuWest1);
    tester.clock().advance(Duration.ofMinutes(330));
    app1.runJob(testApNortheast1);
    app1.runJob(testApNortheast2);
    app1.runJob(testUsEast3);
    app1.runJob(productionApSoutheast1);
    app1.runJob(stagingTest);
    app2.runJob(systemTest);

    // app1 is done; the upgrade now proceeds to the conservative 'other' instance.
    tester.controllerTester().computeVersionStatus();
    tester.upgrader().maintain();
    tester.outstandingChangeDeployer().run();
    tester.triggerJobs();
    assertEquals(tester.jobs().active().toString(), 1, tester.jobs().active().size());
    assertEquals(Change.empty(), app1.instance().change());
    assertEquals(Change.of(version), app2.instance().change());
    assertEquals(Change.empty(), app3.instance().change());

    // A failed production test in app2 lets the outstanding revision join its remaining change.
    app2.runJob(productionEuWest1)
        .failDeployment(testEuWest1);
    tester.clock().advance(Duration.ofDays(1));
    tester.upgrader().maintain();
    tester.outstandingChangeDeployer().run();
    assertEquals(0, tester.jobs().active().size());
    tester.triggerJobs();
    assertEquals(1, tester.jobs().active().size());
    assertEquals(Change.empty(), app1.instance().change());
    assertEquals(Change.of(version).with(app1.application().latestVersion().get()), app2.instance().change());
    app2.runJob(productionEuWest1)
        .runJob(testEuWest1);
    assertEquals(Change.empty(), app2.instance().change());
    assertEquals(Change.empty(), app3.instance().change());

    // Finally 'last' gets both the upgrade and the revision; cancelling ALL drops only what has
    // not yet deployed, leaving the revision to complete.
    tester.upgrader().maintain();
    tester.outstandingChangeDeployer().run();
    assertEquals(Change.of(version).with(app1.lastSubmission().get()), app3.instance().change());
    tester.deploymentTrigger().cancelChange(app3.instanceId(), ALL);
    tester.outstandingChangeDeployer().run();
    tester.upgrader().maintain();
    assertEquals(Change.of(app1.lastSubmission().get()), app3.instance().change());
    app3.runJob(productionEuWest1);
    tester.upgrader().maintain();
    app1.runJob(stagingTest);
    app3.runJob(productionEuWest1);
    tester.triggerJobs();
    assertEquals(List.of(), tester.jobs().active());
    assertEquals(Change.empty(), app3.instance().change());
}
app1.runJob(testUsEast3);
// Drives a deployment spec with three instances — 'instance' and 'other' in parallel, then 'last' —
// through an initial rollout, then a platform upgrade to 8.1 combined with new revisions.
// NOTE(review): this appears to be a byte-identical duplicate of a method defined earlier in this
// chunk; a class cannot contain both — confirm which copy belongs in the file.
public void testDeployComplicatedDeploymentSpec() {
    String complicatedDeploymentSpec =
            "<deployment version='1.0' athenz-domain='domain' athenz-service='service'>\n" +
            " <parallel>\n" +
            " <instance id='instance' athenz-service='in-service'>\n" +
            " <staging />\n" +
            " <prod>\n" +
            " <parallel>\n" +
            " <region active='true'>us-west-1</region>\n" +
            " <steps>\n" +
            " <region active='true'>us-east-3</region>\n" +
            " <delay hours='2' />\n" +
            " <region active='true'>eu-west-1</region>\n" +
            " <delay hours='2' />\n" +
            " </steps>\n" +
            " <steps>\n" +
            " <delay hours='3' />\n" +
            " <region active='true'>aws-us-east-1a</region>\n" +
            " <parallel>\n" +
            " <region active='true' athenz-service='no-service'>ap-northeast-1</region>\n" +
            " <region active='true'>ap-northeast-2</region>\n" +
            " <test>aws-us-east-1a</test>\n" +
            " </parallel>\n" +
            " </steps>\n" +
            " <delay hours='3' minutes='30' />\n" +
            " </parallel>\n" +
            " <parallel>\n" +
            " <test>ap-northeast-2</test>\n" +
            " <test>ap-northeast-1</test>\n" +
            " </parallel>\n" +
            " <test>us-east-3</test>\n" +
            " <region active='true'>ap-southeast-1</region>\n" +
            " </prod>\n" +
            " <endpoints>\n" +
            " <endpoint id='foo' container-id='bar'>\n" +
            " <region>us-east-3</region>\n" +
            " </endpoint>\n" +
            " <endpoint id='nalle' container-id='frosk' />\n" +
            " <endpoint container-id='quux' />\n" +
            " </endpoints>\n" +
            " </instance>\n" +
            " <instance id='other'>\n" +
            " <upgrade policy='conservative' />\n" +
            " <test />\n" +
            " <block-change revision='true' version='false' days='sat' hours='0-23' time-zone='CET' />\n" +
            " <prod>\n" +
            " <region active='true'>eu-west-1</region>\n" +
            " <test>eu-west-1</test>\n" +
            " </prod>\n" +
            " <notifications when='failing'>\n" +
            " <email role='author' />\n" +
            " <email address='john@dev' when='failing-commit' />\n" +
            " <email address='jane@dev' />\n" +
            " </notifications>\n" +
            " </instance>\n" +
            " </parallel>\n" +
            " <instance id='last'>\n" +
            " <upgrade policy='conservative' />\n" +
            " <prod>\n" +
            " <region active='true'>eu-west-1</region>\n" +
            " </prod>\n" +
            " </instance>\n" +
            "</deployment>\n";

    ApplicationPackage applicationPackage = ApplicationPackageBuilder.fromDeploymentXml(complicatedDeploymentSpec);
    var app1 = tester.newDeploymentContext("t", "a", "instance").submit(applicationPackage);
    var app2 = tester.newDeploymentContext("t", "a", "other");
    var app3 = tester.newDeploymentContext("t", "a", "last");

    // Initial rollout of the revision through all three instances, checking triggerable job counts
    // at each point as parallel steps and delays open up.
    tester.triggerJobs();
    assertEquals(2, tester.jobs().active().size());
    app1.runJob(stagingTest);
    tester.triggerJobs();
    assertEquals(1, tester.jobs().active().size());
    app2.runJob(systemTest);
    app1.runJob(productionUsWest1);
    tester.triggerJobs();
    assertEquals(2, tester.jobs().active().size());
    app1.runJob(productionUsEast3);
    tester.triggerJobs();
    assertEquals(1, tester.jobs().active().size());
    tester.clock().advance(Duration.ofHours(2)); // Delay after us-east-3 before eu-west-1.
    app1.runJob(productionEuWest1);
    tester.triggerJobs();
    assertEquals(1, tester.jobs().active().size());
    app2.assertNotRunning(testEuWest1);
    app2.runJob(productionEuWest1);
    tester.triggerJobs();
    assertEquals(1, tester.jobs().active().size());
    app2.runJob(testEuWest1);
    tester.triggerJobs();
    assertEquals(List.of(), tester.jobs().active());
    tester.clock().advance(Duration.ofHours(1)); // Completes the 3h delay before aws-us-east-1a.
    app1.runJob(productionAwsUsEast1a);
    tester.triggerJobs();
    assertEquals(3, tester.jobs().active().size()); // ap-northeast-1, ap-northeast-2 and test aws-us-east-1a in parallel.
    app1.runJob(testAwsUsEast1a);
    tester.triggerJobs();
    assertEquals(2, tester.jobs().active().size());
    app1.runJob(productionApNortheast2);
    tester.triggerJobs();
    assertEquals(1, tester.jobs().active().size());
    app1.runJob(productionApNortheast1);
    tester.triggerJobs();
    assertEquals(List.of(), tester.jobs().active()); // 3h30m delay still running.
    tester.clock().advance(Duration.ofMinutes(30));
    tester.triggerJobs();
    assertEquals(List.of(), tester.jobs().active());
    tester.clock().advance(Duration.ofMinutes(30));
    app1.runJob(testApNortheast1);
    tester.triggerJobs();
    assertEquals(1, tester.jobs().active().size());
    app1.runJob(testApNortheast2);
    tester.triggerJobs();
    assertEquals(1, tester.jobs().active().size());
    app1.runJob(testUsEast3);
    tester.triggerJobs();
    assertEquals(1, tester.jobs().active().size());
    app1.runJob(productionApSoutheast1);
    tester.triggerJobs();
    assertEquals(1, tester.jobs().active().size());
    app3.runJob(productionEuWest1);
    tester.triggerJobs();
    assertEquals(List.of(), tester.jobs().active());

    // Platform upgrade to 8.1: only 'instance' (default policy) gets the change immediately;
    // the conservative instances wait.
    tester.atMondayMorning().clock().advance(Duration.ofDays(5));
    Version version = Version.fromString("8.1");
    tester.controllerTester().upgradeSystem(version);
    tester.upgrader().maintain();
    assertEquals(Change.of(version), app1.instance().change());
    assertEquals(Change.empty(), app2.instance().change());
    assertEquals(Change.empty(), app3.instance().change());

    // A new revision is submitted while the upgrade rolls out, and both roll out together in app1.
    app1.submit(applicationPackage);
    assertEquals(Change.of(version).with(app1.application().latestVersion().get()), app1.instance().change());
    app2.runJob(systemTest);
    app1.runJob(stagingTest)
        .runJob(productionUsWest1)
        .runJob(productionUsEast3);
    tester.clock().advance(Duration.ofSeconds(1));
    app2.runJob(systemTest);
    app1.runJob(stagingTest)
        .runJob(productionUsWest1);
    tester.clock().advance(Duration.ofHours(2));
    app1.runJob(productionEuWest1);
    tester.clock().advance(Duration.ofHours(1));
    app1.runJob(productionAwsUsEast1a);
    app1.runJob(testAwsUsEast1a);
    tester.clock().advance(Duration.ofSeconds(1));
    app1.runJob(productionAwsUsEast1a);
    app1.runJob(testAwsUsEast1a);
    app1.runJob(productionApNortheast2);
    app1.runJob(productionApNortheast1);
    tester.clock().advance(Duration.ofHours(1));
    app1.runJob(testApNortheast1);
    app1.runJob(testApNortheast2);
    app1.runJob(productionApNortheast2);
    app1.runJob(productionApNortheast1);
    app1.runJob(testUsEast3);
    app1.runJob(productionApSoutheast1);
    tester.clock().advance(Duration.ofSeconds(1));
    app1.runJob(productionUsEast3);
    tester.clock().advance(Duration.ofHours(2));
    app1.runJob(productionEuWest1);
    tester.clock().advance(Duration.ofMinutes(330));
    app1.runJob(testApNortheast1);
    app1.runJob(testApNortheast2);
    app1.runJob(testUsEast3);
    app1.runJob(productionApSoutheast1);
    app1.runJob(stagingTest);
    app2.runJob(systemTest);

    // app1 is done; the upgrade now proceeds to the conservative 'other' instance.
    tester.controllerTester().computeVersionStatus();
    tester.upgrader().maintain();
    tester.outstandingChangeDeployer().run();
    tester.triggerJobs();
    assertEquals(tester.jobs().active().toString(), 1, tester.jobs().active().size());
    assertEquals(Change.empty(), app1.instance().change());
    assertEquals(Change.of(version), app2.instance().change());
    assertEquals(Change.empty(), app3.instance().change());

    // A failed production test in app2 lets the outstanding revision join its remaining change.
    app2.runJob(productionEuWest1)
        .failDeployment(testEuWest1);
    tester.clock().advance(Duration.ofDays(1));
    tester.upgrader().maintain();
    tester.outstandingChangeDeployer().run();
    assertEquals(0, tester.jobs().active().size());
    tester.triggerJobs();
    assertEquals(1, tester.jobs().active().size());
    assertEquals(Change.empty(), app1.instance().change());
    assertEquals(Change.of(version).with(app1.application().latestVersion().get()), app2.instance().change());
    app2.runJob(productionEuWest1)
        .runJob(testEuWest1);
    assertEquals(Change.empty(), app2.instance().change());
    assertEquals(Change.empty(), app3.instance().change());

    // Finally 'last' gets both the upgrade and the revision; cancelling ALL drops only what has
    // not yet deployed, leaving the revision to complete.
    tester.upgrader().maintain();
    tester.outstandingChangeDeployer().run();
    assertEquals(Change.of(version).with(app1.lastSubmission().get()), app3.instance().change());
    tester.deploymentTrigger().cancelChange(app3.instanceId(), ALL);
    tester.outstandingChangeDeployer().run();
    tester.upgrader().maintain();
    assertEquals(Change.of(app1.lastSubmission().get()), app3.instance().change());
    app3.runJob(productionEuWest1);
    tester.upgrader().maintain();
    app1.runJob(stagingTest);
    app3.runJob(productionEuWest1);
    tester.triggerJobs();
    assertEquals(List.of(), tester.jobs().active());
    assertEquals(Change.empty(), app3.instance().change());
}
class DeploymentTriggerTest { private final DeploymentTester tester = new DeploymentTester(); @Test public void testTriggerFailing() { ApplicationPackage applicationPackage = new ApplicationPackageBuilder() .upgradePolicy("default") .region("us-west-1") .build(); var app = tester.newDeploymentContext().submit(applicationPackage).deploy(); Version version = Version.fromString("6.3"); tester.controllerTester().upgradeSystem(version); tester.upgrader().maintain(); app.failDeployment(stagingTest); tester.triggerJobs(); assertEquals("Retried dead job", 2, tester.jobs().active().size()); app.assertRunning(stagingTest); app.runJob(stagingTest); app.assertRunning(systemTest); assertEquals(1, tester.jobs().active().size()); app.timeOutUpgrade(systemTest); tester.triggerJobs(); assertEquals("Job is retried on failure", 1, tester.jobs().active().size()); app.runJob(systemTest); tester.triggerJobs(); app.assertRunning(productionUsWest1); tester.applications().lockApplicationOrThrow(app.application().id(), locked -> tester.applications().store(locked.withProjectId(OptionalLong.empty()))); app.timeOutConvergence(productionUsWest1); tester.triggerJobs(); assertEquals("Job is not triggered when no projectId is present", 0, tester.jobs().active().size()); } @Test public void separateRevisionMakesApplicationChangeWaitForPreviousToComplete() { DeploymentContext app = tester.newDeploymentContext(); ApplicationPackage applicationPackage = new ApplicationPackageBuilder() .upgradeRevision(null) .region("us-east-3") .test("us-east-3") .build(); app.submit(applicationPackage).runJob(systemTest).runJob(stagingTest).runJob(productionUsEast3); Optional<ApplicationVersion> v0 = app.lastSubmission(); app.submit(applicationPackage); Optional<ApplicationVersion> v1 = app.lastSubmission(); assertEquals(v0, app.instance().change().application()); app.runJob(systemTest).runJob(stagingTest); app.runJob(testUsEast3); assertEquals(Optional.empty(), app.instance().change().application()); 
tester.outstandingChangeDeployer().run(); assertEquals(v1, app.instance().change().application()); app.runJob(productionUsEast3).failDeployment(testUsEast3); app.submit(applicationPackage); Optional<ApplicationVersion> v2 = app.lastSubmission(); assertEquals(v2, app.instance().change().application()); } @Test public void leadingUpgradeAllowsApplicationChangeWhileUpgrading() { var applicationPackage = new ApplicationPackageBuilder().region("us-east-3") .upgradeRollout("leading") .build(); var app = tester.newDeploymentContext(); app.submit(applicationPackage).deploy(); Change upgrade = Change.of(new Version("7.8.9")); tester.controllerTester().upgradeSystem(upgrade.platform().get()); tester.upgrader().maintain(); app.runJob(systemTest).runJob(stagingTest); tester.triggerJobs(); app.assertRunning(productionUsEast3); assertEquals(upgrade, app.instance().change()); app.submit(applicationPackage); assertEquals(upgrade.with(app.lastSubmission().get()), app.instance().change()); } @Test public void abortsJobsOnNewApplicationChange() { var app = tester.newDeploymentContext(); app.submit() .runJob(systemTest) .runJob(stagingTest); tester.triggerJobs(); RunId id = tester.jobs().last(app.instanceId(), productionUsCentral1).get().id(); assertTrue(tester.jobs().active(id).isPresent()); app.submit(); assertTrue(tester.jobs().active(id).isPresent()); tester.triggerJobs(); tester.runner().run(); assertTrue(tester.jobs().active(id).isPresent()); app.runJob(systemTest).runJob(stagingTest).runJob(stagingTest); tester.triggerJobs(); app.jobAborted(productionUsCentral1); app.runJob(productionUsCentral1).runJob(productionUsWest1).runJob(productionUsEast3); assertEquals(Change.empty(), app.instance().change()); tester.controllerTester().upgradeSystem(new Version("8.9")); tester.upgrader().maintain(); app.runJob(systemTest).runJob(stagingTest); tester.clock().advance(Duration.ofMinutes(1)); tester.triggerJobs(); app.submit(); app.runJob(systemTest).runJob(stagingTest); 
tester.triggerJobs(); tester.runner().run(); assertEquals(EnumSet.of(productionUsCentral1), tester.jobs().active().stream() .map(run -> run.id().type()) .collect(Collectors.toCollection(() -> EnumSet.noneOf(JobType.class)))); } @Test public void deploymentSpecWithDelays() { ApplicationPackage applicationPackage = new ApplicationPackageBuilder() .systemTest() .delay(Duration.ofSeconds(30)) .region("us-west-1") .delay(Duration.ofMinutes(2)) .delay(Duration.ofMinutes(2)) .region("us-central-1") .delay(Duration.ofMinutes(10)) .build(); var app = tester.newDeploymentContext().submit(applicationPackage); app.runJob(systemTest); tester.clock().advance(Duration.ofSeconds(15)); app.runJob(stagingTest); tester.triggerJobs(); assertEquals(0, tester.jobs().active().size()); tester.clock().advance(Duration.ofSeconds(15)); tester.triggerJobs(); assertEquals(1, tester.jobs().active().size()); app.assertRunning(productionUsWest1); tester.clock().advance(Duration.ofMinutes(3)); tester.triggerJobs(); assertEquals(1, tester.jobs().active().size()); app.assertRunning(productionUsWest1); app.runJob(productionUsWest1); tester.triggerJobs(); assertTrue("No more jobs triggered at this time", tester.jobs().active().isEmpty()); tester.clock().advance(Duration.ofMinutes(3)); tester.triggerJobs(); assertTrue("No more jobs triggered at this time", tester.jobs().active().isEmpty()); tester.clock().advance(Duration.ofMinutes(1)); tester.triggerJobs(); app.runJob(productionUsCentral1); assertTrue("All jobs consumed", tester.jobs().active().isEmpty()); tester.clock().advance(Duration.ofMinutes(10)); tester.triggerJobs(); assertTrue("All jobs consumed", tester.jobs().active().isEmpty()); } @Test public void deploymentSpecWithParallelDeployments() { ApplicationPackage applicationPackage = new ApplicationPackageBuilder() .region("us-central-1") .parallel("us-west-1", "us-east-3") .region("eu-west-1") .build(); var app = tester.newDeploymentContext().submit(applicationPackage); 
app.runJob(systemTest).runJob(stagingTest); tester.triggerJobs(); assertEquals(1, tester.jobs().active().size()); app.runJob(productionUsCentral1); tester.triggerJobs(); assertEquals(2, tester.jobs().active().size()); app.assertRunning(productionUsEast3); app.assertRunning(productionUsWest1); app.runJob(productionUsWest1); assertEquals(1, tester.jobs().active().size()); app.assertRunning(productionUsEast3); app.runJob(productionUsEast3); tester.triggerJobs(); assertEquals(1, tester.jobs().active().size()); app.runJob(productionEuWest1); assertTrue("All jobs consumed", tester.jobs().active().isEmpty()); } @Test public void testNoOtherChangesDuringSuspension() { ApplicationPackage applicationPackage = new ApplicationPackageBuilder() .region("us-central-1") .parallel("us-west-1", "us-east-3") .build(); var application = tester.newDeploymentContext().submit().deploy(); tester.configServer().setSuspension(application.deploymentIdIn(ZoneId.from("prod", "us-central-1")), true); application.submit() .runJob(systemTest) .runJob(stagingTest) .runJob(productionUsCentral1); tester.triggerJobs(); application.assertNotRunning(productionUsEast3); application.assertNotRunning(productionUsWest1); tester.configServer().setSuspension(application.deploymentIdIn(ZoneId.from("prod", "us-central-1")), false); tester.triggerJobs(); application.runJob(productionUsWest1).runJob(productionUsEast3); assertEquals(Change.empty(), application.instance().change()); } @Test public void testBlockRevisionChange() { tester.at(Instant.parse("2017-09-26T17:30:00.00Z")); Version version = Version.fromString("6.2"); tester.controllerTester().upgradeSystem(version); ApplicationPackage applicationPackage = new ApplicationPackageBuilder() .upgradePolicy("canary") .blockChange(true, false, "tue", "18-19", "UTC") .region("us-west-1") .region("us-central-1") .region("us-east-3") .build(); var app = tester.newDeploymentContext().submit(applicationPackage).deploy(); tester.clock().advance(Duration.ofHours(1)); 
tester.triggerJobs(); assertEquals(0, tester.jobs().active().size()); app.submit(applicationPackage); assertTrue(app.deploymentStatus().outstandingChange(app.instance().name()).hasTargets()); app.runJob(systemTest).runJob(stagingTest); tester.outstandingChangeDeployer().run(); assertTrue(app.deploymentStatus().outstandingChange(app.instance().name()).hasTargets()); tester.triggerJobs(); assertEquals(emptyList(), tester.jobs().active()); tester.clock().advance(Duration.ofHours(2)); tester.outstandingChangeDeployer().run(); assertFalse(app.deploymentStatus().outstandingChange(app.instance().name()).hasTargets()); tester.triggerJobs(); app.assertRunning(productionUsWest1); } @Test public void testCompletionOfPartOfChangeDuringBlockWindow() { tester.at(Instant.parse("2017-09-26T17:30:00.00Z")); ApplicationPackage applicationPackage = new ApplicationPackageBuilder() .blockChange(true, true, "tue", "18", "UTC") .region("us-west-1") .region("us-east-3") .build(); var app = tester.newDeploymentContext().submit(applicationPackage).deploy(); Version v1 = Version.fromString("6.1"); Version v2 = Version.fromString("6.2"); tester.controllerTester().upgradeSystem(v2); tester.upgrader().maintain(); app.runJob(stagingTest).runJob(systemTest); tester.clock().advance(Duration.ofHours(1)); app.submit(applicationPackage); app.runJob(productionUsWest1); assertEquals(1, app.instanceJobs().get(productionUsWest1).lastSuccess().get().versions().targetApplication().buildNumber().getAsLong()); assertEquals(2, app.deploymentStatus().outstandingChange(app.instance().name()).application().get().buildNumber().getAsLong()); tester.triggerJobs(); assertEquals(3, tester.jobs().active().size()); app.runJob(productionUsEast3); assertEquals(2, tester.jobs().active().size()); assertEquals(Change.empty(), app.instance().change()); assertTrue(app.deploymentStatus().outstandingChange(app.instance().name()).hasTargets()); app.runJob(stagingTest).runJob(systemTest); 
tester.clock().advance(Duration.ofHours(1)); tester.outstandingChangeDeployer().run(); assertTrue(app.instance().change().hasTargets()); assertFalse(app.deploymentStatus().outstandingChange(app.instance().name()).hasTargets()); app.runJob(productionUsWest1).runJob(productionUsEast3); assertFalse(app.instance().change().hasTargets()); } @Test public void testJobPause() { ApplicationPackage applicationPackage = new ApplicationPackageBuilder() .region("us-west-1") .region("us-east-3") .build(); var app = tester.newDeploymentContext().submit(applicationPackage).deploy(); tester.controllerTester().upgradeSystem(new Version("9.8.7")); tester.upgrader().maintain(); tester.deploymentTrigger().pauseJob(app.instanceId(), productionUsWest1, tester.clock().instant().plus(Duration.ofSeconds(1))); tester.deploymentTrigger().pauseJob(app.instanceId(), productionUsEast3, tester.clock().instant().plus(Duration.ofSeconds(3))); app.runJob(systemTest).runJob(stagingTest); tester.triggerJobs(); app.assertNotRunning(productionUsWest1); tester.clock().advance(Duration.ofMillis(1500)); tester.triggerJobs(); app.assertRunning(productionUsWest1); tester.deploymentTrigger().pauseJob(app.instanceId(), productionUsWest1, tester.clock().instant().plus(Duration.ofSeconds(1))); app.failDeployment(productionUsWest1); tester.triggerJobs(); app.assertNotRunning(productionUsWest1); tester.clock().advance(Duration.ofMillis(1000)); tester.triggerJobs(); app.runJob(productionUsWest1); tester.triggerJobs(); app.assertNotRunning(productionUsEast3); tester.deploymentTrigger().forceTrigger(app.instanceId(), productionUsEast3, "mrTrigger", true); app.assertRunning(productionUsEast3); assertFalse(app.instance().jobPause(productionUsEast3).isPresent()); } @Test public void applicationVersionIsNotDowngraded() { ApplicationPackage applicationPackage = new ApplicationPackageBuilder() .region("us-central-1") .region("eu-west-1") .build(); var app = tester.newDeploymentContext().submit(applicationPackage).deploy(); 
app.submit(applicationPackage) .runJob(systemTest) .runJob(stagingTest) .timeOutUpgrade(productionUsCentral1); ApplicationVersion appVersion1 = app.lastSubmission().get(); assertEquals(appVersion1, app.deployment(ZoneId.from("prod.us-central-1")).applicationVersion()); tester.deploymentTrigger().cancelChange(app.instanceId(), PLATFORM); assertEquals(Change.of(appVersion1), app.instance().change()); tester.deploymentTrigger().cancelChange(app.instanceId(), ALL); assertEquals(Change.empty(), app.instance().change()); Version version1 = new Version("6.2"); tester.controllerTester().upgradeSystem(version1); tester.upgrader().maintain(); app.runJob(systemTest).runJob(stagingTest).failDeployment(productionUsCentral1); app.runJob(systemTest).runJob(stagingTest); app.runJob(productionUsCentral1).runJob(productionEuWest1); assertEquals(appVersion1, app.deployment(ZoneId.from("prod.us-central-1")).applicationVersion()); } @Test public void downgradingApplicationVersionWorks() { var app = tester.newDeploymentContext().submit().deploy(); ApplicationVersion appVersion0 = app.lastSubmission().get(); assertEquals(Optional.of(appVersion0), app.instance().latestDeployed()); app.submit().deploy(); ApplicationVersion appVersion1 = app.lastSubmission().get(); assertEquals(Optional.of(appVersion1), app.instance().latestDeployed()); tester.deploymentTrigger().forceChange(app.instanceId(), Change.of(appVersion0)); assertEquals(Change.of(appVersion0), app.instance().change()); app.runJob(stagingTest) .runJob(productionUsCentral1) .runJob(productionUsEast3) .runJob(productionUsWest1); assertEquals(Change.empty(), app.instance().change()); assertEquals(appVersion0, app.instance().deployments().get(productionUsEast3.zone(tester.controller().system())).applicationVersion()); assertEquals(Optional.of(appVersion0), app.instance().latestDeployed()); } @Test public void settingANoOpChangeIsANoOp() { var app = tester.newDeploymentContext().submit(); assertEquals(Optional.empty(), 
app.instance().latestDeployed()); app.deploy(); ApplicationVersion appVersion0 = app.lastSubmission().get(); assertEquals(Optional.of(appVersion0), app.instance().latestDeployed()); app.submit().deploy(); ApplicationVersion appVersion1 = app.lastSubmission().get(); assertEquals(Optional.of(appVersion1), app.instance().latestDeployed()); assertEquals(Change.empty(), app.instance().change()); tester.deploymentTrigger().forceChange(app.instanceId(), Change.of(appVersion1)); assertEquals(Change.empty(), app.instance().change()); assertEquals(Optional.of(appVersion1), app.instance().latestDeployed()); } @Test public void stepIsCompletePreciselyWhenItShouldBe() { var app1 = tester.newDeploymentContext("tenant1", "app1", "default"); var app2 = tester.newDeploymentContext("tenant1", "app2", "default"); ApplicationPackage applicationPackage = new ApplicationPackageBuilder() .region("us-central-1") .region("eu-west-1") .build(); Version version0 = Version.fromString("7.0"); tester.controllerTester().upgradeSystem(version0); app1.submit(applicationPackage).deploy(); app2.submit(applicationPackage).deploy(); Version version1 = Version.fromString("7.1"); tester.controllerTester().upgradeSystem(version1); tester.upgrader().maintain(); app2.deployPlatform(version1); tester.deploymentTrigger().cancelChange(app1.instanceId(), ALL); Version version2 = Version.fromString("7.2"); tester.controllerTester().upgradeSystem(version2); tester.upgrader().maintain(); tester.triggerJobs(); app1.jobAborted(systemTest).jobAborted(stagingTest); app1.runJob(systemTest).runJob(stagingTest).timeOutConvergence(productionUsCentral1); assertEquals(version2, app1.deployment(productionUsCentral1.zone(main)).version()); Instant triggered = app1.instanceJobs().get(productionUsCentral1).lastTriggered().get().start(); tester.clock().advance(Duration.ofHours(1)); tester.upgrader().overrideConfidence(version2, VespaVersion.Confidence.broken); tester.controllerTester().computeVersionStatus(); 
tester.upgrader().maintain(); assertEquals("Change becomes latest non-broken version", Change.of(version1), app1.instance().change()); app1.runJob(systemTest).runJob(stagingTest) .failDeployment(productionEuWest1); assertEquals(triggered, app1.instanceJobs().get(productionUsCentral1).lastTriggered().get().start()); ApplicationVersion revision1 = app1.lastSubmission().get(); app1.submit(applicationPackage); ApplicationVersion revision2 = app1.lastSubmission().get(); app1.runJob(systemTest).runJob(stagingTest); assertEquals(Change.of(version1).with(revision2), app1.instance().change()); tester.triggerJobs(); app1.assertRunning(productionUsCentral1); assertEquals(version2, app1.instance().deployments().get(productionUsCentral1.zone(main)).version()); assertEquals(revision1, app1.deployment(productionUsCentral1.zone(main)).applicationVersion()); assertTrue(triggered.isBefore(app1.instanceJobs().get(productionUsCentral1).lastTriggered().get().start())); app1.timeOutUpgrade(productionUsCentral1); assertEquals(version2, app1.deployment(productionUsCentral1.zone(main)).version()); assertEquals(revision2, app1.deployment(productionUsCentral1.zone(main)).applicationVersion()); tester.clock().advance(Duration.ofHours(3)); tester.triggerJobs(); app1.assertNotRunning(productionUsCentral1); app1.runJob(systemTest) .runJob(stagingTest) .runJob(productionEuWest1) .runJob(productionUsCentral1) .runJob(productionEuWest1); assertEquals(Change.empty(), app1.instance().change()); assertEquals(Optional.of(RunStatus.success), app1.instanceJobs().get(productionUsCentral1).lastStatus()); } @Test public void eachParallelDeployTargetIsTested() { ApplicationPackage applicationPackage = new ApplicationPackageBuilder() .parallel("eu-west-1", "us-east-3") .build(); var app = tester.newDeploymentContext().submit(applicationPackage).deploy(); Version v1 = new Version("6.1"); Version v2 = new Version("6.2"); tester.controllerTester().upgradeSystem(v2); tester.upgrader().maintain(); 
app.runJob(systemTest).runJob(stagingTest); app.timeOutConvergence(productionEuWest1); tester.deploymentTrigger().cancelChange(app.instanceId(), PLATFORM); assertEquals(v2, app.deployment(productionEuWest1.zone(main)).version()); assertEquals(v1, app.deployment(productionUsEast3.zone(main)).version()); app.submit(applicationPackage); tester.triggerJobs(); Version firstTested = app.instanceJobs().get(systemTest).lastTriggered().get().versions().targetPlatform(); assertEquals(firstTested, app.instanceJobs().get(stagingTest).lastTriggered().get().versions().targetPlatform()); app.runJob(systemTest).runJob(stagingTest); tester.triggerJobs(); assertNotEquals(firstTested, app.instanceJobs().get(systemTest).lastTriggered().get().versions().targetPlatform()); assertNotEquals(firstTested, app.instanceJobs().get(stagingTest).lastTriggered().get().versions().targetPlatform()); app.runJob(systemTest).runJob(stagingTest); app.triggerJobs().jobAborted(productionUsEast3); app.failDeployment(productionEuWest1).failDeployment(productionUsEast3) .runJob(productionEuWest1).runJob(productionUsEast3); assertFalse(app.instance().change().hasTargets()); assertEquals(2, app.instanceJobs().get(productionEuWest1).lastSuccess().get().versions().targetApplication().buildNumber().getAsLong()); assertEquals(2, app.instanceJobs().get(productionUsEast3).lastSuccess().get().versions().targetApplication().buildNumber().getAsLong()); } @Test public void retriesFailingJobs() { ApplicationPackage applicationPackage = new ApplicationPackageBuilder() .region("us-central-1") .build(); var app = tester.newDeploymentContext().submit(applicationPackage).deploy(); app.submit(applicationPackage).runJob(stagingTest).failDeployment(systemTest); app.failDeployment(systemTest); tester.triggerJobs(); app.assertRunning(systemTest); tester.clock().advance(Duration.ofSeconds(1)); app.failDeployment(systemTest); tester.triggerJobs(); app.assertNotRunning(systemTest); 
tester.clock().advance(Duration.ofMinutes(10).plus(Duration.ofSeconds(1))); tester.triggerJobs(); app.assertRunning(systemTest); app.failDeployment(systemTest); tester.clock().advance(Duration.ofMinutes(15)); tester.triggerJobs(); app.assertNotRunning(systemTest); tester.clock().advance(Duration.ofSeconds(2)); tester.triggerJobs(); app.assertRunning(systemTest); app.failDeployment(systemTest); tester.triggerJobs(); app.assertNotRunning(systemTest); app.submit(applicationPackage).deploy(); assertTrue("Deployment completed", tester.jobs().active().isEmpty()); } @Test public void testPlatformVersionSelection() { ApplicationPackage applicationPackage = new ApplicationPackageBuilder() .region("us-west-1") .build(); Version version1 = tester.controller().readSystemVersion(); var app1 = tester.newDeploymentContext(); app1.submit(applicationPackage).deploy(); assertEquals("First deployment gets system version", version1, app1.application().oldestDeployedPlatform().get()); assertEquals(version1, tester.configServer().lastPrepareVersion().get()); Version version2 = new Version(version1.getMajor(), version1.getMinor() + 1); tester.controllerTester().upgradeSystem(version2); applicationPackage = new ApplicationPackageBuilder() .region("us-west-1") .region("us-east-3") .build(); app1.submit(applicationPackage).deploy(); assertEquals("Application change preserves version, and new region gets oldest version too", version1, app1.application().oldestDeployedPlatform().get()); assertEquals(version1, tester.configServer().lastPrepareVersion().get()); assertFalse("Change deployed", app1.instance().change().hasTargets()); tester.upgrader().maintain(); app1.deployPlatform(version2); assertEquals("Version upgrade changes version", version2, app1.application().oldestDeployedPlatform().get()); assertEquals(version2, tester.configServer().lastPrepareVersion().get()); } @Test public void requeueOutOfCapacityStagingJob() { ApplicationPackage applicationPackage = new 
ApplicationPackageBuilder() .region("us-east-3") .build(); var app1 = tester.newDeploymentContext("tenant1", "app1", "default").submit(applicationPackage); var app2 = tester.newDeploymentContext("tenant2", "app2", "default").submit(applicationPackage); var app3 = tester.newDeploymentContext("tenant3", "app3", "default").submit(applicationPackage); app2.runJob(systemTest); tester.clock().advance(Duration.ofMinutes(1)); app1.runJob(systemTest); tester.clock().advance(Duration.ofMinutes(1)); app3.runJob(systemTest); tester.triggerJobs(); assertEquals(3, tester.jobs().active().size()); tester.abortAll(); assertEquals(List.of(), tester.jobs().active()); tester.readyJobsTrigger().maintain(); assertEquals(1, tester.jobs().active().size()); tester.readyJobsTrigger().maintain(); assertEquals(2, tester.jobs().active().size()); tester.readyJobsTrigger().maintain(); assertEquals(3, tester.jobs().active().size()); app3.outOfCapacity(stagingTest); app1.abortJob(stagingTest); app2.abortJob(stagingTest); tester.readyJobsTrigger().maintain(); app3.assertRunning(stagingTest); assertEquals(1, tester.jobs().active().size()); tester.readyJobsTrigger().maintain(); assertEquals(2, tester.jobs().active().size()); tester.readyJobsTrigger().maintain(); assertEquals(3, tester.jobs().active().size()); app2.deploy(); app3.deploy(); app1.runJob(stagingTest); assertEquals(0, tester.jobs().active().size()); tester.controllerTester().upgradeSystem(new Version("6.2")); tester.upgrader().maintain(); app1.submit(applicationPackage); tester.readyJobsTrigger().run(); app1.assertRunning(systemTest); app1.assertRunning(stagingTest); assertEquals(2, tester.jobs().active().size()); tester.triggerJobs(); app3.outOfCapacity(systemTest); app1.abortJob(systemTest); app1.abortJob(stagingTest); app2.abortJob(systemTest); app2.abortJob(stagingTest); app3.abortJob(stagingTest); assertEquals(0, tester.jobs().active().size()); assertTrue(app1.instance().change().application().isPresent()); 
assertFalse(app2.instance().change().application().isPresent()); assertFalse(app3.instance().change().application().isPresent()); tester.readyJobsTrigger().maintain(); app1.assertRunning(stagingTest); app3.assertRunning(systemTest); assertEquals(2, tester.jobs().active().size()); tester.readyJobsTrigger().maintain(); app1.assertRunning(systemTest); assertEquals(4, tester.jobs().active().size()); tester.readyJobsTrigger().maintain(); app3.assertRunning(stagingTest); app2.assertRunning(stagingTest); app2.assertRunning(systemTest); assertEquals(6, tester.jobs().active().size()); } @Test public void testUserInstancesNotInDeploymentSpec() { var app = tester.newDeploymentContext(); tester.controller().applications().createInstance(app.application().id().instance("user")); app.submit().deploy(); } @Test public void testMultipleInstancesWithDifferentChanges() { DeploymentContext i1 = tester.newDeploymentContext("t", "a", "i1"); DeploymentContext i2 = tester.newDeploymentContext("t", "a", "i2"); DeploymentContext i3 = tester.newDeploymentContext("t", "a", "i3"); DeploymentContext i4 = tester.newDeploymentContext("t", "a", "i4"); ApplicationPackage applicationPackage = ApplicationPackageBuilder .fromDeploymentXml("<deployment version='1'>\n" + " <upgrade revision='separate' />\n" + " <parallel>\n" + " <instance id='i1'>\n" + " <prod>\n" + " <region>us-east-3</region>\n" + " <delay hours='6' />\n" + " </prod>\n" + " </instance>\n" + " <instance id='i2'>\n" + " <prod>\n" + " <region>us-east-3</region>\n" + " </prod>\n" + " </instance>\n" + " </parallel>\n" + " <instance id='i3'>\n" + " <prod>\n" + " <region>us-east-3</region>\n" + " <delay hours='18' />\n" + " <test>us-east-3</test>\n" + " </prod>\n" + " </instance>\n" + " <instance id='i4'>\n" + " <test />\n" + " <staging />\n" + " <prod>\n" + " <region>us-east-3</region>\n" + " </prod>\n" + " </instance>\n" + "</deployment>\n"); i1.submit(applicationPackage); Optional<ApplicationVersion> v0 = i1.lastSubmission(); 
tester.outstandingChangeDeployer().run(); assertEquals(v0, i1.instance().change().application()); assertEquals(v0, i2.instance().change().application()); assertEquals(Optional.empty(), i3.instance().change().application()); assertEquals(Optional.empty(), i4.instance().change().application()); i4.runJob(systemTest).runJob(stagingTest); i1.runJob(productionUsEast3); i2.runJob(productionUsEast3); tester.outstandingChangeDeployer().run(); assertEquals(v0, i1.instance().latestDeployed()); assertEquals(v0, i2.instance().latestDeployed()); assertEquals(Optional.empty(), i1.instance().change().application()); assertEquals(Optional.empty(), i2.instance().change().application()); assertEquals(Optional.empty(), i3.instance().change().application()); assertEquals(Optional.empty(), i4.instance().change().application()); tester.clock().advance(Duration.ofHours(6)); tester.outstandingChangeDeployer().run(); assertEquals(Optional.empty(), i1.instance().change().application()); assertEquals(Optional.empty(), i2.instance().change().application()); assertEquals(v0, i3.instance().change().application()); assertEquals(Optional.empty(), i4.instance().change().application()); i3.runJob(productionUsEast3); tester.clock().advance(Duration.ofHours(12)); i1.submit(applicationPackage); Optional<ApplicationVersion> v1 = i1.lastSubmission(); i4.runJob(systemTest).runJob(stagingTest); i1.runJob(productionUsEast3); i2.runJob(productionUsEast3); assertEquals(v1, i1.instance().latestDeployed()); assertEquals(v1, i2.instance().latestDeployed()); assertEquals(Optional.empty(), i1.instance().change().application()); assertEquals(Optional.empty(), i2.instance().change().application()); assertEquals(v0, i3.instance().change().application()); assertEquals(Optional.empty(), i4.instance().change().application()); tester.clock().advance(Duration.ofHours(3)); i1.submit(applicationPackage); Optional<ApplicationVersion> v2 = i1.lastSubmission(); i4.runJob(systemTest).runJob(stagingTest); 
i1.runJob(productionUsEast3); tester.clock().advance(Duration.ofHours(3)); tester.outstandingChangeDeployer().run(); assertEquals(v0, i3.instance().change().application()); i3.runJob(testUsEast3); assertEquals(Optional.empty(), i3.instance().change().application()); tester.outstandingChangeDeployer().run(); assertEquals(v2, i1.instance().latestDeployed()); assertEquals(v1, i2.instance().latestDeployed()); assertEquals(v0, i3.instance().latestDeployed()); assertEquals(Optional.empty(), i1.instance().change().application()); assertEquals(v2, i2.instance().change().application()); assertEquals(v1, i3.instance().change().application()); assertEquals(v0, i4.instance().change().application()); } @Test public void testMultipleInstances() { ApplicationPackage applicationPackage = new ApplicationPackageBuilder() .instances("instance1,instance2") .region("us-east-3") .build(); var app = tester.newDeploymentContext("tenant1", "application1", "instance1") .submit(applicationPackage) .completeRollout(); assertEquals(2, app.application().instances().size()); assertEquals(2, app.application().productionDeployments().values().stream() .mapToInt(Collection::size) .sum()); } @Test public void testDeclaredProductionTests() { ApplicationPackage applicationPackage = new ApplicationPackageBuilder() .region("us-east-3") .delay(Duration.ofMinutes(1)) .test("us-east-3") .region("us-west-1") .region("us-central-1") .test("us-central-1") .test("us-west-1") .build(); var app = tester.newDeploymentContext().submit(applicationPackage); app.runJob(systemTest).runJob(stagingTest).runJob(productionUsEast3); app.assertNotRunning(productionUsWest1); tester.clock().advance(Duration.ofMinutes(1)); app.runJob(testUsEast3) .runJob(productionUsWest1).runJob(productionUsCentral1) .runJob(testUsCentral1).runJob(testUsWest1); assertEquals(Change.empty(), app.instance().change()); Version version0 = app.application().oldestDeployedPlatform().get(); Version version1 = Version.fromString("7.7"); 
tester.controllerTester().upgradeSystem(version1); tester.upgrader().maintain(); app.runJob(systemTest).runJob(stagingTest).runJob(productionUsEast3); tester.clock().advance(Duration.ofMinutes(1)); app.failDeployment(testUsEast3); tester.triggerJobs(); app.assertRunning(testUsEast3); tester.upgrader().overrideConfidence(version1, VespaVersion.Confidence.broken); tester.controllerTester().computeVersionStatus(); tester.upgrader().maintain(); app.failDeployment(testUsEast3); app.assertNotRunning(testUsEast3); assertEquals(Change.empty(), app.instance().change()); tester.deploymentTrigger().triggerChange(app.instanceId(), Change.of(version0).withPin()); app.runJob(stagingTest).runJob(productionUsEast3); tester.clock().advance(Duration.ofMinutes(1)); app.failDeployment(testUsEast3); tester.clock().advance(Duration.ofMinutes(11)); app.runJob(testUsEast3); assertEquals(Change.empty().withPin(), app.instance().change()); } @Test @Test public void testRevisionJoinsUpgradeWithSeparateRollout() { var app = tester.newDeploymentContext().submit().deploy(); var version = new Version("7.1"); tester.controllerTester().upgradeSystem(version); tester.upgrader().maintain(); app.runJob(systemTest).runJob(stagingTest).runJob(productionUsCentral1); tester.clock().advance(Duration.ofMinutes(1)); app.submit(); assertEquals(Change.of(version).with(app.lastSubmission().get()), app.instance().change()); app.runJob(systemTest).runJob(stagingTest).runJob(productionUsCentral1); app.runJob(productionUsEast3).runJob(productionUsWest1); tester.triggerJobs(); assertEquals(Change.of(app.lastSubmission().get()), app.instance().change()); app.runJob(productionUsEast3).runJob(productionUsWest1); assertEquals(Change.empty(), app.instance().change()); } @Test public void mixedDirectAndPipelineJobsInProduction() { ApplicationPackage cdPackage = new ApplicationPackageBuilder().region("cd-us-east-1") .region("cd-aws-us-east-1a") .build(); var zones = List.of(ZoneId.from("test.cd-us-west-1"), 
ZoneId.from("staging.cd-us-west-1"), ZoneId.from("prod.cd-us-east-1"), ZoneId.from("prod.cd-aws-us-east-1a")); tester.controllerTester() .setZones(zones, SystemName.cd) .setRoutingMethod(zones, RoutingMethod.shared); tester.controllerTester().upgradeSystem(Version.fromString("6.1")); tester.controllerTester().computeVersionStatus(); var app = tester.newDeploymentContext(); app.runJob(productionCdUsEast1, cdPackage); app.submit(cdPackage); app.runJob(systemTest); tester.controller().applications().deploymentTrigger().forceTrigger(app.instanceId(), productionCdUsEast1, "user", false); app.runJob(productionCdUsEast1) .abortJob(stagingTest) .runJob(stagingTest) .runJob(productionCdAwsUsEast1a); app.runJob(productionCdUsEast1, cdPackage); var version = new Version("7.1"); tester.controllerTester().upgradeSystem(version); tester.upgrader().maintain(); tester.controller().applications().deploymentTrigger().forceTrigger(app.instanceId(), productionCdUsEast1, "user", false); app.runJob(productionCdUsEast1) .abortJob(systemTest) .jobAborted(stagingTest) .runJob(systemTest) .runJob(stagingTest) .runJob(productionCdAwsUsEast1a); app.runJob(productionCdUsEast1, cdPackage); app.submit(cdPackage); app.runJob(systemTest); tester.controller().applications().deploymentTrigger().forceTrigger(app.instanceId(), productionCdUsEast1, "user", false); app.runJob(productionCdUsEast1) .jobAborted(stagingTest) .runJob(stagingTest) .runJob(productionCdAwsUsEast1a); } @Test public void testsInSeparateInstance() { String deploymentSpec = "<deployment version='1.0' athenz-domain='domain' athenz-service='service'>\n" + " <instance id='canary'>\n" + " <upgrade policy='canary' />\n" + " <test />\n" + " <staging />\n" + " </instance>\n" + " <instance id='default'>\n" + " <prod>\n" + " <region active='true'>eu-west-1</region>\n" + " <test>eu-west-1</test>\n" + " </prod>\n" + " </instance>\n" + "</deployment>\n"; ApplicationPackage applicationPackage = 
ApplicationPackageBuilder.fromDeploymentXml(deploymentSpec); var canary = tester.newDeploymentContext("t", "a", "canary").submit(applicationPackage); var conservative = tester.newDeploymentContext("t", "a", "default"); canary.runJob(systemTest) .runJob(stagingTest); conservative.runJob(productionEuWest1) .runJob(testEuWest1); canary.submit(applicationPackage) .runJob(systemTest) .runJob(stagingTest); tester.outstandingChangeDeployer().run(); conservative.runJob(productionEuWest1) .runJob(testEuWest1); tester.controllerTester().upgradeSystem(new Version("7.7.7")); tester.upgrader().maintain(); canary.runJob(systemTest) .runJob(stagingTest); tester.upgrader().maintain(); conservative.runJob(productionEuWest1) .runJob(testEuWest1); } @Test public void testEagerTests() { var app = tester.newDeploymentContext().submit().deploy(); Version version1 = new Version("7.8.9"); ApplicationVersion build1 = app.lastSubmission().get(); tester.controllerTester().upgradeSystem(version1); tester.upgrader().maintain(); app.runJob(stagingTest); app.submit(); ApplicationVersion build2 = app.lastSubmission().get(); assertNotEquals(build1, build2); tester.triggerJobs(); app.assertRunning(stagingTest); assertEquals(version1, app.instanceJobs().get(stagingTest).lastCompleted().get().versions().targetPlatform()); assertEquals(build1, app.instanceJobs().get(stagingTest).lastCompleted().get().versions().targetApplication()); assertEquals(version1, app.instanceJobs().get(stagingTest).lastTriggered().get().versions().sourcePlatform().get()); assertEquals(build1, app.instanceJobs().get(stagingTest).lastTriggered().get().versions().sourceApplication().get()); assertEquals(version1, app.instanceJobs().get(stagingTest).lastTriggered().get().versions().targetPlatform()); assertEquals(build2, app.instanceJobs().get(stagingTest).lastTriggered().get().versions().targetApplication()); app.runJob(systemTest) .runJob(productionUsCentral1) .runJob(productionUsEast3) .runJob(productionUsWest1); 
tester.outstandingChangeDeployer().run(); assertEquals(RunStatus.running, tester.jobs().last(app.instanceId(), stagingTest).get().status()); app.runJob(stagingTest); tester.triggerJobs(); app.assertNotRunning(stagingTest); } @Test public void testTriggeringOfIdleTestJobsWhenFirstDeploymentIsOnNewerVersionThanChange() { ApplicationPackage applicationPackage = new ApplicationPackageBuilder().systemTest() .stagingTest() .region("us-east-3") .region("us-west-1") .build(); var app = tester.newDeploymentContext().submit(applicationPackage).deploy(); var appToAvoidVersionGC = tester.newDeploymentContext("g", "c", "default").submit().deploy(); Version version2 = new Version("7.8.9"); Version version3 = new Version("8.9.10"); tester.controllerTester().upgradeSystem(version2); tester.deploymentTrigger().triggerChange(appToAvoidVersionGC.instanceId(), Change.of(version2)); appToAvoidVersionGC.deployPlatform(version2); tester.controllerTester().upgradeSystem(version3); tester.deploymentTrigger().triggerChange(app.instanceId(), Change.of(version3)); app.runJob(systemTest).runJob(stagingTest); tester.triggerJobs(); tester.upgrader().overrideConfidence(version3, VespaVersion.Confidence.broken); tester.controllerTester().computeVersionStatus(); tester.upgrader().run(); assertEquals(Optional.of(version2), app.instance().change().platform()); app.runJob(systemTest) .runJob(productionUsEast3) .runJob(stagingTest) .runJob(productionUsWest1); assertEquals(version3, app.instanceJobs().get(productionUsEast3).lastSuccess().get().versions().targetPlatform()); assertEquals(version2, app.instanceJobs().get(productionUsWest1).lastSuccess().get().versions().targetPlatform()); assertEquals(Map.of(), app.deploymentStatus().jobsToRun()); assertEquals(Change.empty(), app.instance().change()); assertEquals(List.of(), tester.jobs().active()); } @Test public void testRetriggerQueue() { var app = tester.newDeploymentContext().submit().deploy(); app.submit(); tester.triggerJobs(); 
tester.deploymentTrigger().reTrigger(app.instanceId(), productionUsEast3);
        // Repeated re-trigger requests for the same deployment should collapse into a single queued entry.
        tester.deploymentTrigger().reTriggerOrAddToQueue(app.deploymentIdIn(ZoneId.from("prod", "us-east-3")));
        tester.deploymentTrigger().reTriggerOrAddToQueue(app.deploymentIdIn(ZoneId.from("prod", "us-east-3")));
        List<RetriggerEntry> retriggerEntries = tester.controller().curator().readRetriggerEntries();
        // Use the statically imported assertEquals, consistent with every other assertion in this class,
        // instead of the qualified Assert.assertEquals.
        assertEquals(1, retriggerEntries.size());
    }

}
class DeploymentTriggerTest { private final DeploymentTester tester = new DeploymentTester(); @Test public void testTriggerFailing() { ApplicationPackage applicationPackage = new ApplicationPackageBuilder() .upgradePolicy("default") .region("us-west-1") .build(); var app = tester.newDeploymentContext().submit(applicationPackage).deploy(); Version version = Version.fromString("6.3"); tester.controllerTester().upgradeSystem(version); tester.upgrader().maintain(); app.failDeployment(stagingTest); tester.triggerJobs(); assertEquals("Retried dead job", 2, tester.jobs().active().size()); app.assertRunning(stagingTest); app.runJob(stagingTest); app.assertRunning(systemTest); assertEquals(1, tester.jobs().active().size()); app.timeOutUpgrade(systemTest); tester.triggerJobs(); assertEquals("Job is retried on failure", 1, tester.jobs().active().size()); app.runJob(systemTest); tester.triggerJobs(); app.assertRunning(productionUsWest1); tester.applications().lockApplicationOrThrow(app.application().id(), locked -> tester.applications().store(locked.withProjectId(OptionalLong.empty()))); app.timeOutConvergence(productionUsWest1); tester.triggerJobs(); assertEquals("Job is not triggered when no projectId is present", 0, tester.jobs().active().size()); } @Test public void separateRevisionMakesApplicationChangeWaitForPreviousToComplete() { DeploymentContext app = tester.newDeploymentContext(); ApplicationPackage applicationPackage = new ApplicationPackageBuilder() .upgradeRevision(null) .region("us-east-3") .test("us-east-3") .build(); app.submit(applicationPackage).runJob(systemTest).runJob(stagingTest).runJob(productionUsEast3); Optional<ApplicationVersion> v0 = app.lastSubmission(); app.submit(applicationPackage); Optional<ApplicationVersion> v1 = app.lastSubmission(); assertEquals(v0, app.instance().change().application()); app.runJob(systemTest).runJob(stagingTest); app.runJob(testUsEast3); assertEquals(Optional.empty(), app.instance().change().application()); 
tester.outstandingChangeDeployer().run(); assertEquals(v1, app.instance().change().application()); app.runJob(productionUsEast3).failDeployment(testUsEast3); app.submit(applicationPackage); Optional<ApplicationVersion> v2 = app.lastSubmission(); assertEquals(v2, app.instance().change().application()); } @Test public void leadingUpgradeAllowsApplicationChangeWhileUpgrading() { var applicationPackage = new ApplicationPackageBuilder().region("us-east-3") .upgradeRollout("leading") .build(); var app = tester.newDeploymentContext(); app.submit(applicationPackage).deploy(); Change upgrade = Change.of(new Version("7.8.9")); tester.controllerTester().upgradeSystem(upgrade.platform().get()); tester.upgrader().maintain(); app.runJob(systemTest).runJob(stagingTest); tester.triggerJobs(); app.assertRunning(productionUsEast3); assertEquals(upgrade, app.instance().change()); app.submit(applicationPackage); assertEquals(upgrade.with(app.lastSubmission().get()), app.instance().change()); } @Test public void abortsJobsOnNewApplicationChange() { var app = tester.newDeploymentContext(); app.submit() .runJob(systemTest) .runJob(stagingTest); tester.triggerJobs(); RunId id = tester.jobs().last(app.instanceId(), productionUsCentral1).get().id(); assertTrue(tester.jobs().active(id).isPresent()); app.submit(); assertTrue(tester.jobs().active(id).isPresent()); tester.triggerJobs(); tester.runner().run(); assertTrue(tester.jobs().active(id).isPresent()); app.runJob(systemTest).runJob(stagingTest).runJob(stagingTest); tester.triggerJobs(); app.jobAborted(productionUsCentral1); app.runJob(productionUsCentral1).runJob(productionUsWest1).runJob(productionUsEast3); assertEquals(Change.empty(), app.instance().change()); tester.controllerTester().upgradeSystem(new Version("8.9")); tester.upgrader().maintain(); app.runJob(systemTest).runJob(stagingTest); tester.clock().advance(Duration.ofMinutes(1)); tester.triggerJobs(); app.submit(); app.runJob(systemTest).runJob(stagingTest); 
tester.triggerJobs(); tester.runner().run(); assertEquals(EnumSet.of(productionUsCentral1), tester.jobs().active().stream() .map(run -> run.id().type()) .collect(Collectors.toCollection(() -> EnumSet.noneOf(JobType.class)))); } @Test public void deploymentSpecWithDelays() { ApplicationPackage applicationPackage = new ApplicationPackageBuilder() .systemTest() .delay(Duration.ofSeconds(30)) .region("us-west-1") .delay(Duration.ofMinutes(2)) .delay(Duration.ofMinutes(2)) .region("us-central-1") .delay(Duration.ofMinutes(10)) .build(); var app = tester.newDeploymentContext().submit(applicationPackage); app.runJob(systemTest); tester.clock().advance(Duration.ofSeconds(15)); app.runJob(stagingTest); tester.triggerJobs(); assertEquals(0, tester.jobs().active().size()); tester.clock().advance(Duration.ofSeconds(15)); tester.triggerJobs(); assertEquals(1, tester.jobs().active().size()); app.assertRunning(productionUsWest1); tester.clock().advance(Duration.ofMinutes(3)); tester.triggerJobs(); assertEquals(1, tester.jobs().active().size()); app.assertRunning(productionUsWest1); app.runJob(productionUsWest1); tester.triggerJobs(); assertTrue("No more jobs triggered at this time", tester.jobs().active().isEmpty()); tester.clock().advance(Duration.ofMinutes(3)); tester.triggerJobs(); assertTrue("No more jobs triggered at this time", tester.jobs().active().isEmpty()); tester.clock().advance(Duration.ofMinutes(1)); tester.triggerJobs(); app.runJob(productionUsCentral1); assertTrue("All jobs consumed", tester.jobs().active().isEmpty()); tester.clock().advance(Duration.ofMinutes(10)); tester.triggerJobs(); assertTrue("All jobs consumed", tester.jobs().active().isEmpty()); } @Test public void deploymentSpecWithParallelDeployments() { ApplicationPackage applicationPackage = new ApplicationPackageBuilder() .region("us-central-1") .parallel("us-west-1", "us-east-3") .region("eu-west-1") .build(); var app = tester.newDeploymentContext().submit(applicationPackage); 
app.runJob(systemTest).runJob(stagingTest); tester.triggerJobs(); assertEquals(1, tester.jobs().active().size()); app.runJob(productionUsCentral1); tester.triggerJobs(); assertEquals(2, tester.jobs().active().size()); app.assertRunning(productionUsEast3); app.assertRunning(productionUsWest1); app.runJob(productionUsWest1); assertEquals(1, tester.jobs().active().size()); app.assertRunning(productionUsEast3); app.runJob(productionUsEast3); tester.triggerJobs(); assertEquals(1, tester.jobs().active().size()); app.runJob(productionEuWest1); assertTrue("All jobs consumed", tester.jobs().active().isEmpty()); } @Test public void testNoOtherChangesDuringSuspension() { ApplicationPackage applicationPackage = new ApplicationPackageBuilder() .region("us-central-1") .parallel("us-west-1", "us-east-3") .build(); var application = tester.newDeploymentContext().submit().deploy(); tester.configServer().setSuspension(application.deploymentIdIn(ZoneId.from("prod", "us-central-1")), true); application.submit() .runJob(systemTest) .runJob(stagingTest) .runJob(productionUsCentral1); tester.triggerJobs(); application.assertNotRunning(productionUsEast3); application.assertNotRunning(productionUsWest1); tester.configServer().setSuspension(application.deploymentIdIn(ZoneId.from("prod", "us-central-1")), false); tester.triggerJobs(); application.runJob(productionUsWest1).runJob(productionUsEast3); assertEquals(Change.empty(), application.instance().change()); } @Test public void testBlockRevisionChange() { tester.at(Instant.parse("2017-09-26T17:30:00.00Z")); Version version = Version.fromString("6.2"); tester.controllerTester().upgradeSystem(version); ApplicationPackage applicationPackage = new ApplicationPackageBuilder() .upgradePolicy("canary") .blockChange(true, false, "tue", "18-19", "UTC") .region("us-west-1") .region("us-central-1") .region("us-east-3") .build(); var app = tester.newDeploymentContext().submit(applicationPackage).deploy(); tester.clock().advance(Duration.ofHours(1)); 
tester.triggerJobs(); assertEquals(0, tester.jobs().active().size()); app.submit(applicationPackage); assertTrue(app.deploymentStatus().outstandingChange(app.instance().name()).hasTargets()); app.runJob(systemTest).runJob(stagingTest); tester.outstandingChangeDeployer().run(); assertTrue(app.deploymentStatus().outstandingChange(app.instance().name()).hasTargets()); tester.triggerJobs(); assertEquals(emptyList(), tester.jobs().active()); tester.clock().advance(Duration.ofHours(2)); tester.outstandingChangeDeployer().run(); assertFalse(app.deploymentStatus().outstandingChange(app.instance().name()).hasTargets()); tester.triggerJobs(); app.assertRunning(productionUsWest1); } @Test public void testCompletionOfPartOfChangeDuringBlockWindow() { tester.at(Instant.parse("2017-09-26T17:30:00.00Z")); ApplicationPackage applicationPackage = new ApplicationPackageBuilder() .blockChange(true, true, "tue", "18", "UTC") .region("us-west-1") .region("us-east-3") .build(); var app = tester.newDeploymentContext().submit(applicationPackage).deploy(); Version v1 = Version.fromString("6.1"); Version v2 = Version.fromString("6.2"); tester.controllerTester().upgradeSystem(v2); tester.upgrader().maintain(); app.runJob(stagingTest).runJob(systemTest); tester.clock().advance(Duration.ofHours(1)); app.submit(applicationPackage); app.runJob(productionUsWest1); assertEquals(1, app.instanceJobs().get(productionUsWest1).lastSuccess().get().versions().targetApplication().buildNumber().getAsLong()); assertEquals(2, app.deploymentStatus().outstandingChange(app.instance().name()).application().get().buildNumber().getAsLong()); tester.triggerJobs(); assertEquals(3, tester.jobs().active().size()); app.runJob(productionUsEast3); assertEquals(2, tester.jobs().active().size()); assertEquals(Change.empty(), app.instance().change()); assertTrue(app.deploymentStatus().outstandingChange(app.instance().name()).hasTargets()); app.runJob(stagingTest).runJob(systemTest); 
tester.clock().advance(Duration.ofHours(1)); tester.outstandingChangeDeployer().run(); assertTrue(app.instance().change().hasTargets()); assertFalse(app.deploymentStatus().outstandingChange(app.instance().name()).hasTargets()); app.runJob(productionUsWest1).runJob(productionUsEast3); assertFalse(app.instance().change().hasTargets()); } @Test public void testJobPause() { ApplicationPackage applicationPackage = new ApplicationPackageBuilder() .region("us-west-1") .region("us-east-3") .build(); var app = tester.newDeploymentContext().submit(applicationPackage).deploy(); tester.controllerTester().upgradeSystem(new Version("9.8.7")); tester.upgrader().maintain(); tester.deploymentTrigger().pauseJob(app.instanceId(), productionUsWest1, tester.clock().instant().plus(Duration.ofSeconds(1))); tester.deploymentTrigger().pauseJob(app.instanceId(), productionUsEast3, tester.clock().instant().plus(Duration.ofSeconds(3))); app.runJob(systemTest).runJob(stagingTest); tester.triggerJobs(); app.assertNotRunning(productionUsWest1); tester.clock().advance(Duration.ofMillis(1500)); tester.triggerJobs(); app.assertRunning(productionUsWest1); tester.deploymentTrigger().pauseJob(app.instanceId(), productionUsWest1, tester.clock().instant().plus(Duration.ofSeconds(1))); app.failDeployment(productionUsWest1); tester.triggerJobs(); app.assertNotRunning(productionUsWest1); tester.clock().advance(Duration.ofMillis(1000)); tester.triggerJobs(); app.runJob(productionUsWest1); tester.triggerJobs(); app.assertNotRunning(productionUsEast3); tester.deploymentTrigger().forceTrigger(app.instanceId(), productionUsEast3, "mrTrigger", true); app.assertRunning(productionUsEast3); assertFalse(app.instance().jobPause(productionUsEast3).isPresent()); } @Test public void applicationVersionIsNotDowngraded() { ApplicationPackage applicationPackage = new ApplicationPackageBuilder() .region("us-central-1") .region("eu-west-1") .build(); var app = tester.newDeploymentContext().submit(applicationPackage).deploy(); 
app.submit(applicationPackage) .runJob(systemTest) .runJob(stagingTest) .timeOutUpgrade(productionUsCentral1); ApplicationVersion appVersion1 = app.lastSubmission().get(); assertEquals(appVersion1, app.deployment(ZoneId.from("prod.us-central-1")).applicationVersion()); tester.deploymentTrigger().cancelChange(app.instanceId(), PLATFORM); assertEquals(Change.of(appVersion1), app.instance().change()); tester.deploymentTrigger().cancelChange(app.instanceId(), ALL); assertEquals(Change.empty(), app.instance().change()); Version version1 = new Version("6.2"); tester.controllerTester().upgradeSystem(version1); tester.upgrader().maintain(); app.runJob(systemTest).runJob(stagingTest).failDeployment(productionUsCentral1); app.runJob(systemTest).runJob(stagingTest); app.runJob(productionUsCentral1).runJob(productionEuWest1); assertEquals(appVersion1, app.deployment(ZoneId.from("prod.us-central-1")).applicationVersion()); } @Test public void downgradingApplicationVersionWorks() { var app = tester.newDeploymentContext().submit().deploy(); ApplicationVersion appVersion0 = app.lastSubmission().get(); assertEquals(Optional.of(appVersion0), app.instance().latestDeployed()); app.submit().deploy(); ApplicationVersion appVersion1 = app.lastSubmission().get(); assertEquals(Optional.of(appVersion1), app.instance().latestDeployed()); tester.deploymentTrigger().forceChange(app.instanceId(), Change.of(appVersion0)); assertEquals(Change.of(appVersion0), app.instance().change()); app.runJob(stagingTest) .runJob(productionUsCentral1) .runJob(productionUsEast3) .runJob(productionUsWest1); assertEquals(Change.empty(), app.instance().change()); assertEquals(appVersion0, app.instance().deployments().get(productionUsEast3.zone(tester.controller().system())).applicationVersion()); assertEquals(Optional.of(appVersion0), app.instance().latestDeployed()); } @Test public void settingANoOpChangeIsANoOp() { var app = tester.newDeploymentContext().submit(); assertEquals(Optional.empty(), 
app.instance().latestDeployed()); app.deploy(); ApplicationVersion appVersion0 = app.lastSubmission().get(); assertEquals(Optional.of(appVersion0), app.instance().latestDeployed()); app.submit().deploy(); ApplicationVersion appVersion1 = app.lastSubmission().get(); assertEquals(Optional.of(appVersion1), app.instance().latestDeployed()); assertEquals(Change.empty(), app.instance().change()); tester.deploymentTrigger().forceChange(app.instanceId(), Change.of(appVersion1)); assertEquals(Change.empty(), app.instance().change()); assertEquals(Optional.of(appVersion1), app.instance().latestDeployed()); } @Test public void stepIsCompletePreciselyWhenItShouldBe() { var app1 = tester.newDeploymentContext("tenant1", "app1", "default"); var app2 = tester.newDeploymentContext("tenant1", "app2", "default"); ApplicationPackage applicationPackage = new ApplicationPackageBuilder() .region("us-central-1") .region("eu-west-1") .build(); Version version0 = Version.fromString("7.0"); tester.controllerTester().upgradeSystem(version0); app1.submit(applicationPackage).deploy(); app2.submit(applicationPackage).deploy(); Version version1 = Version.fromString("7.1"); tester.controllerTester().upgradeSystem(version1); tester.upgrader().maintain(); app2.deployPlatform(version1); tester.deploymentTrigger().cancelChange(app1.instanceId(), ALL); Version version2 = Version.fromString("7.2"); tester.controllerTester().upgradeSystem(version2); tester.upgrader().maintain(); tester.triggerJobs(); app1.jobAborted(systemTest).jobAborted(stagingTest); app1.runJob(systemTest).runJob(stagingTest).timeOutConvergence(productionUsCentral1); assertEquals(version2, app1.deployment(productionUsCentral1.zone(main)).version()); Instant triggered = app1.instanceJobs().get(productionUsCentral1).lastTriggered().get().start(); tester.clock().advance(Duration.ofHours(1)); tester.upgrader().overrideConfidence(version2, VespaVersion.Confidence.broken); tester.controllerTester().computeVersionStatus(); 
tester.upgrader().maintain(); assertEquals("Change becomes latest non-broken version", Change.of(version1), app1.instance().change()); app1.runJob(systemTest).runJob(stagingTest) .failDeployment(productionEuWest1); assertEquals(triggered, app1.instanceJobs().get(productionUsCentral1).lastTriggered().get().start()); ApplicationVersion revision1 = app1.lastSubmission().get(); app1.submit(applicationPackage); ApplicationVersion revision2 = app1.lastSubmission().get(); app1.runJob(systemTest).runJob(stagingTest); assertEquals(Change.of(version1).with(revision2), app1.instance().change()); tester.triggerJobs(); app1.assertRunning(productionUsCentral1); assertEquals(version2, app1.instance().deployments().get(productionUsCentral1.zone(main)).version()); assertEquals(revision1, app1.deployment(productionUsCentral1.zone(main)).applicationVersion()); assertTrue(triggered.isBefore(app1.instanceJobs().get(productionUsCentral1).lastTriggered().get().start())); app1.timeOutUpgrade(productionUsCentral1); assertEquals(version2, app1.deployment(productionUsCentral1.zone(main)).version()); assertEquals(revision2, app1.deployment(productionUsCentral1.zone(main)).applicationVersion()); tester.clock().advance(Duration.ofHours(3)); tester.triggerJobs(); app1.assertNotRunning(productionUsCentral1); app1.runJob(systemTest) .runJob(stagingTest) .runJob(productionEuWest1) .runJob(productionUsCentral1) .runJob(productionEuWest1); assertEquals(Change.empty(), app1.instance().change()); assertEquals(Optional.of(RunStatus.success), app1.instanceJobs().get(productionUsCentral1).lastStatus()); } @Test public void eachParallelDeployTargetIsTested() { ApplicationPackage applicationPackage = new ApplicationPackageBuilder() .parallel("eu-west-1", "us-east-3") .build(); var app = tester.newDeploymentContext().submit(applicationPackage).deploy(); Version v1 = new Version("6.1"); Version v2 = new Version("6.2"); tester.controllerTester().upgradeSystem(v2); tester.upgrader().maintain(); 
app.runJob(systemTest).runJob(stagingTest); app.timeOutConvergence(productionEuWest1); tester.deploymentTrigger().cancelChange(app.instanceId(), PLATFORM); assertEquals(v2, app.deployment(productionEuWest1.zone(main)).version()); assertEquals(v1, app.deployment(productionUsEast3.zone(main)).version()); app.submit(applicationPackage); tester.triggerJobs(); Version firstTested = app.instanceJobs().get(systemTest).lastTriggered().get().versions().targetPlatform(); assertEquals(firstTested, app.instanceJobs().get(stagingTest).lastTriggered().get().versions().targetPlatform()); app.runJob(systemTest).runJob(stagingTest); tester.triggerJobs(); assertNotEquals(firstTested, app.instanceJobs().get(systemTest).lastTriggered().get().versions().targetPlatform()); assertNotEquals(firstTested, app.instanceJobs().get(stagingTest).lastTriggered().get().versions().targetPlatform()); app.runJob(systemTest).runJob(stagingTest); app.triggerJobs().jobAborted(productionUsEast3); app.failDeployment(productionEuWest1).failDeployment(productionUsEast3) .runJob(productionEuWest1).runJob(productionUsEast3); assertFalse(app.instance().change().hasTargets()); assertEquals(2, app.instanceJobs().get(productionEuWest1).lastSuccess().get().versions().targetApplication().buildNumber().getAsLong()); assertEquals(2, app.instanceJobs().get(productionUsEast3).lastSuccess().get().versions().targetApplication().buildNumber().getAsLong()); } @Test public void retriesFailingJobs() { ApplicationPackage applicationPackage = new ApplicationPackageBuilder() .region("us-central-1") .build(); var app = tester.newDeploymentContext().submit(applicationPackage).deploy(); app.submit(applicationPackage).runJob(stagingTest).failDeployment(systemTest); app.failDeployment(systemTest); tester.triggerJobs(); app.assertRunning(systemTest); tester.clock().advance(Duration.ofSeconds(1)); app.failDeployment(systemTest); tester.triggerJobs(); app.assertNotRunning(systemTest); 
tester.clock().advance(Duration.ofMinutes(10).plus(Duration.ofSeconds(1))); tester.triggerJobs(); app.assertRunning(systemTest); app.failDeployment(systemTest); tester.clock().advance(Duration.ofMinutes(15)); tester.triggerJobs(); app.assertNotRunning(systemTest); tester.clock().advance(Duration.ofSeconds(2)); tester.triggerJobs(); app.assertRunning(systemTest); app.failDeployment(systemTest); tester.triggerJobs(); app.assertNotRunning(systemTest); app.submit(applicationPackage).deploy(); assertTrue("Deployment completed", tester.jobs().active().isEmpty()); } @Test public void testPlatformVersionSelection() { ApplicationPackage applicationPackage = new ApplicationPackageBuilder() .region("us-west-1") .build(); Version version1 = tester.controller().readSystemVersion(); var app1 = tester.newDeploymentContext(); app1.submit(applicationPackage).deploy(); assertEquals("First deployment gets system version", version1, app1.application().oldestDeployedPlatform().get()); assertEquals(version1, tester.configServer().lastPrepareVersion().get()); Version version2 = new Version(version1.getMajor(), version1.getMinor() + 1); tester.controllerTester().upgradeSystem(version2); applicationPackage = new ApplicationPackageBuilder() .region("us-west-1") .region("us-east-3") .build(); app1.submit(applicationPackage).deploy(); assertEquals("Application change preserves version, and new region gets oldest version too", version1, app1.application().oldestDeployedPlatform().get()); assertEquals(version1, tester.configServer().lastPrepareVersion().get()); assertFalse("Change deployed", app1.instance().change().hasTargets()); tester.upgrader().maintain(); app1.deployPlatform(version2); assertEquals("Version upgrade changes version", version2, app1.application().oldestDeployedPlatform().get()); assertEquals(version2, tester.configServer().lastPrepareVersion().get()); } @Test public void requeueOutOfCapacityStagingJob() { ApplicationPackage applicationPackage = new 
ApplicationPackageBuilder() .region("us-east-3") .build(); var app1 = tester.newDeploymentContext("tenant1", "app1", "default").submit(applicationPackage); var app2 = tester.newDeploymentContext("tenant2", "app2", "default").submit(applicationPackage); var app3 = tester.newDeploymentContext("tenant3", "app3", "default").submit(applicationPackage); app2.runJob(systemTest); tester.clock().advance(Duration.ofMinutes(1)); app1.runJob(systemTest); tester.clock().advance(Duration.ofMinutes(1)); app3.runJob(systemTest); tester.triggerJobs(); assertEquals(3, tester.jobs().active().size()); tester.abortAll(); assertEquals(List.of(), tester.jobs().active()); tester.readyJobsTrigger().maintain(); assertEquals(1, tester.jobs().active().size()); tester.readyJobsTrigger().maintain(); assertEquals(2, tester.jobs().active().size()); tester.readyJobsTrigger().maintain(); assertEquals(3, tester.jobs().active().size()); app3.outOfCapacity(stagingTest); app1.abortJob(stagingTest); app2.abortJob(stagingTest); tester.readyJobsTrigger().maintain(); app3.assertRunning(stagingTest); assertEquals(1, tester.jobs().active().size()); tester.readyJobsTrigger().maintain(); assertEquals(2, tester.jobs().active().size()); tester.readyJobsTrigger().maintain(); assertEquals(3, tester.jobs().active().size()); app2.deploy(); app3.deploy(); app1.runJob(stagingTest); assertEquals(0, tester.jobs().active().size()); tester.controllerTester().upgradeSystem(new Version("6.2")); tester.upgrader().maintain(); app1.submit(applicationPackage); tester.readyJobsTrigger().run(); app1.assertRunning(systemTest); app1.assertRunning(stagingTest); assertEquals(2, tester.jobs().active().size()); tester.triggerJobs(); app3.outOfCapacity(systemTest); app1.abortJob(systemTest); app1.abortJob(stagingTest); app2.abortJob(systemTest); app2.abortJob(stagingTest); app3.abortJob(stagingTest); assertEquals(0, tester.jobs().active().size()); assertTrue(app1.instance().change().application().isPresent()); 
assertFalse(app2.instance().change().application().isPresent()); assertFalse(app3.instance().change().application().isPresent()); tester.readyJobsTrigger().maintain(); app1.assertRunning(stagingTest); app3.assertRunning(systemTest); assertEquals(2, tester.jobs().active().size()); tester.readyJobsTrigger().maintain(); app1.assertRunning(systemTest); assertEquals(4, tester.jobs().active().size()); tester.readyJobsTrigger().maintain(); app3.assertRunning(stagingTest); app2.assertRunning(stagingTest); app2.assertRunning(systemTest); assertEquals(6, tester.jobs().active().size()); } @Test public void testUserInstancesNotInDeploymentSpec() { var app = tester.newDeploymentContext(); tester.controller().applications().createInstance(app.application().id().instance("user")); app.submit().deploy(); } @Test public void testMultipleInstancesWithDifferentChanges() { DeploymentContext i1 = tester.newDeploymentContext("t", "a", "i1"); DeploymentContext i2 = tester.newDeploymentContext("t", "a", "i2"); DeploymentContext i3 = tester.newDeploymentContext("t", "a", "i3"); DeploymentContext i4 = tester.newDeploymentContext("t", "a", "i4"); ApplicationPackage applicationPackage = ApplicationPackageBuilder .fromDeploymentXml("<deployment version='1'>\n" + " <upgrade revision='separate' />\n" + " <parallel>\n" + " <instance id='i1'>\n" + " <prod>\n" + " <region>us-east-3</region>\n" + " <delay hours='6' />\n" + " </prod>\n" + " </instance>\n" + " <instance id='i2'>\n" + " <prod>\n" + " <region>us-east-3</region>\n" + " </prod>\n" + " </instance>\n" + " </parallel>\n" + " <instance id='i3'>\n" + " <prod>\n" + " <region>us-east-3</region>\n" + " <delay hours='18' />\n" + " <test>us-east-3</test>\n" + " </prod>\n" + " </instance>\n" + " <instance id='i4'>\n" + " <test />\n" + " <staging />\n" + " <prod>\n" + " <region>us-east-3</region>\n" + " </prod>\n" + " </instance>\n" + "</deployment>\n"); i1.submit(applicationPackage); Optional<ApplicationVersion> v0 = i1.lastSubmission(); 
tester.outstandingChangeDeployer().run(); assertEquals(v0, i1.instance().change().application()); assertEquals(v0, i2.instance().change().application()); assertEquals(Optional.empty(), i3.instance().change().application()); assertEquals(Optional.empty(), i4.instance().change().application()); i4.runJob(systemTest).runJob(stagingTest); i1.runJob(productionUsEast3); i2.runJob(productionUsEast3); tester.outstandingChangeDeployer().run(); assertEquals(v0, i1.instance().latestDeployed()); assertEquals(v0, i2.instance().latestDeployed()); assertEquals(Optional.empty(), i1.instance().change().application()); assertEquals(Optional.empty(), i2.instance().change().application()); assertEquals(Optional.empty(), i3.instance().change().application()); assertEquals(Optional.empty(), i4.instance().change().application()); tester.clock().advance(Duration.ofHours(6)); tester.outstandingChangeDeployer().run(); assertEquals(Optional.empty(), i1.instance().change().application()); assertEquals(Optional.empty(), i2.instance().change().application()); assertEquals(v0, i3.instance().change().application()); assertEquals(Optional.empty(), i4.instance().change().application()); i3.runJob(productionUsEast3); tester.clock().advance(Duration.ofHours(12)); i1.submit(applicationPackage); Optional<ApplicationVersion> v1 = i1.lastSubmission(); i4.runJob(systemTest).runJob(stagingTest); i1.runJob(productionUsEast3); i2.runJob(productionUsEast3); assertEquals(v1, i1.instance().latestDeployed()); assertEquals(v1, i2.instance().latestDeployed()); assertEquals(Optional.empty(), i1.instance().change().application()); assertEquals(Optional.empty(), i2.instance().change().application()); assertEquals(v0, i3.instance().change().application()); assertEquals(Optional.empty(), i4.instance().change().application()); tester.clock().advance(Duration.ofHours(3)); i1.submit(applicationPackage); Optional<ApplicationVersion> v2 = i1.lastSubmission(); i4.runJob(systemTest).runJob(stagingTest); 
i1.runJob(productionUsEast3); tester.clock().advance(Duration.ofHours(3)); tester.outstandingChangeDeployer().run(); assertEquals(v0, i3.instance().change().application()); i3.runJob(testUsEast3); assertEquals(Optional.empty(), i3.instance().change().application()); tester.outstandingChangeDeployer().run(); assertEquals(v2, i1.instance().latestDeployed()); assertEquals(v1, i2.instance().latestDeployed()); assertEquals(v0, i3.instance().latestDeployed()); assertEquals(Optional.empty(), i1.instance().change().application()); assertEquals(v2, i2.instance().change().application()); assertEquals(v1, i3.instance().change().application()); assertEquals(v0, i4.instance().change().application()); } @Test public void testMultipleInstances() { ApplicationPackage applicationPackage = new ApplicationPackageBuilder() .instances("instance1,instance2") .region("us-east-3") .build(); var app = tester.newDeploymentContext("tenant1", "application1", "instance1") .submit(applicationPackage) .completeRollout(); assertEquals(2, app.application().instances().size()); assertEquals(2, app.application().productionDeployments().values().stream() .mapToInt(Collection::size) .sum()); } @Test public void testDeclaredProductionTests() { ApplicationPackage applicationPackage = new ApplicationPackageBuilder() .region("us-east-3") .delay(Duration.ofMinutes(1)) .test("us-east-3") .region("us-west-1") .region("us-central-1") .test("us-central-1") .test("us-west-1") .build(); var app = tester.newDeploymentContext().submit(applicationPackage); app.runJob(systemTest).runJob(stagingTest).runJob(productionUsEast3); app.assertNotRunning(productionUsWest1); tester.clock().advance(Duration.ofMinutes(1)); app.runJob(testUsEast3) .runJob(productionUsWest1).runJob(productionUsCentral1) .runJob(testUsCentral1).runJob(testUsWest1); assertEquals(Change.empty(), app.instance().change()); Version version0 = app.application().oldestDeployedPlatform().get(); Version version1 = Version.fromString("7.7"); 
tester.controllerTester().upgradeSystem(version1); tester.upgrader().maintain(); app.runJob(systemTest).runJob(stagingTest).runJob(productionUsEast3); tester.clock().advance(Duration.ofMinutes(1)); app.failDeployment(testUsEast3); tester.triggerJobs(); app.assertRunning(testUsEast3); tester.upgrader().overrideConfidence(version1, VespaVersion.Confidence.broken); tester.controllerTester().computeVersionStatus(); tester.upgrader().maintain(); app.failDeployment(testUsEast3); app.assertNotRunning(testUsEast3); assertEquals(Change.empty(), app.instance().change()); tester.deploymentTrigger().triggerChange(app.instanceId(), Change.of(version0).withPin()); app.runJob(stagingTest).runJob(productionUsEast3); tester.clock().advance(Duration.ofMinutes(1)); app.failDeployment(testUsEast3); tester.clock().advance(Duration.ofMinutes(11)); app.runJob(testUsEast3); assertEquals(Change.empty().withPin(), app.instance().change()); } @Test @Test public void testRevisionJoinsUpgradeWithSeparateRollout() { var app = tester.newDeploymentContext().submit().deploy(); var version = new Version("7.1"); tester.controllerTester().upgradeSystem(version); tester.upgrader().maintain(); app.runJob(systemTest).runJob(stagingTest).runJob(productionUsCentral1); tester.clock().advance(Duration.ofMinutes(1)); app.submit(); assertEquals(Change.of(version).with(app.lastSubmission().get()), app.instance().change()); app.runJob(systemTest).runJob(stagingTest).runJob(productionUsCentral1); app.runJob(productionUsEast3).runJob(productionUsWest1); tester.triggerJobs(); assertEquals(Change.of(app.lastSubmission().get()), app.instance().change()); app.runJob(productionUsEast3).runJob(productionUsWest1); assertEquals(Change.empty(), app.instance().change()); } @Test public void mixedDirectAndPipelineJobsInProduction() { ApplicationPackage cdPackage = new ApplicationPackageBuilder().region("cd-us-east-1") .region("cd-aws-us-east-1a") .build(); var zones = List.of(ZoneId.from("test.cd-us-west-1"), 
ZoneId.from("staging.cd-us-west-1"), ZoneId.from("prod.cd-us-east-1"), ZoneId.from("prod.cd-aws-us-east-1a")); tester.controllerTester() .setZones(zones, SystemName.cd) .setRoutingMethod(zones, RoutingMethod.shared); tester.controllerTester().upgradeSystem(Version.fromString("6.1")); tester.controllerTester().computeVersionStatus(); var app = tester.newDeploymentContext(); app.runJob(productionCdUsEast1, cdPackage); app.submit(cdPackage); app.runJob(systemTest); tester.controller().applications().deploymentTrigger().forceTrigger(app.instanceId(), productionCdUsEast1, "user", false); app.runJob(productionCdUsEast1) .abortJob(stagingTest) .runJob(stagingTest) .runJob(productionCdAwsUsEast1a); app.runJob(productionCdUsEast1, cdPackage); var version = new Version("7.1"); tester.controllerTester().upgradeSystem(version); tester.upgrader().maintain(); tester.controller().applications().deploymentTrigger().forceTrigger(app.instanceId(), productionCdUsEast1, "user", false); app.runJob(productionCdUsEast1) .abortJob(systemTest) .jobAborted(stagingTest) .runJob(systemTest) .runJob(stagingTest) .runJob(productionCdAwsUsEast1a); app.runJob(productionCdUsEast1, cdPackage); app.submit(cdPackage); app.runJob(systemTest); tester.controller().applications().deploymentTrigger().forceTrigger(app.instanceId(), productionCdUsEast1, "user", false); app.runJob(productionCdUsEast1) .jobAborted(stagingTest) .runJob(stagingTest) .runJob(productionCdAwsUsEast1a); } @Test public void testsInSeparateInstance() { String deploymentSpec = "<deployment version='1.0' athenz-domain='domain' athenz-service='service'>\n" + " <instance id='canary'>\n" + " <upgrade policy='canary' />\n" + " <test />\n" + " <staging />\n" + " </instance>\n" + " <instance id='default'>\n" + " <prod>\n" + " <region active='true'>eu-west-1</region>\n" + " <test>eu-west-1</test>\n" + " </prod>\n" + " </instance>\n" + "</deployment>\n"; ApplicationPackage applicationPackage = 
ApplicationPackageBuilder.fromDeploymentXml(deploymentSpec); var canary = tester.newDeploymentContext("t", "a", "canary").submit(applicationPackage); var conservative = tester.newDeploymentContext("t", "a", "default"); canary.runJob(systemTest) .runJob(stagingTest); conservative.runJob(productionEuWest1) .runJob(testEuWest1); canary.submit(applicationPackage) .runJob(systemTest) .runJob(stagingTest); tester.outstandingChangeDeployer().run(); conservative.runJob(productionEuWest1) .runJob(testEuWest1); tester.controllerTester().upgradeSystem(new Version("7.7.7")); tester.upgrader().maintain(); canary.runJob(systemTest) .runJob(stagingTest); tester.upgrader().maintain(); conservative.runJob(productionEuWest1) .runJob(testEuWest1); } @Test public void testEagerTests() { var app = tester.newDeploymentContext().submit().deploy(); Version version1 = new Version("7.8.9"); ApplicationVersion build1 = app.lastSubmission().get(); tester.controllerTester().upgradeSystem(version1); tester.upgrader().maintain(); app.runJob(stagingTest); app.submit(); ApplicationVersion build2 = app.lastSubmission().get(); assertNotEquals(build1, build2); tester.triggerJobs(); app.assertRunning(stagingTest); assertEquals(version1, app.instanceJobs().get(stagingTest).lastCompleted().get().versions().targetPlatform()); assertEquals(build1, app.instanceJobs().get(stagingTest).lastCompleted().get().versions().targetApplication()); assertEquals(version1, app.instanceJobs().get(stagingTest).lastTriggered().get().versions().sourcePlatform().get()); assertEquals(build1, app.instanceJobs().get(stagingTest).lastTriggered().get().versions().sourceApplication().get()); assertEquals(version1, app.instanceJobs().get(stagingTest).lastTriggered().get().versions().targetPlatform()); assertEquals(build2, app.instanceJobs().get(stagingTest).lastTriggered().get().versions().targetApplication()); app.runJob(systemTest) .runJob(productionUsCentral1) .runJob(productionUsEast3) .runJob(productionUsWest1); 
tester.outstandingChangeDeployer().run(); assertEquals(RunStatus.running, tester.jobs().last(app.instanceId(), stagingTest).get().status()); app.runJob(stagingTest); tester.triggerJobs(); app.assertNotRunning(stagingTest); } @Test public void testTriggeringOfIdleTestJobsWhenFirstDeploymentIsOnNewerVersionThanChange() { ApplicationPackage applicationPackage = new ApplicationPackageBuilder().systemTest() .stagingTest() .region("us-east-3") .region("us-west-1") .build(); var app = tester.newDeploymentContext().submit(applicationPackage).deploy(); var appToAvoidVersionGC = tester.newDeploymentContext("g", "c", "default").submit().deploy(); Version version2 = new Version("7.8.9"); Version version3 = new Version("8.9.10"); tester.controllerTester().upgradeSystem(version2); tester.deploymentTrigger().triggerChange(appToAvoidVersionGC.instanceId(), Change.of(version2)); appToAvoidVersionGC.deployPlatform(version2); tester.controllerTester().upgradeSystem(version3); tester.deploymentTrigger().triggerChange(app.instanceId(), Change.of(version3)); app.runJob(systemTest).runJob(stagingTest); tester.triggerJobs(); tester.upgrader().overrideConfidence(version3, VespaVersion.Confidence.broken); tester.controllerTester().computeVersionStatus(); tester.upgrader().run(); assertEquals(Optional.of(version2), app.instance().change().platform()); app.runJob(systemTest) .runJob(productionUsEast3) .runJob(stagingTest) .runJob(productionUsWest1); assertEquals(version3, app.instanceJobs().get(productionUsEast3).lastSuccess().get().versions().targetPlatform()); assertEquals(version2, app.instanceJobs().get(productionUsWest1).lastSuccess().get().versions().targetPlatform()); assertEquals(Map.of(), app.deploymentStatus().jobsToRun()); assertEquals(Change.empty(), app.instance().change()); assertEquals(List.of(), tester.jobs().active()); } @Test public void testRetriggerQueue() { var app = tester.newDeploymentContext().submit().deploy(); app.submit(); tester.triggerJobs(); 
// --- tail of testRetriggerQueue (method opens on the previous line) ---
// Re-trigger a job directly, then queue two re-trigger requests for the same deployment and
// verify the queue deduplicates them to a single persisted entry.
tester.deploymentTrigger().reTrigger(app.instanceId(), productionUsEast3);
tester.deploymentTrigger().reTriggerOrAddToQueue(app.deploymentIdIn(ZoneId.from("prod", "us-east-3")));
tester.deploymentTrigger().reTriggerOrAddToQueue(app.deploymentIdIn(ZoneId.from("prod", "us-east-3")));
List<RetriggerEntry> retriggerEntries = tester.controller().curator().readRetriggerEntries();
// Use the statically imported assertEquals, for consistency with the rest of this class
// (was Assert.assertEquals).
assertEquals(1, retriggerEntries.size());
}

}
Doesn't this mean that a given deployment will send one suspension-status request to the orchestrator per host in the node repository? In larger zones that is ~400 requests.
/**
 * Returns whether a tenant node may be placed on the given host.
 *
 * A host qualifies only if its type can run tenant nodes, it is not marked for retirement,
 * its own allocation (if any) is not retired, and it is not suspended. With dynamic
 * provisioning, active, ready and provisioned hosts all qualify; otherwise only active ones.
 */
public boolean canAllocateTenantNodeTo(Node host, boolean dynamicProvisioning) {
    boolean hostIsUsable =    host.type().canRun(NodeType.tenant)
                           && ! host.status().wantToRetire()
                           && ! host.allocation().map(alloc -> alloc.membership().retired()).orElse(false)
                           && ! suspended(host);
    if ( ! hostIsUsable) return false;
    return dynamicProvisioning
           ? EnumSet.of(Node.State.active, Node.State.ready, Node.State.provisioned).contains(host.state())
           : host.state() == Node.State.active;
}
if (suspended(host)) return false;
/**
 * Returns whether a tenant node may be placed on the given host.
 *
 * <p>Rejects hosts whose type cannot run tenant nodes, hosts marked for retirement, hosts whose
 * own allocation is retired, and suspended hosts. With dynamic provisioning, hosts in state
 * active, ready or provisioned qualify; otherwise only active hosts do.
 *
 * <p>NOTE(review): {@code suspended(host)} presumably asks the orchestrator about each host
 * individually — in large zones that could mean hundreds of requests per deployment; verify,
 * and consider a batched suspension query if so.
 */
public boolean canAllocateTenantNodeTo(Node host, boolean dynamicProvisioning) { if ( ! host.type().canRun(NodeType.tenant)) return false; if (host.status().wantToRetire()) return false; if (host.allocation().map(alloc -> alloc.membership().retired()).orElse(false)) return false; if (suspended(host)) return false; if (dynamicProvisioning) return EnumSet.of(Node.State.active, Node.State.ready, Node.State.provisioned).contains(host.state()); else return host.state() == Node.State.active; }
/**
 * Access to, and mutation of, the nodes stored in the node repository, including their
 * state transitions and the locks guarding them.
 */
class Nodes {

    private static final Logger log = Logger.getLogger(Nodes.class.getName());

    private final CuratorDatabaseClient db;
    private final Zone zone;
    private final Clock clock;
    private final Orchestrator orchestrator;

    public Nodes(CuratorDatabaseClient db, Zone zone, Clock clock, Orchestrator orchestrator) {
        this.zone = zone;
        this.clock = clock;
        this.db = db;
        this.orchestrator = orchestrator;
    }

    /** Read and write all nodes to make sure they are stored in the latest version of the serialized format */
    public void rewrite() {
        Instant start = clock.instant();
        int nodesWritten = 0;
        for (Node.State state : Node.State.values()) {
            List<Node> nodes = db.readNodes(state);
            // Writing the nodes back unchanged re-serializes them in the current format
            db.writeTo(state, nodes, Agent.system, Optional.empty());
            nodesWritten += nodes.size();
        }
        Instant end = clock.instant();
        log.log(Level.INFO, String.format("Rewrote %d nodes in %s", nodesWritten, Duration.between(start, end)));
    }

    /**
     * Finds and returns the node with the hostname in any of the given states, or empty if not found
     *
     * @param hostname the full host name of the node
     * @param inState the states the node may be in. If no states are given, it will be returned from any state
     * @return the node, or empty if it was not found in any of the given states
     */
    public Optional<Node> node(String hostname, Node.State... inState) {
        return db.readNode(hostname, inState);
    }

    /**
     * Returns a list of nodes in this repository in any of the given states
     *
     * @param inState the states to return nodes from. If no states are given, all nodes of the given type are returned
     */
    public NodeList list(Node.State... inState) {
        return NodeList.copyOf(db.readNodes(inState));
    }

    /** Returns a locked list of all nodes in this repository */
    public LockedNodeList list(Mutex lock) {
        return new LockedNodeList(list().asList(), lock);
    }

    /**
     * Returns whether the zone managed by this node repository seems to be working.
     * If too many nodes are not responding, there is probably some zone-wide issue
     * and we should probably refrain from making changes to it.
     */
    public boolean isWorking() {
        NodeList activeNodes = list(Node.State.active);
        if (activeNodes.size() <= 5) return true; // Too few nodes to draw a conclusion from
        NodeList downNodes = activeNodes.down();
        // Working iff at most 20% of the active nodes are down
        return ! ( (double)downNodes.size() / (double)activeNodes.size() > 0.2 );
    }

    /** Adds a list of newly created reserved nodes to the node repository */
    public List<Node> addReservedNodes(LockedNodeList nodes) {
        for (Node node : nodes) {
            if ( node.flavor().getType() != Flavor.Type.DOCKER_CONTAINER)
                illegal("Cannot add " + node + ": This is not a child node");
            if (node.allocation().isEmpty())
                illegal("Cannot add " + node + ": Child nodes need to be allocated");
            Optional<Node> existing = node(node.hostname());
            if (existing.isPresent())
                illegal("Cannot add " + node + ": A node with this name already exists (" +
                        existing.get() + ", " + existing.get().history() + "). Node to be added: " +
                        node + ", " + node.history());
        }
        return db.addNodesInState(nodes.asList(), Node.State.reserved, Agent.system);
    }

    /**
     * Adds a list of (newly created) nodes to the node repository as provisioned nodes.
     * If any of the nodes already exists in the deprovisioned state, the new node will be merged
     * with the history of that node.
     */
    public List<Node> addNodes(List<Node> nodes, Agent agent) {
        try (Mutex lock = lockUnallocated()) {
            List<Node> nodesToAdd = new ArrayList<>();
            List<Node> nodesToRemove = new ArrayList<>();
            for (int i = 0; i < nodes.size(); i++) {
                var node = nodes.get(i);

                // Check for duplicates in the argument list itself
                for (int j = 0; j < i; j++) {
                    if (node.equals(nodes.get(j)))
                        illegal("Cannot add nodes: " + node + " is duplicated in the argument list");
                }

                Optional<Node> existing = node(node.hostname());
                if (existing.isPresent()) {
                    if (existing.get().state() != Node.State.deprovisioned)
                        illegal("Cannot add " + node + ": A node with this name already exists");
                    // Carry over history, reports and selected status fields from the deprovisioned node
                    node = node.with(existing.get().history());
                    node = node.with(existing.get().reports());
                    node = node.with(node.status().withFailCount(existing.get().status().failCount()));
                    if (existing.get().status().firmwareVerifiedAt().isPresent())
                        node = node.with(node.status().withFirmwareVerifiedAt(existing.get().status().firmwareVerifiedAt().get()));
                    boolean rebuilding = existing.get().status().wantToRebuild();
                    if (rebuilding) {
                        node = node.with(node.status().withWantToRetire(existing.get().status().wantToRetire(), false, rebuilding));
                    }
                    nodesToRemove.add(existing.get());
                }

                nodesToAdd.add(node);
            }
            NestedTransaction transaction = new NestedTransaction();
            List<Node> resultingNodes = db.addNodesInState(IP.Config.verify(nodesToAdd, list(lock)), Node.State.provisioned, agent, transaction);
            db.removeNodes(nodesToRemove, transaction);
            transaction.commit();
            return resultingNodes;
        }
    }

    /** Sets a list of nodes ready and returns the nodes in the ready state */
    public List<Node> setReady(List<Node> nodes, Agent agent, String reason) {
        try (Mutex lock = lockUnallocated()) {
            List<Node> nodesWithResetFields = nodes.stream()
                    .map(node -> {
                        if (node.state() != Node.State.provisioned && node.state() != Node.State.dirty)
                            illegal("Can not set " + node + " ready. It is not provisioned or dirty.");
                        return node.withWantToRetire(false, false, false, Agent.system, clock.instant());
                    })
                    .collect(Collectors.toList());
            return db.writeTo(Node.State.ready, nodesWithResetFields, agent, Optional.of(reason));
        }
    }

    public Node setReady(String hostname, Agent agent, String reason) {
        Node nodeToReady = requireNode(hostname);
        if (nodeToReady.state() == Node.State.ready) return nodeToReady; // Already ready: no-op
        return setReady(List.of(nodeToReady), agent, reason).get(0);
    }

    /** Reserve nodes. This method does <b>not</b> lock the node repository */
    public List<Node> reserve(List<Node> nodes) {
        return db.writeTo(Node.State.reserved, nodes, Agent.application, Optional.empty());
    }

    /** Activate nodes. This method does <b>not</b> lock the node repository */
    public List<Node> activate(List<Node> nodes, NestedTransaction transaction) {
        return db.writeTo(Node.State.active, nodes, Agent.application, Optional.empty(), transaction);
    }

    /**
     * Sets a list of nodes to have their allocation removable (active to inactive) in the node repository.
     *
     * @param application the application the nodes belong to
     * @param nodes the nodes to make removable. These nodes MUST be in the active state.
     */
    public void setRemovable(ApplicationId application, List<Node> nodes) {
        try (Mutex lock = lock(application)) {
            List<Node> removableNodes = nodes.stream()
                    .map(node -> node.with(node.allocation().get().removable(true)))
                    .collect(Collectors.toList());
            write(removableNodes, lock);
        }
    }

    /**
     * Deactivates these nodes in a transaction and returns the nodes in the new state which will hold if the
     * transaction commits.
     */
    public List<Node> deactivate(List<Node> nodes, ApplicationTransaction transaction) {
        // Outside production (and in CD systems), deactivated nodes are deallocated directly
        if ( ! zone.environment().isProduction() || zone.system().isCd())
            return deallocate(nodes, Agent.application, "Deactivated by application", transaction.nested());

        // In production, stateful nodes are kept inactive so their data can be reused
        var stateless = NodeList.copyOf(nodes).stateless();
        var stateful = NodeList.copyOf(nodes).stateful();
        List<Node> written = new ArrayList<>();
        written.addAll(deallocate(stateless.asList(), Agent.application, "Deactivated by application", transaction.nested()));
        written.addAll(db.writeTo(Node.State.inactive, stateful.asList(), Agent.application, Optional.empty(), transaction.nested()));
        return written;
    }

    /**
     * Fails these nodes in a transaction and returns the nodes in the new state which will hold if the
     * transaction commits.
     */
    public List<Node> fail(List<Node> nodes, ApplicationTransaction transaction) {
        return fail(nodes, Agent.application, "Failed by application", transaction.nested());
    }

    public List<Node> fail(List<Node> nodes, Agent agent, String reason) {
        NestedTransaction transaction = new NestedTransaction();
        nodes = fail(nodes, agent, reason, transaction);
        transaction.commit();
        return nodes;
    }

    private List<Node> fail(List<Node> nodes, Agent agent, String reason, NestedTransaction transaction) {
        // Clear any wantToFail flag, as the node is now actually being failed
        nodes = nodes.stream()
                     .map(n -> n.withWantToFail(false, agent, clock.instant()))
                     .collect(Collectors.toList());
        return db.writeTo(Node.State.failed, nodes, agent, Optional.of(reason), transaction);
    }

    /** Move nodes to the dirty state */
    public List<Node> deallocate(List<Node> nodes, Agent agent, String reason) {
        return performOn(NodeList.copyOf(nodes), (node, lock) -> deallocate(node, agent, reason));
    }

    public List<Node> deallocateRecursively(String hostname, Agent agent, String reason) {
        Node nodeToDirty = node(hostname).orElseThrow(() ->
                new IllegalArgumentException("Could not deallocate " + hostname + ": Node not found"));

        // For hosts, also deallocate all children which are not already dirty
        List<Node> nodesToDirty =
                (nodeToDirty.type().isHost() ?
                 Stream.concat(list().childrenOf(hostname).asList().stream(), Stream.of(nodeToDirty)) :
                 Stream.of(nodeToDirty))
                        .filter(node -> node.state() != Node.State.dirty)
                        .collect(Collectors.toList());

        List<String> hostnamesNotAllowedToDirty = nodesToDirty.stream()
                                                              .filter(node -> node.state() != Node.State.provisioned)
                                                              .filter(node -> node.state() != Node.State.failed)
                                                              .filter(node -> node.state() != Node.State.parked)
                                                              .filter(node -> node.state() != Node.State.breakfixed)
                                                              .map(Node::hostname)
                                                              .collect(Collectors.toList());
        if ( ! hostnamesNotAllowedToDirty.isEmpty())
            illegal("Could not deallocate " + nodeToDirty + ": " +
                    hostnamesNotAllowedToDirty + " are not in states [provisioned, failed, parked, breakfixed]");

        return nodesToDirty.stream().map(node -> deallocate(node, agent, reason)).collect(Collectors.toList());
    }

    /**
     * Set a node dirty or parked, allowed if it is in the provisioned, inactive, failed or parked state.
     * Use this to clean newly provisioned nodes or to recycle failed nodes which have been repaired or put on hold.
     */
    public Node deallocate(Node node, Agent agent, String reason) {
        NestedTransaction transaction = new NestedTransaction();
        Node deallocated = deallocate(node, agent, reason, transaction);
        transaction.commit();
        return deallocated;
    }

    public List<Node> deallocate(List<Node> nodes, Agent agent, String reason, NestedTransaction transaction) {
        return nodes.stream().map(node -> deallocate(node, agent, reason, transaction)).collect(Collectors.toList());
    }

    public Node deallocate(Node node, Agent agent, String reason, NestedTransaction transaction) {
        if (parkOnDeallocationOf(node, agent)) {
            return park(node.hostname(), false, agent, reason, transaction);
        } else {
            return db.writeTo(Node.State.dirty, List.of(node), agent, Optional.of(reason), transaction).get(0);
        }
    }

    /**
     * Fails this node and returns it in its new state.
     *
     * @return the node in its new state
     * @throws NoSuchNodeException if the node is not found
     */
    public Node fail(String hostname, Agent agent, String reason) {
        return fail(hostname, true, agent, reason);
    }

    public Node fail(String hostname, boolean keepAllocation, Agent agent, String reason) {
        return move(hostname, Node.State.failed, agent, keepAllocation, Optional.of(reason));
    }

    /**
     * Fails all the nodes that are children of hostname before finally failing the hostname itself.
     * Non-active nodes are failed immediately, while active nodes are marked as wantToFail.
     * The host is failed if it has no active nodes and marked wantToFail if it has.
     *
     * @return all the nodes that were changed by this request
     */
    public List<Node> failOrMarkRecursively(String hostname, Agent agent, String reason) {
        NodeList children = list().childrenOf(hostname);
        List<Node> changed = performOn(children, (node, lock) -> failOrMark(node, agent, reason, lock));

        if (children.state(Node.State.active).isEmpty())
            changed.add(move(hostname, Node.State.failed, agent, true, Optional.of(reason)));
        else
            changed.addAll(performOn(NodeList.of(node(hostname).orElseThrow()), (node, lock) -> failOrMark(node, agent, reason, lock)));

        return changed;
    }

    private Node failOrMark(Node node, Agent agent, String reason, Mutex lock) {
        if (node.state() == Node.State.active) {
            node = node.withWantToFail(true, agent, clock.instant());
            write(node, lock);
            return node;
        } else {
            return move(node.hostname(), Node.State.failed, agent, true, Optional.of(reason));
        }
    }

    /**
     * Parks this node and returns it in its new state.
     *
     * @return the node in its new state
     * @throws NoSuchNodeException if the node is not found
     */
    public Node park(String hostname, boolean keepAllocation, Agent agent, String reason) {
        NestedTransaction transaction = new NestedTransaction();
        Node parked = park(hostname, keepAllocation, agent, reason, transaction);
        transaction.commit();
        return parked;
    }

    private Node park(String hostname, boolean keepAllocation, Agent agent, String reason, NestedTransaction transaction) {
        return move(hostname, Node.State.parked, agent, keepAllocation, Optional.of(reason), transaction);
    }

    /**
     * Parks all the nodes that are children of hostname before finally parking the hostname itself.
     *
     * @return List of all the parked nodes in their new state
     */
    public List<Node> parkRecursively(String hostname, Agent agent, String reason) {
        return moveRecursively(hostname, Node.State.parked, agent, Optional.of(reason));
    }

    /**
     * Moves a previously failed or parked node back to the active state.
     *
     * @return the node in its new state
     * @throws NoSuchNodeException if the node is not found
     */
    public Node reactivate(String hostname, Agent agent, String reason) {
        return move(hostname, Node.State.active, agent, true, Optional.of(reason));
    }

    /**
     * Moves a host to breakfixed state, removing any children.
     */
    public List<Node> breakfixRecursively(String hostname, Agent agent, String reason) {
        Node node = requireNode(hostname);
        try (Mutex lock = lockUnallocated()) {
            requireBreakfixable(node);
            NestedTransaction transaction = new NestedTransaction();
            List<Node> removed = removeChildren(node, false, transaction);
            removed.add(move(node.hostname(), Node.State.breakfixed, agent, true, Optional.of(reason), transaction));
            transaction.commit();
            return removed;
        }
    }

    private List<Node> moveRecursively(String hostname, Node.State toState, Agent agent, Optional<String> reason) {
        NestedTransaction transaction = new NestedTransaction();
        List<Node> moved = list().childrenOf(hostname).asList().stream()
                                 .map(child -> move(child.hostname(), toState, agent, true, reason, transaction))
                                 .collect(Collectors.toList());
        moved.add(move(hostname, toState, agent, true, reason, transaction));
        transaction.commit();
        return moved;
    }

    /** Move a node to given state */
    private Node move(String hostname, Node.State toState, Agent agent, boolean keepAllocation, Optional<String> reason) {
        NestedTransaction transaction = new NestedTransaction();
        Node moved = move(hostname, toState, agent, keepAllocation, reason, transaction);
        transaction.commit();
        return moved;
    }

    /** Move a node to given state as part of a transaction */
    private Node move(String hostname, Node.State toState, Agent agent, boolean keepAllocation, Optional<String> reason,
                      NestedTransaction transaction) {
        try (NodeMutex lock = lockAndGetRequired(hostname)) {
            Node node = lock.node();
            if (toState == Node.State.active) {
                if (node.allocation().isEmpty()) illegal("Could not set " + node + " active: It has no allocation");
                if (!keepAllocation) illegal("Could not set " + node + " active: Requested to discard allocation");
                // No other active node may hold the same cluster and index for the same owner
                for (Node currentActive : list(Node.State.active).owner(node.allocation().get().owner())) {
                    if (node.allocation().get().membership().cluster().equals(currentActive.allocation().get().membership().cluster())
                        && node.allocation().get().membership().index() == currentActive.allocation().get().membership().index())
                        illegal("Could not set " + node + " active: Same cluster and index as " + currentActive);
                }
            }
            if (!keepAllocation && node.allocation().isPresent()) {
                node = node.withoutAllocation();
            }
            if (toState == Node.State.deprovisioned) {
                node = node.with(IP.Config.EMPTY);
            }
            return db.writeTo(toState, List.of(node), agent, reason, transaction).get(0);
        }
    }

    /*
     * This method is used by the REST API to handle readying nodes for new allocations. For Linux
     * containers this will remove the node from node repository, otherwise the node will be moved to state ready.
     */
    public Node markNodeAvailableForNewAllocation(String hostname, Agent agent, String reason) {
        Node node = requireNode(hostname);
        if (node.flavor().getType() == Flavor.Type.DOCKER_CONTAINER && node.type() == NodeType.tenant) {
            if (node.state() != Node.State.dirty)
                illegal("Cannot make " + node + " available for new allocation as it is not in state [dirty]");
            return removeRecursively(node, true).get(0);
        }

        if (node.state() == Node.State.ready) return node;

        // A node cannot be readied if its parent host has hard failures
        Node parentHost = node.parentHostname().flatMap(this::node).orElse(node);
        List<String> failureReasons = NodeFailer.reasonsToFailHost(parentHost);
        if ( ! failureReasons.isEmpty())
            illegal(node + " cannot be readied because it has hard failures: " + failureReasons);

        return setReady(List.of(node), agent, reason).get(0);
    }

    /**
     * Removes all the nodes that are children of hostname before finally removing the hostname itself.
     *
     * @return a List of all the nodes that have been removed or (for hosts) deprovisioned
     */
    public List<Node> removeRecursively(String hostname) {
        Node node = requireNode(hostname);
        return removeRecursively(node, false);
    }

    public List<Node> removeRecursively(Node node, boolean force) {
        try (Mutex lock = lockUnallocated()) {
            requireRemovable(node, false, force);
            NestedTransaction transaction = new NestedTransaction();
            final List<Node> removed;
            if (!node.type().isHost()) {
                removed = List.of(node);
                db.removeNodes(removed, transaction);
            } else {
                removed = removeChildren(node, force, transaction);
                if (zone.getCloud().dynamicProvisioning()) {
                    // Dynamically provisioned hosts are deleted outright
                    db.removeNodes(List.of(node), transaction);
                } else {
                    // Statically provisioned hosts are kept as deprovisioned, preserving history
                    move(node.hostname(), Node.State.deprovisioned, Agent.system, false, Optional.empty(), transaction);
                }
                removed.add(node);
            }
            transaction.commit();
            return removed;
        }
    }

    /** Forgets a deprovisioned node. This removes all traces of the node in the node repository. */
    public void forget(Node node) {
        if (node.state() != Node.State.deprovisioned)
            throw new IllegalArgumentException(node + " must be deprovisioned before it can be forgotten");
        if (node.status().wantToRebuild())
            throw new IllegalArgumentException(node + " is rebuilding and cannot be forgotten");
        NestedTransaction transaction = new NestedTransaction();
        db.removeNodes(List.of(node), transaction);
        transaction.commit();
    }

    private List<Node> removeChildren(Node node, boolean force, NestedTransaction transaction) {
        List<Node> children = list().childrenOf(node).asList();
        children.forEach(child -> requireRemovable(child, true, force));
        db.removeNodes(children, transaction);
        return new ArrayList<>(children);
    }

    /**
     * Throws if the given node cannot be removed. Removal is allowed if:
     *  - Tenant node:
     *    - non-recursively: node is unallocated
     *    - recursively: node is unallocated or node is in failed|parked
     *  - Host node: iff in state provisioned|failed|parked
     *  - Child node:
     *    - non-recursively: node in state ready
     *    - recursively: child is in state provisioned|failed|parked|dirty|ready
     */
    private void requireRemovable(Node node, boolean removingRecursively, boolean force) {
        if (force) return;

        if (node.type() == NodeType.tenant && node.allocation().isPresent()) {
            EnumSet<Node.State> removableStates = EnumSet.of(Node.State.failed, Node.State.parked);
            if (!removingRecursively || !removableStates.contains(node.state()))
                illegal(node + " is currently allocated and cannot be removed while in " + node.state());
        }

        final Set<Node.State> removableStates;
        if (node.type().isHost()) {
            removableStates = EnumSet.of(Node.State.provisioned, Node.State.failed, Node.State.parked);
        } else {
            removableStates = removingRecursively
                    ? EnumSet.of(Node.State.provisioned, Node.State.failed, Node.State.parked, Node.State.dirty, Node.State.ready)
                    : EnumSet.of(Node.State.ready);
        }
        if (!removableStates.contains(node.state()))
            illegal(node + " can not be removed while in " + node.state());
    }

    /**
     * Throws if given node cannot be breakfixed.
     * Breakfix is allowed if the following is true:
     *  - Node is tenant host
     *  - Node is in zone without dynamic provisioning
     *  - Node is in parked or failed state
     */
    private void requireBreakfixable(Node node) {
        if (zone.getCloud().dynamicProvisioning()) {
            illegal("Can not breakfix in zone: " + zone);
        }
        if (node.type() != NodeType.host) {
            illegal(node + " can not be breakfixed as it is not a tenant host");
        }
        Set<Node.State> legalStates = EnumSet.of(Node.State.failed, Node.State.parked);
        if (! legalStates.contains(node.state())) {
            illegal(node + " can not be removed as it is not in the states " + legalStates);
        }
    }

    /**
     * Increases the restart generation of the active nodes matching given filter.
     *
     * @return the nodes in their new state
     */
    public List<Node> restartActive(Predicate<Node> filter) {
        return restart(NodeFilter.in(Set.of(Node.State.active)).and(filter));
    }

    /**
     * Increases the restart generation of the any nodes matching given filter.
     *
     * @return the nodes in their new state
     */
    public List<Node> restart(Predicate<Node> filter) {
        return performOn(filter, (node, lock) -> write(node.withRestart(node.allocation().get().restartGeneration().withIncreasedWanted()), lock));
    }

    /**
     * Increases the reboot generation of the nodes matching the filter.
     *
     * @return the nodes in their new state
     */
    public List<Node> reboot(Predicate<Node> filter) {
        return performOn(filter, (node, lock) -> write(node.withReboot(node.status().reboot().withIncreasedWanted()), lock));
    }

    /**
     * Set target OS version of all nodes matching given filter.
     *
     * @return the nodes in their new state
     */
    public List<Node> upgradeOs(Predicate<Node> filter, Optional<Version> version) {
        return performOn(filter, (node, lock) -> {
            var newStatus = node.status().withOsVersion(node.status().osVersion().withWanted(version));
            return write(node.with(newStatus), lock);
        });
    }

    /** Retire nodes matching given filter */
    public List<Node> retire(Predicate<Node> filter, Agent agent, Instant instant) {
        return performOn(filter, (node, lock) -> write(node.withWantToRetire(true, agent, instant), lock));
    }

    /** Retire and deprovision given host and all of its children */
    public List<Node> deprovision(String hostname, Agent agent, Instant instant) {
        return decommission(hostname, DecommissionOperation.deprovision, agent, instant);
    }

    /** Retire and rebuild given host and all of its children */
    public List<Node> rebuild(String hostname, Agent agent, Instant instant) {
        return decommission(hostname, DecommissionOperation.rebuild, agent, instant);
    }

    private List<Node> decommission(String hostname, DecommissionOperation op, Agent agent, Instant instant) {
        Optional<NodeMutex> nodeMutex = lockAndGet(hostname);
        if (nodeMutex.isEmpty()) return List.of();
        Node host = nodeMutex.get().node();
        if (!host.type().isHost()) throw new IllegalArgumentException("Cannot " + op + " non-host " + host);

        List<Node> result;
        boolean wantToDeprovision = op == DecommissionOperation.deprovision;
        boolean wantToRebuild = op == DecommissionOperation.rebuild;
        try (NodeMutex lock = nodeMutex.get(); Mutex allocationLock = lockUnallocated()) {
            // Re-read the host under the allocation lock before mutating it
            host = lock.node();
            result = performOn(list(allocationLock).childrenOf(host), (node, nodeLock) -> {
                Node newNode = node.withWantToRetire(true, wantToDeprovision, wantToRebuild, agent, instant);
                return write(newNode, nodeLock);
            });
            Node newHost = host.withWantToRetire(true, wantToDeprovision, wantToRebuild, agent, instant);
            result.add(write(newHost, lock));
        }
        return result;
    }

    /**
     * Writes this node after it has changed some internal state but NOT changed its state field.
     * This does NOT lock the node repository implicitly, but callers are expected to already hold the lock.
     *
     * @param lock already acquired lock
     * @return the written node for convenience
     */
    public Node write(Node node, Mutex lock) { return write(List.of(node), lock).get(0); }

    /**
     * Writes these nodes after they have changed some internal state but NOT changed their state field.
     * This does NOT lock the node repository implicitly, but callers are expected to already hold the lock.
     *
     * @param lock already acquired lock
     * @return the written nodes for convenience
     */
    public List<Node> write(List<Node> nodes, @SuppressWarnings("unused") Mutex lock) {
        return db.writeTo(nodes, Agent.system, Optional.empty());
    }

    private List<Node> performOn(Predicate<Node> filter, BiFunction<Node, Mutex, Node> action) {
        return performOn(list().matching(filter), action);
    }

    /**
     * Performs an operation requiring locking on all nodes matching some filter.
     *
     * @param action the action to perform
     * @return the set of nodes on which the action was performed, as they became as a result of the operation
     */
    private List<Node> performOn(NodeList nodes, BiFunction<Node, Mutex, Node> action) {
        // Group nodes by owning application, so each group can be processed under its own lock
        List<Node> unallocatedNodes = new ArrayList<>();
        ListMap<ApplicationId, Node> allocatedNodes = new ListMap<>();
        for (Node node : nodes) {
            if (node.allocation().isPresent())
                allocatedNodes.put(node.allocation().get().owner(), node);
            else
                unallocatedNodes.add(node);
        }

        List<Node> resultingNodes = new ArrayList<>();
        try (Mutex lock = lockUnallocated()) {
            for (Node node : unallocatedNodes) {
                // Re-read under lock; skip nodes which disappeared in the meantime
                Optional<Node> currentNode = db.readNode(node.hostname());
                if (currentNode.isEmpty()) continue;
                resultingNodes.add(action.apply(currentNode.get(), lock));
            }
        }
        for (Map.Entry<ApplicationId, List<Node>> applicationNodes : allocatedNodes.entrySet()) {
            try (Mutex lock = lock(applicationNodes.getKey())) {
                for (Node node : applicationNodes.getValue()) {
                    Optional<Node> currentNode = db.readNode(node.hostname());
                    if (currentNode.isEmpty()) continue;
                    resultingNodes.add(action.apply(currentNode.get(), lock));
                }
            }
        }
        return resultingNodes;
    }

    public boolean canAllocateTenantNodeTo(Node host) {
        return canAllocateTenantNodeTo(host, zone.getCloud().dynamicProvisioning());
    }

    // NOTE(review): this is one remote orchestrator request per call — callers iterating over all
    // hosts will issue one request per host; consider batching or caching if that becomes costly.
    public boolean suspended(Node node) {
        try {
            return orchestrator.getNodeStatus(new HostName(node.hostname())).isSuspended();
        } catch (HostNameNotFoundException e) {
            // Nodes not yet known to the orchestrator are treated as not suspended
            return false;
        }
    }

    /** Create a lock which provides exclusive rights to making changes to the given application */
    public Mutex lock(ApplicationId application) { return db.lock(application); }

    /** Create a lock with a timeout which provides exclusive rights to making changes to the given application */
    public Mutex lock(ApplicationId application, Duration timeout) { return db.lock(application, timeout); }

    /** Create a lock which provides exclusive rights to modifying unallocated nodes */
    public Mutex lockUnallocated() { return db.lockInactive(); }

    /** Returns the unallocated/application lock, and the node acquired under that lock. */
    public Optional<NodeMutex> lockAndGet(Node node) {
        Node staleNode = node;

        // Retry because the node's owner (and hence which lock applies) may change between
        // reading the node and acquiring the lock
        final int maxRetries = 4;
        for (int i = 0; i < maxRetries; ++i) {
            Mutex lockToClose = lock(staleNode);
            try {
                Optional<Node> freshNode = node(staleNode.hostname(), staleNode.state());
                if (freshNode.isEmpty()) {
                    freshNode = node(staleNode.hostname());
                    if (freshNode.isEmpty()) {
                        return Optional.empty();
                    }
                }

                if (Objects.equals(freshNode.get().allocation().map(Allocation::owner),
                                   staleNode.allocation().map(Allocation::owner))) {
                    NodeMutex nodeMutex = new NodeMutex(freshNode.get(), lockToClose);
                    lockToClose = null; // Ownership transferred to the returned NodeMutex
                    return Optional.of(nodeMutex);
                }

                staleNode = freshNode.get();
            } finally {
                if (lockToClose != null) lockToClose.close();
            }
        }

        throw new IllegalStateException("Giving up (after " + maxRetries + " attempts) " +
                                        "fetching an up to date node under lock: " + node.hostname());
    }

    /** Returns the unallocated/application lock, and the node acquired under that lock. */
    public Optional<NodeMutex> lockAndGet(String hostname) {
        return node(hostname).flatMap(this::lockAndGet);
    }

    /** Returns the unallocated/application lock, and the node acquired under that lock. */
    public NodeMutex lockAndGetRequired(Node node) {
        return lockAndGet(node).orElseThrow(() -> new NoSuchNodeException("No node with hostname '" + node.hostname() + "'"));
    }

    /** Returns the unallocated/application lock, and the node acquired under that lock. */
    public NodeMutex lockAndGetRequired(String hostname) {
        return lockAndGet(hostname).orElseThrow(() -> new NoSuchNodeException("No node with hostname '" + hostname + "'"));
    }

    private Mutex lock(Node node) {
        return node.allocation().isPresent() ? lock(node.allocation().get().owner()) : lockUnallocated();
    }

    private Node requireNode(String hostname) {
        return node(hostname).orElseThrow(() -> new NoSuchNodeException("No node with hostname '" + hostname + "'"));
    }

    private void illegal(String message) { throw new IllegalArgumentException(message); }

    /** Returns whether node should be parked when deallocated by given agent */
    private static boolean parkOnDeallocationOf(Node node, Agent agent) {
        if (node.state() == Node.State.parked) return false;
        if (agent == Agent.operator) return false;
        if (!node.type().isHost() && node.status().wantToDeprovision()) return false;
        boolean retirementRequestedByOperator = node.status().wantToRetire() &&
                                                node.history().event(History.Event.Type.wantToRetire)
                                                    .map(History.Event::agent)
                                                    .map(a -> a == Agent.operator)
                                                    .orElse(false);
        return node.status().wantToDeprovision()
               || node.status().wantToRebuild()
               || retirementRequestedByOperator;
    }

    /** The different ways a host can be decommissioned */
    private enum DecommissionOperation {
        deprovision,
        rebuild,
    }

}
class Nodes { private static final Logger log = Logger.getLogger(Nodes.class.getName()); private final CuratorDatabaseClient db; private final Zone zone; private final Clock clock; private final Orchestrator orchestrator; public Nodes(CuratorDatabaseClient db, Zone zone, Clock clock, Orchestrator orchestrator) { this.zone = zone; this.clock = clock; this.db = db; this.orchestrator = orchestrator; } /** Read and write all nodes to make sure they are stored in the latest version of the serialized format */ public void rewrite() { Instant start = clock.instant(); int nodesWritten = 0; for (Node.State state : Node.State.values()) { List<Node> nodes = db.readNodes(state); db.writeTo(state, nodes, Agent.system, Optional.empty()); nodesWritten += nodes.size(); } Instant end = clock.instant(); log.log(Level.INFO, String.format("Rewrote %d nodes in %s", nodesWritten, Duration.between(start, end))); } /** * Finds and returns the node with the hostname in any of the given states, or empty if not found * * @param hostname the full host name of the node * @param inState the states the node may be in. If no states are given, it will be returned from any state * @return the node, or empty if it was not found in any of the given states */ public Optional<Node> node(String hostname, Node.State... inState) { return db.readNode(hostname, inState); } /** * Returns a list of nodes in this repository in any of the given states * * @param inState the states to return nodes from. If no states are given, all nodes of the given type are returned */ public NodeList list(Node.State... inState) { return NodeList.copyOf(db.readNodes(inState)); } /** Returns a locked list of all nodes in this repository */ public LockedNodeList list(Mutex lock) { return new LockedNodeList(list().asList(), lock); } /** * Returns whether the zone managed by this node repository seems to be working. 
* If too many nodes are not responding, there is probably some zone-wide issue * and we should probably refrain from making changes to it. */ public boolean isWorking() { NodeList activeNodes = list(Node.State.active); if (activeNodes.size() <= 5) return true; NodeList downNodes = activeNodes.down(); return ! ( (double)downNodes.size() / (double)activeNodes.size() > 0.2 ); } /** Adds a list of newly created reserved nodes to the node repository */ public List<Node> addReservedNodes(LockedNodeList nodes) { for (Node node : nodes) { if ( node.flavor().getType() != Flavor.Type.DOCKER_CONTAINER) illegal("Cannot add " + node + ": This is not a child node"); if (node.allocation().isEmpty()) illegal("Cannot add " + node + ": Child nodes need to be allocated"); Optional<Node> existing = node(node.hostname()); if (existing.isPresent()) illegal("Cannot add " + node + ": A node with this name already exists (" + existing.get() + ", " + existing.get().history() + "). Node to be added: " + node + ", " + node.history()); } return db.addNodesInState(nodes.asList(), Node.State.reserved, Agent.system); } /** * Adds a list of (newly created) nodes to the node repository as provisioned nodes. * If any of the nodes already exists in the deprovisioned state, the new node will be merged * with the history of that node. 
*/ public List<Node> addNodes(List<Node> nodes, Agent agent) { try (Mutex lock = lockUnallocated()) { List<Node> nodesToAdd = new ArrayList<>(); List<Node> nodesToRemove = new ArrayList<>(); for (int i = 0; i < nodes.size(); i++) { var node = nodes.get(i); for (int j = 0; j < i; j++) { if (node.equals(nodes.get(j))) illegal("Cannot add nodes: " + node + " is duplicated in the argument list"); } Optional<Node> existing = node(node.hostname()); if (existing.isPresent()) { if (existing.get().state() != Node.State.deprovisioned) illegal("Cannot add " + node + ": A node with this name already exists"); node = node.with(existing.get().history()); node = node.with(existing.get().reports()); node = node.with(node.status().withFailCount(existing.get().status().failCount())); if (existing.get().status().firmwareVerifiedAt().isPresent()) node = node.with(node.status().withFirmwareVerifiedAt(existing.get().status().firmwareVerifiedAt().get())); boolean rebuilding = existing.get().status().wantToRebuild(); if (rebuilding) { node = node.with(node.status().withWantToRetire(existing.get().status().wantToRetire(), false, rebuilding)); } nodesToRemove.add(existing.get()); } nodesToAdd.add(node); } NestedTransaction transaction = new NestedTransaction(); List<Node> resultingNodes = db.addNodesInState(IP.Config.verify(nodesToAdd, list(lock)), Node.State.provisioned, agent, transaction); db.removeNodes(nodesToRemove, transaction); transaction.commit(); return resultingNodes; } } /** Sets a list of nodes ready and returns the nodes in the ready state */ public List<Node> setReady(List<Node> nodes, Agent agent, String reason) { try (Mutex lock = lockUnallocated()) { List<Node> nodesWithResetFields = nodes.stream() .map(node -> { if (node.state() != Node.State.provisioned && node.state() != Node.State.dirty) illegal("Can not set " + node + " ready. 
It is not provisioned or dirty."); return node.withWantToRetire(false, false, false, Agent.system, clock.instant()); }) .collect(Collectors.toList()); return db.writeTo(Node.State.ready, nodesWithResetFields, agent, Optional.of(reason)); } } public Node setReady(String hostname, Agent agent, String reason) { Node nodeToReady = requireNode(hostname); if (nodeToReady.state() == Node.State.ready) return nodeToReady; return setReady(List.of(nodeToReady), agent, reason).get(0); } /** Reserve nodes. This method does <b>not</b> lock the node repository */ public List<Node> reserve(List<Node> nodes) { return db.writeTo(Node.State.reserved, nodes, Agent.application, Optional.empty()); } /** Activate nodes. This method does <b>not</b> lock the node repository */ public List<Node> activate(List<Node> nodes, NestedTransaction transaction) { return db.writeTo(Node.State.active, nodes, Agent.application, Optional.empty(), transaction); } /** * Sets a list of nodes to have their allocation removable (active to inactive) in the node repository. * * @param application the application the nodes belong to * @param nodes the nodes to make removable. These nodes MUST be in the active state. */ public void setRemovable(ApplicationId application, List<Node> nodes) { try (Mutex lock = lock(application)) { List<Node> removableNodes = nodes.stream() .map(node -> node.with(node.allocation().get().removable(true))) .collect(Collectors.toList()); write(removableNodes, lock); } } /** * Deactivates these nodes in a transaction and returns the nodes in the new state which will hold if the * transaction commits. */ public List<Node> deactivate(List<Node> nodes, ApplicationTransaction transaction) { if ( ! 
zone.environment().isProduction() || zone.system().isCd()) return deallocate(nodes, Agent.application, "Deactivated by application", transaction.nested()); var stateless = NodeList.copyOf(nodes).stateless(); var stateful = NodeList.copyOf(nodes).stateful(); List<Node> written = new ArrayList<>(); written.addAll(deallocate(stateless.asList(), Agent.application, "Deactivated by application", transaction.nested())); written.addAll(db.writeTo(Node.State.inactive, stateful.asList(), Agent.application, Optional.empty(), transaction.nested())); return written; } /** * Fails these nodes in a transaction and returns the nodes in the new state which will hold if the * transaction commits. */ public List<Node> fail(List<Node> nodes, ApplicationTransaction transaction) { return fail(nodes, Agent.application, "Failed by application", transaction.nested()); } public List<Node> fail(List<Node> nodes, Agent agent, String reason) { NestedTransaction transaction = new NestedTransaction(); nodes = fail(nodes, agent, reason, transaction); transaction.commit(); return nodes; } private List<Node> fail(List<Node> nodes, Agent agent, String reason, NestedTransaction transaction) { nodes = nodes.stream() .map(n -> n.withWantToFail(false, agent, clock.instant())) .collect(Collectors.toList()); return db.writeTo(Node.State.failed, nodes, agent, Optional.of(reason), transaction); } /** Move nodes to the dirty state */ public List<Node> deallocate(List<Node> nodes, Agent agent, String reason) { return performOn(NodeList.copyOf(nodes), (node, lock) -> deallocate(node, agent, reason)); } public List<Node> deallocateRecursively(String hostname, Agent agent, String reason) { Node nodeToDirty = node(hostname).orElseThrow(() -> new IllegalArgumentException("Could not deallocate " + hostname + ": Node not found")); List<Node> nodesToDirty = (nodeToDirty.type().isHost() ? 
Stream.concat(list().childrenOf(hostname).asList().stream(), Stream.of(nodeToDirty)) : Stream.of(nodeToDirty)) .filter(node -> node.state() != Node.State.dirty) .collect(Collectors.toList()); List<String> hostnamesNotAllowedToDirty = nodesToDirty.stream() .filter(node -> node.state() != Node.State.provisioned) .filter(node -> node.state() != Node.State.failed) .filter(node -> node.state() != Node.State.parked) .filter(node -> node.state() != Node.State.breakfixed) .map(Node::hostname) .collect(Collectors.toList()); if ( ! hostnamesNotAllowedToDirty.isEmpty()) illegal("Could not deallocate " + nodeToDirty + ": " + hostnamesNotAllowedToDirty + " are not in states [provisioned, failed, parked, breakfixed]"); return nodesToDirty.stream().map(node -> deallocate(node, agent, reason)).collect(Collectors.toList()); } /** * Set a node dirty or parked, allowed if it is in the provisioned, inactive, failed or parked state. * Use this to clean newly provisioned nodes or to recycle failed nodes which have been repaired or put on hold. */ public Node deallocate(Node node, Agent agent, String reason) { NestedTransaction transaction = new NestedTransaction(); Node deallocated = deallocate(node, agent, reason, transaction); transaction.commit(); return deallocated; } public List<Node> deallocate(List<Node> nodes, Agent agent, String reason, NestedTransaction transaction) { return nodes.stream().map(node -> deallocate(node, agent, reason, transaction)).collect(Collectors.toList()); } public Node deallocate(Node node, Agent agent, String reason, NestedTransaction transaction) { if (parkOnDeallocationOf(node, agent)) { return park(node.hostname(), false, agent, reason, transaction); } else { return db.writeTo(Node.State.dirty, List.of(node), agent, Optional.of(reason), transaction).get(0); } } /** * Fails this node and returns it in its new state. 
* * @return the node in its new state * @throws NoSuchNodeException if the node is not found */ public Node fail(String hostname, Agent agent, String reason) { return fail(hostname, true, agent, reason); } public Node fail(String hostname, boolean keepAllocation, Agent agent, String reason) { return move(hostname, Node.State.failed, agent, keepAllocation, Optional.of(reason)); } /** * Fails all the nodes that are children of hostname before finally failing the hostname itself. * Non-active nodes are failed immediately, while active nodes are marked as wantToFail. * The host is failed if it has no active nodes and marked wantToFail if it has. * * @return all the nodes that were changed by this request */ public List<Node> failOrMarkRecursively(String hostname, Agent agent, String reason) { NodeList children = list().childrenOf(hostname); List<Node> changed = performOn(children, (node, lock) -> failOrMark(node, agent, reason, lock)); if (children.state(Node.State.active).isEmpty()) changed.add(move(hostname, Node.State.failed, agent, true, Optional.of(reason))); else changed.addAll(performOn(NodeList.of(node(hostname).orElseThrow()), (node, lock) -> failOrMark(node, agent, reason, lock))); return changed; } private Node failOrMark(Node node, Agent agent, String reason, Mutex lock) { if (node.state() == Node.State.active) { node = node.withWantToFail(true, agent, clock.instant()); write(node, lock); return node; } else { return move(node.hostname(), Node.State.failed, agent, true, Optional.of(reason)); } } /** * Parks this node and returns it in its new state. 
* * @return the node in its new state * @throws NoSuchNodeException if the node is not found */ public Node park(String hostname, boolean keepAllocation, Agent agent, String reason) { NestedTransaction transaction = new NestedTransaction(); Node parked = park(hostname, keepAllocation, agent, reason, transaction); transaction.commit(); return parked; } private Node park(String hostname, boolean keepAllocation, Agent agent, String reason, NestedTransaction transaction) { return move(hostname, Node.State.parked, agent, keepAllocation, Optional.of(reason), transaction); } /** * Parks all the nodes that are children of hostname before finally parking the hostname itself. * * @return List of all the parked nodes in their new state */ public List<Node> parkRecursively(String hostname, Agent agent, String reason) { return moveRecursively(hostname, Node.State.parked, agent, Optional.of(reason)); } /** * Moves a previously failed or parked node back to the active state. * * @return the node in its new state * @throws NoSuchNodeException if the node is not found */ public Node reactivate(String hostname, Agent agent, String reason) { return move(hostname, Node.State.active, agent, true, Optional.of(reason)); } /** * Moves a host to breakfixed state, removing any children. 
*/ public List<Node> breakfixRecursively(String hostname, Agent agent, String reason) { Node node = requireNode(hostname); try (Mutex lock = lockUnallocated()) { requireBreakfixable(node); NestedTransaction transaction = new NestedTransaction(); List<Node> removed = removeChildren(node, false, transaction); removed.add(move(node.hostname(), Node.State.breakfixed, agent, true, Optional.of(reason), transaction)); transaction.commit(); return removed; } } private List<Node> moveRecursively(String hostname, Node.State toState, Agent agent, Optional<String> reason) { NestedTransaction transaction = new NestedTransaction(); List<Node> moved = list().childrenOf(hostname).asList().stream() .map(child -> move(child.hostname(), toState, agent, true, reason, transaction)) .collect(Collectors.toList()); moved.add(move(hostname, toState, agent, true, reason, transaction)); transaction.commit(); return moved; } /** Move a node to given state */ private Node move(String hostname, Node.State toState, Agent agent, boolean keepAllocation, Optional<String> reason) { NestedTransaction transaction = new NestedTransaction(); Node moved = move(hostname, toState, agent, keepAllocation, reason, transaction); transaction.commit(); return moved; } /** Move a node to given state as part of a transaction */ private Node move(String hostname, Node.State toState, Agent agent, boolean keepAllocation, Optional<String> reason, NestedTransaction transaction) { try (NodeMutex lock = lockAndGetRequired(hostname)) { Node node = lock.node(); if (toState == Node.State.active) { if (node.allocation().isEmpty()) illegal("Could not set " + node + " active: It has no allocation"); if (!keepAllocation) illegal("Could not set " + node + " active: Requested to discard allocation"); for (Node currentActive : list(Node.State.active).owner(node.allocation().get().owner())) { if (node.allocation().get().membership().cluster().equals(currentActive.allocation().get().membership().cluster()) && 
node.allocation().get().membership().index() == currentActive.allocation().get().membership().index()) illegal("Could not set " + node + " active: Same cluster and index as " + currentActive); } } if (!keepAllocation && node.allocation().isPresent()) { node = node.withoutAllocation(); } if (toState == Node.State.deprovisioned) { node = node.with(IP.Config.EMPTY); } return db.writeTo(toState, List.of(node), agent, reason, transaction).get(0); } } /* * This method is used by the REST API to handle readying nodes for new allocations. For Linux * containers this will remove the node from node repository, otherwise the node will be moved to state ready. */ public Node markNodeAvailableForNewAllocation(String hostname, Agent agent, String reason) { Node node = requireNode(hostname); if (node.flavor().getType() == Flavor.Type.DOCKER_CONTAINER && node.type() == NodeType.tenant) { if (node.state() != Node.State.dirty) illegal("Cannot make " + node + " available for new allocation as it is not in state [dirty]"); return removeRecursively(node, true).get(0); } if (node.state() == Node.State.ready) return node; Node parentHost = node.parentHostname().flatMap(this::node).orElse(node); List<String> failureReasons = NodeFailer.reasonsToFailHost(parentHost); if ( ! failureReasons.isEmpty()) illegal(node + " cannot be readied because it has hard failures: " + failureReasons); return setReady(List.of(node), agent, reason).get(0); } /** * Removes all the nodes that are children of hostname before finally removing the hostname itself. 
* * @return a List of all the nodes that have been removed or (for hosts) deprovisioned */ public List<Node> removeRecursively(String hostname) { Node node = requireNode(hostname); return removeRecursively(node, false); } public List<Node> removeRecursively(Node node, boolean force) { try (Mutex lock = lockUnallocated()) { requireRemovable(node, false, force); NestedTransaction transaction = new NestedTransaction(); final List<Node> removed; if (!node.type().isHost()) { removed = List.of(node); db.removeNodes(removed, transaction); } else { removed = removeChildren(node, force, transaction); if (zone.getCloud().dynamicProvisioning()) { db.removeNodes(List.of(node), transaction); } else { move(node.hostname(), Node.State.deprovisioned, Agent.system, false, Optional.empty(), transaction); } removed.add(node); } transaction.commit(); return removed; } } /** Forgets a deprovisioned node. This removes all traces of the node in the node repository. */ public void forget(Node node) { if (node.state() != Node.State.deprovisioned) throw new IllegalArgumentException(node + " must be deprovisioned before it can be forgotten"); if (node.status().wantToRebuild()) throw new IllegalArgumentException(node + " is rebuilding and cannot be forgotten"); NestedTransaction transaction = new NestedTransaction(); db.removeNodes(List.of(node), transaction); transaction.commit(); } private List<Node> removeChildren(Node node, boolean force, NestedTransaction transaction) { List<Node> children = list().childrenOf(node).asList(); children.forEach(child -> requireRemovable(child, true, force)); db.removeNodes(children, transaction); return new ArrayList<>(children); } /** * Throws if the given node cannot be removed. 
Removal is allowed if: * - Tenant node: * - non-recursively: node is unallocated * - recursively: node is unallocated or node is in failed|parked * - Host node: iff in state provisioned|failed|parked * - Child node: * - non-recursively: node in state ready * - recursively: child is in state provisioned|failed|parked|dirty|ready */ private void requireRemovable(Node node, boolean removingRecursively, boolean force) { if (force) return; if (node.type() == NodeType.tenant && node.allocation().isPresent()) { EnumSet<Node.State> removableStates = EnumSet.of(Node.State.failed, Node.State.parked); if (!removingRecursively || !removableStates.contains(node.state())) illegal(node + " is currently allocated and cannot be removed while in " + node.state()); } final Set<Node.State> removableStates; if (node.type().isHost()) { removableStates = EnumSet.of(Node.State.provisioned, Node.State.failed, Node.State.parked); } else { removableStates = removingRecursively ? EnumSet.of(Node.State.provisioned, Node.State.failed, Node.State.parked, Node.State.dirty, Node.State.ready) : EnumSet.of(Node.State.ready); } if (!removableStates.contains(node.state())) illegal(node + " can not be removed while in " + node.state()); } /** * Throws if given node cannot be breakfixed. * Breakfix is allowed if the following is true: * - Node is tenant host * - Node is in zone without dynamic provisioning * - Node is in parked or failed state */ private void requireBreakfixable(Node node) { if (zone.getCloud().dynamicProvisioning()) { illegal("Can not breakfix in zone: " + zone); } if (node.type() != NodeType.host) { illegal(node + " can not be breakfixed as it is not a tenant host"); } Set<Node.State> legalStates = EnumSet.of(Node.State.failed, Node.State.parked); if (! legalStates.contains(node.state())) { illegal(node + " can not be removed as it is not in the states " + legalStates); } } /** * Increases the restart generation of the active nodes matching given filter. 
* * @return the nodes in their new state */ public List<Node> restartActive(Predicate<Node> filter) { return restart(NodeFilter.in(Set.of(Node.State.active)).and(filter)); } /** * Increases the restart generation of the any nodes matching given filter. * * @return the nodes in their new state */ public List<Node> restart(Predicate<Node> filter) { return performOn(filter, (node, lock) -> write(node.withRestart(node.allocation().get().restartGeneration().withIncreasedWanted()), lock)); } /** * Increases the reboot generation of the nodes matching the filter. * * @return the nodes in their new state */ public List<Node> reboot(Predicate<Node> filter) { return performOn(filter, (node, lock) -> write(node.withReboot(node.status().reboot().withIncreasedWanted()), lock)); } /** * Set target OS version of all nodes matching given filter. * * @return the nodes in their new state */ public List<Node> upgradeOs(Predicate<Node> filter, Optional<Version> version) { return performOn(filter, (node, lock) -> { var newStatus = node.status().withOsVersion(node.status().osVersion().withWanted(version)); return write(node.with(newStatus), lock); }); } /** Retire nodes matching given filter */ public List<Node> retire(Predicate<Node> filter, Agent agent, Instant instant) { return performOn(filter, (node, lock) -> write(node.withWantToRetire(true, agent, instant), lock)); } /** Retire and deprovision given host and all of its children */ public List<Node> deprovision(String hostname, Agent agent, Instant instant) { return decommission(hostname, DecommissionOperation.deprovision, agent, instant); } /** Retire and rebuild given host and all of its children */ public List<Node> rebuild(String hostname, Agent agent, Instant instant) { return decommission(hostname, DecommissionOperation.rebuild, agent, instant); } private List<Node> decommission(String hostname, DecommissionOperation op, Agent agent, Instant instant) { Optional<NodeMutex> nodeMutex = lockAndGet(hostname); if 
(nodeMutex.isEmpty()) return List.of(); Node host = nodeMutex.get().node(); if (!host.type().isHost()) throw new IllegalArgumentException("Cannot " + op + " non-host " + host); List<Node> result; boolean wantToDeprovision = op == DecommissionOperation.deprovision; boolean wantToRebuild = op == DecommissionOperation.rebuild; try (NodeMutex lock = nodeMutex.get(); Mutex allocationLock = lockUnallocated()) { host = lock.node(); result = performOn(list(allocationLock).childrenOf(host), (node, nodeLock) -> { Node newNode = node.withWantToRetire(true, wantToDeprovision, wantToRebuild, agent, instant); return write(newNode, nodeLock); }); Node newHost = host.withWantToRetire(true, wantToDeprovision, wantToRebuild, agent, instant); result.add(write(newHost, lock)); } return result; } /** * Writes this node after it has changed some internal state but NOT changed its state field. * This does NOT lock the node repository implicitly, but callers are expected to already hold the lock. * * @param lock already acquired lock * @return the written node for convenience */ public Node write(Node node, Mutex lock) { return write(List.of(node), lock).get(0); } /** * Writes these nodes after they have changed some internal state but NOT changed their state field. * This does NOT lock the node repository implicitly, but callers are expected to already hold the lock. * * @param lock already acquired lock * @return the written nodes for convenience */ public List<Node> write(List<Node> nodes, @SuppressWarnings("unused") Mutex lock) { return db.writeTo(nodes, Agent.system, Optional.empty()); } private List<Node> performOn(Predicate<Node> filter, BiFunction<Node, Mutex, Node> action) { return performOn(list().matching(filter), action); } /** * Performs an operation requiring locking on all nodes matching some filter. 
* * @param action the action to perform * @return the set of nodes on which the action was performed, as they became as a result of the operation */ private List<Node> performOn(NodeList nodes, BiFunction<Node, Mutex, Node> action) { List<Node> unallocatedNodes = new ArrayList<>(); ListMap<ApplicationId, Node> allocatedNodes = new ListMap<>(); for (Node node : nodes) { if (node.allocation().isPresent()) allocatedNodes.put(node.allocation().get().owner(), node); else unallocatedNodes.add(node); } List<Node> resultingNodes = new ArrayList<>(); try (Mutex lock = lockUnallocated()) { for (Node node : unallocatedNodes) { Optional<Node> currentNode = db.readNode(node.hostname()); if (currentNode.isEmpty()) continue; resultingNodes.add(action.apply(currentNode.get(), lock)); } } for (Map.Entry<ApplicationId, List<Node>> applicationNodes : allocatedNodes.entrySet()) { try (Mutex lock = lock(applicationNodes.getKey())) { for (Node node : applicationNodes.getValue()) { Optional<Node> currentNode = db.readNode(node.hostname()); if (currentNode.isEmpty()) continue; resultingNodes.add(action.apply(currentNode.get(), lock)); } } } return resultingNodes; } public boolean canAllocateTenantNodeTo(Node host) { return canAllocateTenantNodeTo(host, zone.getCloud().dynamicProvisioning()); } public boolean suspended(Node node) { try { return orchestrator.getNodeStatus(new HostName(node.hostname())).isSuspended(); } catch (HostNameNotFoundException e) { return false; } } /** Create a lock which provides exclusive rights to making changes to the given application */ public Mutex lock(ApplicationId application) { return db.lock(application); } /** Create a lock with a timeout which provides exclusive rights to making changes to the given application */ public Mutex lock(ApplicationId application, Duration timeout) { return db.lock(application, timeout); } /** Create a lock which provides exclusive rights to modifying unallocated nodes */ public Mutex lockUnallocated() { return 
db.lockInactive(); } /** Returns the unallocated/application lock, and the node acquired under that lock. */ public Optional<NodeMutex> lockAndGet(Node node) { Node staleNode = node; final int maxRetries = 4; for (int i = 0; i < maxRetries; ++i) { Mutex lockToClose = lock(staleNode); try { Optional<Node> freshNode = node(staleNode.hostname(), staleNode.state()); if (freshNode.isEmpty()) { freshNode = node(staleNode.hostname()); if (freshNode.isEmpty()) { return Optional.empty(); } } if (Objects.equals(freshNode.get().allocation().map(Allocation::owner), staleNode.allocation().map(Allocation::owner))) { NodeMutex nodeMutex = new NodeMutex(freshNode.get(), lockToClose); lockToClose = null; return Optional.of(nodeMutex); } staleNode = freshNode.get(); } finally { if (lockToClose != null) lockToClose.close(); } } throw new IllegalStateException("Giving up (after " + maxRetries + " attempts) " + "fetching an up to date node under lock: " + node.hostname()); } /** Returns the unallocated/application lock, and the node acquired under that lock. */ public Optional<NodeMutex> lockAndGet(String hostname) { return node(hostname).flatMap(this::lockAndGet); } /** Returns the unallocated/application lock, and the node acquired under that lock. */ public NodeMutex lockAndGetRequired(Node node) { return lockAndGet(node).orElseThrow(() -> new NoSuchNodeException("No node with hostname '" + node.hostname() + "'")); } /** Returns the unallocated/application lock, and the node acquired under that lock. */ public NodeMutex lockAndGetRequired(String hostname) { return lockAndGet(hostname).orElseThrow(() -> new NoSuchNodeException("No node with hostname '" + hostname + "'")); } private Mutex lock(Node node) { return node.allocation().isPresent() ? 
lock(node.allocation().get().owner()) : lockUnallocated(); } private Node requireNode(String hostname) { return node(hostname).orElseThrow(() -> new NoSuchNodeException("No node with hostname '" + hostname + "'")); } private void illegal(String message) { throw new IllegalArgumentException(message); } /** Returns whether node should be parked when deallocated by given agent */ private static boolean parkOnDeallocationOf(Node node, Agent agent) { if (node.state() == Node.State.parked) return false; if (agent == Agent.operator) return false; if (!node.type().isHost() && node.status().wantToDeprovision()) return false; boolean retirementRequestedByOperator = node.status().wantToRetire() && node.history().event(History.Event.Type.wantToRetire) .map(History.Event::agent) .map(a -> a == Agent.operator) .orElse(false); return node.status().wantToDeprovision() || node.status().wantToRebuild() || retirementRequestedByOperator; } /** The different ways a host can be decommissioned */ private enum DecommissionOperation { deprovision, rebuild, } }
Didn't we consider this, @bjorncs ? I seem to remember that, but maybe I'm just wrong?
private static CloseableHttpAsyncClient createHttpClient(FeedClientBuilderImpl builder) throws IOException { SSLContext sslContext = builder.constructSslContext(); String[] allowedCiphers = excludeH2Blacklisted(excludeWeak(sslContext.getSupportedSSLParameters().getCipherSuites())); if (allowedCiphers.length == 0) throw new IllegalStateException("No adequate SSL cipher suites supported by the JVM"); ClientTlsStrategyBuilder tlsStrategyBuilder = ClientTlsStrategyBuilder.create() .setTlsDetailsFactory(sslEngine -> new TlsDetails(sslEngine.getSession(), sslEngine.getApplicationProtocol())) .setCiphers(allowedCiphers) .setSslContext(sslContext); if (builder.hostnameVerifier != null) tlsStrategyBuilder.setHostnameVerifier(builder.hostnameVerifier); return HttpAsyncClients.createHttp2Minimal(H2Config.custom() .setMaxConcurrentStreams(builder.maxStreamsPerConnection) .setCompressionEnabled(true) .setPushEnabled(false) .setInitialWindowSize(Integer.MAX_VALUE) .build(), IOReactorConfig.custom() .setIoThreadCount(2) .setTcpNoDelay(true) .setSoTimeout(Timeout.ofSeconds(10)) .build(), tlsStrategyBuilder.build()); }
new TlsDetails(sslEngine.getSession(), sslEngine.getApplicationProtocol()))
private static CloseableHttpAsyncClient createHttpClient(FeedClientBuilderImpl builder) throws IOException { SSLContext sslContext = builder.constructSslContext(); String[] allowedCiphers = excludeH2Blacklisted(excludeWeak(sslContext.getSupportedSSLParameters().getCipherSuites())); if (allowedCiphers.length == 0) throw new IllegalStateException("No adequate SSL cipher suites supported by the JVM"); ClientTlsStrategyBuilder tlsStrategyBuilder = ClientTlsStrategyBuilder.create() .setTlsDetailsFactory(sslEngine -> new TlsDetails(sslEngine.getSession(), sslEngine.getApplicationProtocol())) .setCiphers(allowedCiphers) .setSslContext(sslContext); if (builder.hostnameVerifier != null) tlsStrategyBuilder.setHostnameVerifier(builder.hostnameVerifier); return HttpAsyncClients.createHttp2Minimal(H2Config.custom() .setMaxConcurrentStreams(builder.maxStreamsPerConnection) .setCompressionEnabled(true) .setPushEnabled(false) .setInitialWindowSize(Integer.MAX_VALUE) .build(), IOReactorConfig.custom() .setIoThreadCount(2) .setTcpNoDelay(true) .setSoTimeout(Timeout.ofSeconds(10)) .build(), tlsStrategyBuilder.build()); }
class Endpoint { private final CloseableHttpAsyncClient client; private final AtomicInteger inflight = new AtomicInteger(0); private final URI url; private Endpoint(CloseableHttpAsyncClient client, URI url) { this.client = client; this.url = url; this.client.start(); } }
/**
 * Holds one feed endpoint: its URL, a dedicated async HTTP client, and a counter of
 * requests in flight. The client is started in the constructor, so a constructed
 * Endpoint is immediately ready for use.
 */
class Endpoint {

    private final CloseableHttpAsyncClient client;
    // Requests currently outstanding against this endpoint; presumably updated by the
    // dispatch code elsewhere in this file — TODO(review) confirm
    private final AtomicInteger inflight = new AtomicInteger(0);
    private final URI url;

    private Endpoint(CloseableHttpAsyncClient client, URI url) {
        this.client = client;
        this.url = url;
        this.client.start();
    }

}
This will not work with the legacy schema directory (ApplicationPackage.SEARCH_DEFINITIONS_DIR) — rank profile files placed there are never read. Is that intentional?
/**
 * Parses any separate rank profile files (".profile") belonging to the given schema and
 * registers them with it. Profiles are looked up both under the legacy search definitions
 * directory and under the current schemas directory, so application packages using either
 * layout are supported. No-op when this builder has no application package.
 *
 * @param schema the schema whose rank profile files to parse
 */
private void addRankProfileFiles(Schema schema) {
    if (applicationPackage == null) return;

    // Legacy layout: profiles stored under the old search definitions directory
    Path legacyRankProfilePath = ApplicationPackage.SEARCH_DEFINITIONS_DIR.append(schema.getName());
    for (NamedReader reader : applicationPackage.getFiles(legacyRankProfilePath, ".profile"))
        parseRankProfile(reader, schema);

    // Current layout: profiles stored under the schemas directory
    Path rankProfilePath = ApplicationPackage.SCHEMAS_DIR.append(schema.getName());
    for (NamedReader reader : applicationPackage.getFiles(rankProfilePath, ".profile"))
        parseRankProfile(reader, schema);
}
Path rankProfilePath = ApplicationPackage.SCHEMAS_DIR.append(schema.getName());
/**
 * Parses any separate rank profile files (".profile") belonging to the given schema and
 * registers them with it. Both the legacy search definitions directory and the current
 * schemas directory are consulted, so either application package layout works.
 * Does nothing when no application package is set.
 *
 * @param schema the schema whose rank profile files to parse
 */
private void addRankProfileFiles(Schema schema) {
    if (applicationPackage == null) return;

    // Legacy layout first: old search definitions directory
    Path legacyDir = ApplicationPackage.SEARCH_DEFINITIONS_DIR.append(schema.getName());
    for (NamedReader profileReader : applicationPackage.getFiles(legacyDir, ".profile")) {
        parseRankProfile(profileReader, schema);
    }

    // Then the current layout: schemas directory
    Path schemasDir = ApplicationPackage.SCHEMAS_DIR.append(schema.getName());
    for (NamedReader profileReader : applicationPackage.getFiles(schemasDir, ".profile")) {
        parseRankProfile(profileReader, schema);
    }
}
/**
 * Builds {@link Application} objects from schema files, strings, directories, or
 * programmatically constructed {@link Schema} instances, and exposes a large set of
 * static convenience factories (mostly for testing). A builder is single-use: after
 * {@link #build} has been called it can no longer accept schemas.
 */
class ApplicationBuilder {

    private final ApplicationPackage applicationPackage;
    private final List<Schema> schemas = new ArrayList<>();
    private final DocumentTypeManager documentTypeManager = new DocumentTypeManager();
    private final RankProfileRegistry rankProfileRegistry;
    private final QueryProfileRegistry queryProfileRegistry;
    private final FileRegistry fileRegistry;
    private final DeployLogger deployLogger;
    private final ModelContext.Properties properties;

    /** True to build the document aspect only, skipping instantiation of rank profiles */
    private final boolean documentsOnly;

    // Set by build(); non-null means this builder has been consumed
    private Application application;

    private final Set<Class<? extends Processor>> processorsToSkip = new HashSet<>();

    /** For testing only */
    public ApplicationBuilder() {
        this(new RankProfileRegistry(), new QueryProfileRegistry());
    }

    /** For testing only */
    public ApplicationBuilder(DeployLogger deployLogger) {
        this(MockApplicationPackage.createEmpty(), deployLogger);
    }

    /** For testing only */
    public ApplicationBuilder(DeployLogger deployLogger, RankProfileRegistry rankProfileRegistry) {
        this(MockApplicationPackage.createEmpty(), deployLogger, rankProfileRegistry);
    }

    /** Used for generating documents for typed access to document fields in Java */
    public ApplicationBuilder(boolean documentsOnly) {
        this(MockApplicationPackage.createEmpty(), new MockFileRegistry(), new BaseDeployLogger(), new TestProperties(),
             new RankProfileRegistry(), new QueryProfileRegistry(), documentsOnly);
    }

    /** For testing only */
    public ApplicationBuilder(ApplicationPackage app, DeployLogger deployLogger) {
        this(app, new MockFileRegistry(), deployLogger, new TestProperties(),
             new RankProfileRegistry(), new QueryProfileRegistry());
    }

    /** For testing only */
    public ApplicationBuilder(ApplicationPackage app, DeployLogger deployLogger, RankProfileRegistry rankProfileRegistry) {
        this(app, new MockFileRegistry(), deployLogger, new TestProperties(),
             rankProfileRegistry, new QueryProfileRegistry());
    }

    /** For testing only */
    public ApplicationBuilder(RankProfileRegistry rankProfileRegistry) {
        this(rankProfileRegistry, new QueryProfileRegistry());
    }

    /** For testing only */
    public ApplicationBuilder(RankProfileRegistry rankProfileRegistry, QueryProfileRegistry queryProfileRegistry) {
        this(rankProfileRegistry, queryProfileRegistry, new TestProperties());
    }

    public ApplicationBuilder(RankProfileRegistry rankProfileRegistry,
                              QueryProfileRegistry queryProfileRegistry,
                              ModelContext.Properties properties) {
        this(MockApplicationPackage.createEmpty(), new MockFileRegistry(), new BaseDeployLogger(), properties,
             rankProfileRegistry, queryProfileRegistry);
    }

    public ApplicationBuilder(ApplicationPackage app,
                              FileRegistry fileRegistry,
                              DeployLogger deployLogger,
                              ModelContext.Properties properties,
                              RankProfileRegistry rankProfileRegistry,
                              QueryProfileRegistry queryProfileRegistry) {
        this(app, fileRegistry, deployLogger, properties, rankProfileRegistry, queryProfileRegistry, false);
    }

    private ApplicationBuilder(ApplicationPackage applicationPackage,
                               FileRegistry fileRegistry,
                               DeployLogger deployLogger,
                               ModelContext.Properties properties,
                               RankProfileRegistry rankProfileRegistry,
                               QueryProfileRegistry queryProfileRegistry,
                               boolean documentsOnly) {
        this.applicationPackage = applicationPackage;
        this.rankProfileRegistry = rankProfileRegistry;
        this.queryProfileRegistry = queryProfileRegistry;
        this.fileRegistry = fileRegistry;
        this.deployLogger = deployLogger;
        this.properties = properties;
        this.documentsOnly = documentsOnly;
        // Eagerly parse every schema already present in the application package
        for (NamedReader reader : applicationPackage.getSchemas())
            addSchema(reader);
    }

    /**
     * Adds a schema to this application from a file.
     *
     * @param fileName the name of the file to import
     * @return the imported schema
     * @throws IOException thrown if the file can not be read for some reason
     * @throws ParseException thrown if the file does not contain a valid search definition
     */
    public Schema addSchemaFile(String fileName) throws IOException, ParseException {
        File file = new File(fileName);
        return addSchema(IOUtils.readFile(file));
    }

    /**
     * Reads and parses the schema string provided by the given reader. The reader's file name
     * (minus the ".sd" suffix) must match the name declared inside the schema.
     *
     * @param reader the reader whose content to import
     * @throws IllegalArgumentException if the schema cannot be read or parsed, or the file name mismatches
     */
    public void addSchema(NamedReader reader) {
        try {
            String schemaName = addSchema(IOUtils.readAll(reader)).getName();
            String schemaFileName = stripSuffix(reader.getName(), ApplicationPackage.SD_NAME_SUFFIX);
            if ( ! schemaFileName.equals(schemaName)) {
                throw new IllegalArgumentException("The file containing schema '" + schemaName + "' must be named '" +
                                                   schemaName + ApplicationPackage.SD_NAME_SUFFIX + "', not " + reader.getName());
            }
        } catch (ParseException e) {
            throw new IllegalArgumentException("Could not parse schema file '" + reader.getName() + "'", e);
        } catch (IOException e) {
            throw new IllegalArgumentException("Could not read schema file '" + reader.getName() + "'", e);
        } finally {
            closeIgnoreException(reader.getReader());
        }
    }

    /** Returns readerName with the given suffix removed; throws if it does not end with the suffix. */
    private static String stripSuffix(String readerName, String suffix) {
        if ( ! readerName.endsWith(suffix))
            throw new IllegalArgumentException("Schema '" + readerName + "' does not end with " + suffix);
        return readerName.substring(0, readerName.length() - suffix.length());
    }

    /**
     * Adds a schema to this application from its string content.
     *
     * @param schemaString the content of the schema
     * @return the parsed schema
     * @throws ParseException if the string is not a valid schema
     */
    public Schema addSchema(String schemaString) throws ParseException {
        return add(createSchema(schemaString));
    }

    /**
     * Registers the given schema with the application to be built. A {@link Schema} object is
     * considered "raw" if it has not already been processed; this is the case for most
     * programmatically constructed schemas used in unit tests.
     *
     * @param schema the object to import
     * @return the same schema, for chaining
     * @throws IllegalArgumentException if the given schema has no name
     */
    public Schema add(Schema schema) {
        if (schema.getName() == null)
            throw new IllegalArgumentException("Schema has no name");
        schemas.add(schema);
        return schema;
    }

    /** Parses a schema string and attaches any separate rank profile files belonging to it. */
    private Schema createSchema(String schemaString) throws ParseException {
        Schema schema = parseSchema(schemaString);
        addRankProfileFiles(schema);
        return schema;
    }

    /** Parses a schema string into a Schema, translating tokenizer errors into ParseException. */
    private Schema parseSchema(String schemaString) throws ParseException {
        SimpleCharStream stream = new SimpleCharStream(schemaString);
        try {
            return new SDParser(stream, applicationPackage, fileRegistry, deployLogger, properties,
                                rankProfileRegistry, documentsOnly)
                    .schema(documentTypeManager);
        } catch (TokenMgrException e) {
            throw new ParseException("Unknown symbol: " + e.getMessage());
        } catch (ParseException pe) {
            // Re-throw with position information formatted against the stream
            throw new ParseException(stream.formatException(Exceptions.toMessageString(pe)));
        }
    }

    /** Parses the rank profile of the given reader and adds it to the rank profile registry for this schema. */
    private void parseRankProfile(NamedReader reader, Schema schema) {
        try {
            SimpleCharStream stream = new SimpleCharStream(IOUtils.readAll(reader.getReader()));
            try {
                new SDParser(stream, applicationPackage, fileRegistry, deployLogger, properties,
                             rankProfileRegistry, documentsOnly)
                        .rankProfile(schema);
            } catch (TokenMgrException e) {
                throw new ParseException("Unknown symbol: " + e.getMessage());
            } catch (ParseException pe) {
                throw new ParseException(stream.formatException(Exceptions.toMessageString(pe)));
            }
        } catch (IOException e) {
            throw new IllegalArgumentException("Could not read rank profile " + reader.getName(), e);
        } catch (ParseException e) {
            throw new IllegalArgumentException("Could not parse rank profile " + reader.getName(), e);
        }
    }

    /**
     * Processes and finalizes the schemas of this into an {@link Application}.
     *
     * @param validate whether to run validation while building
     * @return the built application
     * @throws IllegalStateException thrown if this method has already been called
     */
    public Application build(boolean validate) {
        if (application != null) throw new IllegalStateException("Application already built");
        application = new Application(applicationPackage,
                                      schemas,
                                      rankProfileRegistry,
                                      new QueryProfiles(queryProfileRegistry, deployLogger),
                                      properties,
                                      documentsOnly,
                                      validate,
                                      processorsToSkip,
                                      deployLogger);
        return application;
    }

    /** Returns a modifiable set of processors we should skip for these schemas. Useful for testing. */
    public Set<Class<? extends Processor>> processorsToSkip() { return processorsToSkip; }

    /**
     * Returns the single schema of the built application. Never returns null.
     *
     * @return the built schema
     * @throws IllegalStateException if the application is not built, or there is not exactly one schema
     */
    public Schema getSchema() {
        if (application == null) throw new IllegalStateException("Application not built");
        if (application.schemas().size() != 1)
            throw new IllegalStateException("This call only works if we have 1 schema. Schemas: " +
                                            application.schemas().values());
        return application.schemas().values().stream().findAny().get();
    }

    /** Returns the document model of the built application. Requires build() to have been called. */
    public DocumentModel getModel() {
        return application.documentModel();
    }

    /**
     * Returns the built {@link Schema} object that has the given name. If the name is unknown, this method will simply
     * return null.
     *
     * @param name the name of the schema to return,
     *             or null to return the only one or throw an exception if there are multiple to choose from
     * @return the built object, or null if none with this name
     * @throws IllegalStateException if the application is not built
     */
    public Schema getSchema(String name) {
        if (application == null) throw new IllegalStateException("Application not built");
        if (name == null) return getSchema();
        return application.schemas().get(name);
    }

    /** Returns the built application, or null if build() has not been called. */
    public Application application() { return application; }

    /**
     * Convenience method to return a list of all built {@link Schema} objects.
     *
     * @return the list of built schemas
     */
    public List<Schema> getSchemaList() {
        return new ArrayList<>(application.schemas().values());
    }

    /**
     * Convenience factory method to import and build a {@link Schema} object from a string.
     *
     * @param sd the string to build from
     * @return the built {@link ApplicationBuilder} object
     * @throws ParseException thrown if there is a problem parsing the string
     */
    public static ApplicationBuilder createFromString(String sd) throws ParseException {
        return createFromString(sd, new BaseDeployLogger());
    }

    public static ApplicationBuilder createFromString(String sd, DeployLogger logger) throws ParseException {
        ApplicationBuilder builder = new ApplicationBuilder(logger);
        builder.addSchema(sd);
        builder.build(true);
        return builder;
    }

    public static ApplicationBuilder createFromStrings(DeployLogger logger, String ... schemas) throws ParseException {
        ApplicationBuilder builder = new ApplicationBuilder(logger);
        for (var schema : schemas)
            builder.addSchema(schema);
        builder.build(true);
        return builder;
    }

    /**
     * Convenience factory method to import and build a {@link Schema} object from a file. Only for testing.
     *
     * @param fileName the file to build from
     * @return the built {@link ApplicationBuilder} object
     * @throws IOException if there was a problem reading the file
     * @throws ParseException if there was a problem parsing the file content
     */
    public static ApplicationBuilder createFromFile(String fileName) throws IOException, ParseException {
        return createFromFile(fileName, new BaseDeployLogger());
    }

    /** Convenience factory method to create a builder from multiple SD files. Only for testing. */
    public static ApplicationBuilder createFromFiles(Collection<String> fileNames) throws IOException, ParseException {
        return createFromFiles(fileNames, new BaseDeployLogger());
    }

    public static ApplicationBuilder createFromFile(String fileName, DeployLogger logger) throws IOException, ParseException {
        return createFromFile(fileName, logger, new RankProfileRegistry(), new QueryProfileRegistry());
    }

    private static ApplicationBuilder createFromFiles(Collection<String> fileNames, DeployLogger logger) throws IOException, ParseException {
        return createFromFiles(fileNames, new MockFileRegistry(), logger, new TestProperties(),
                               new RankProfileRegistry(), new QueryProfileRegistry());
    }

    /**
     * Convenience factory method to import and build a {@link Schema} object from a file.
     *
     * @param fileName the file to build from
     * @param deployLogger logger for deploy messages
     * @param rankProfileRegistry registry for rank profiles
     * @return the built {@link ApplicationBuilder} object
     * @throws IOException if there was a problem reading the file
     * @throws ParseException if there was a problem parsing the file content
     */
    private static ApplicationBuilder createFromFile(String fileName,
                                                     DeployLogger deployLogger,
                                                     RankProfileRegistry rankProfileRegistry,
                                                     QueryProfileRegistry queryprofileRegistry) throws IOException, ParseException {
        return createFromFiles(Collections.singletonList(fileName), new MockFileRegistry(), deployLogger,
                               new TestProperties(), rankProfileRegistry, queryprofileRegistry);
    }

    /** Convenience factory method to create a builder from multiple SD files. */
    private static ApplicationBuilder createFromFiles(Collection<String> fileNames,
                                                      FileRegistry fileRegistry,
                                                      DeployLogger deployLogger,
                                                      ModelContext.Properties properties,
                                                      RankProfileRegistry rankProfileRegistry,
                                                      QueryProfileRegistry queryprofileRegistry) throws IOException, ParseException {
        ApplicationBuilder builder = new ApplicationBuilder(MockApplicationPackage.createEmpty(),
                                                            fileRegistry,
                                                            deployLogger,
                                                            properties,
                                                            rankProfileRegistry,
                                                            queryprofileRegistry);
        for (String fileName : fileNames) {
            builder.addSchemaFile(fileName);
        }
        builder.build(true);
        return builder;
    }

    public static ApplicationBuilder createFromDirectory(String dir,
                                                         FileRegistry fileRegistry,
                                                         DeployLogger logger,
                                                         ModelContext.Properties properties) throws IOException, ParseException {
        return createFromDirectory(dir, fileRegistry, logger, properties, new RankProfileRegistry());
    }

    public static ApplicationBuilder createFromDirectory(String dir,
                                                         FileRegistry fileRegistry,
                                                         DeployLogger logger,
                                                         ModelContext.Properties properties,
                                                         RankProfileRegistry rankProfileRegistry) throws IOException, ParseException {
        return createFromDirectory(dir, fileRegistry, logger, properties, rankProfileRegistry,
                                   createQueryProfileRegistryFromDirectory(dir));
    }

    private static ApplicationBuilder createFromDirectory(String dir,
                                                          FileRegistry fileRegistry,
                                                          DeployLogger logger,
                                                          ModelContext.Properties properties,
                                                          RankProfileRegistry rankProfileRegistry,
                                                          QueryProfileRegistry queryProfileRegistry) throws IOException, ParseException {
        return createFromDirectory(dir, MockApplicationPackage.fromSearchDefinitionAndRootDirectory(dir), fileRegistry,
                                   logger, properties, rankProfileRegistry, queryProfileRegistry);
    }

    private static ApplicationBuilder createFromDirectory(String dir,
                                                          ApplicationPackage applicationPackage,
                                                          FileRegistry fileRegistry,
                                                          DeployLogger deployLogger,
                                                          ModelContext.Properties properties,
                                                          RankProfileRegistry rankProfileRegistry,
                                                          QueryProfileRegistry queryProfileRegistry) throws IOException, ParseException {
        ApplicationBuilder builder = new ApplicationBuilder(applicationPackage,
                                                            fileRegistry,
                                                            deployLogger,
                                                            properties,
                                                            rankProfileRegistry,
                                                            queryProfileRegistry);
        // Add every .sd file found directly in the given directory
        for (var i = Files.list(new File(dir).toPath())
                          .filter(p -> p.getFileName().toString().endsWith(".sd")).iterator(); i.hasNext(); ) {
            builder.addSchemaFile(i.next().toString());
        }
        builder.build(true);
        return builder;
    }

    /** Reads query profiles from dir/query-profiles, or returns an empty registry if that directory is absent. */
    private static QueryProfileRegistry createQueryProfileRegistryFromDirectory(String dir) {
        File queryProfilesDir = new File(dir, "query-profiles");
        if ( ! queryProfilesDir.exists()) return new QueryProfileRegistry();
        return new QueryProfileXMLReader().read(queryProfilesDir.toString());
    }

    /**
     * Convenience factory method to import and build a {@link Schema} object from a file. Only for testing.
     *
     * @param fileName the file to build from
     * @return the built {@link Schema} object
     * @throws IOException thrown if there was a problem reading the file
     * @throws ParseException thrown if there was a problem parsing the file content
     */
    public static Schema buildFromFile(String fileName) throws IOException, ParseException {
        return buildFromFile(fileName, new BaseDeployLogger(), new RankProfileRegistry(), new QueryProfileRegistry());
    }

    /**
     * Convenience factory method to import and build a {@link Schema} object from a file.
     *
     * @param fileName the file to build from
     * @param rankProfileRegistry registry for rank profiles
     * @return the built {@link Schema} object
     * @throws IOException thrown if there was a problem reading the file
     * @throws ParseException thrown if there was a problem parsing the file content
     */
    public static Schema buildFromFile(String fileName,
                                       RankProfileRegistry rankProfileRegistry,
                                       QueryProfileRegistry queryProfileRegistry) throws IOException, ParseException {
        return buildFromFile(fileName, new BaseDeployLogger(), rankProfileRegistry, queryProfileRegistry);
    }

    /**
     * Convenience factory method to import and build a {@link Schema} from a file.
     *
     * @param fileName the file to build from
     * @param deployLogger logger for deploy messages
     * @param rankProfileRegistry registry for rank profiles
     * @return the built {@link Schema} object
     * @throws IOException thrown if there was a problem reading the file
     * @throws ParseException thrown if there was a problem parsing the file content
     */
    public static Schema buildFromFile(String fileName,
                                       DeployLogger deployLogger,
                                       RankProfileRegistry rankProfileRegistry,
                                       QueryProfileRegistry queryProfileRegistry) throws IOException, ParseException {
        return createFromFile(fileName, deployLogger, rankProfileRegistry, queryProfileRegistry).getSchema();
    }

    /**
     * Convenience factory method to import and build a builder from a raw schema object.
     *
     * @param rawSchema the raw object to build from
     * @return the built {@link ApplicationBuilder} object
     */
    public static ApplicationBuilder createFromRawSchema(Schema rawSchema,
                                                         RankProfileRegistry rankProfileRegistry,
                                                         QueryProfileRegistry queryProfileRegistry) {
        ApplicationBuilder builder = new ApplicationBuilder(rankProfileRegistry, queryProfileRegistry);
        builder.add(rawSchema);
        builder.build(true);
        return builder;
    }

    /**
     * Convenience factory method to import and build a {@link Schema} object from a raw schema object.
     *
     * @param rawSchema the raw object to build from
     * @return the built {@link Schema} object
     */
    public static Schema buildFromRawSchema(Schema rawSchema,
                                            RankProfileRegistry rankProfileRegistry,
                                            QueryProfileRegistry queryProfileRegistry) {
        return createFromRawSchema(rawSchema, rankProfileRegistry, queryProfileRegistry).getSchema();
    }

    public RankProfileRegistry getRankProfileRegistry() { return rankProfileRegistry; }

    public QueryProfileRegistry getQueryProfileRegistry() { return queryProfileRegistry; }

    public ModelContext.Properties getProperties() { return properties; }

    public DeployLogger getDeployLogger() { return deployLogger; }

    /** Closes the given reader, deliberately ignoring any failure to do so. */
    @SuppressWarnings("EmptyCatchBlock")
    private static void closeIgnoreException(Reader reader) {
        try {
            reader.close();
        } catch(Exception e) {}
    }

}
class ApplicationBuilder { private final ApplicationPackage applicationPackage; private final List<Schema> schemas = new ArrayList<>(); private final DocumentTypeManager documentTypeManager = new DocumentTypeManager(); private final RankProfileRegistry rankProfileRegistry; private final QueryProfileRegistry queryProfileRegistry; private final FileRegistry fileRegistry; private final DeployLogger deployLogger; private final ModelContext.Properties properties; /** True to build the document aspect only, skipping instantiation of rank profiles */ private final boolean documentsOnly; private Application application; private final Set<Class<? extends Processor>> processorsToSkip = new HashSet<>(); /** For testing only */ public ApplicationBuilder() { this(new RankProfileRegistry(), new QueryProfileRegistry()); } /** For testing only */ public ApplicationBuilder(DeployLogger deployLogger) { this(MockApplicationPackage.createEmpty(), deployLogger); } /** For testing only */ public ApplicationBuilder(DeployLogger deployLogger, RankProfileRegistry rankProfileRegistry) { this(MockApplicationPackage.createEmpty(), deployLogger, rankProfileRegistry); } /** Used for generating documents for typed access to document fields in Java */ public ApplicationBuilder(boolean documentsOnly) { this(MockApplicationPackage.createEmpty(), new MockFileRegistry(), new BaseDeployLogger(), new TestProperties(), new RankProfileRegistry(), new QueryProfileRegistry(), documentsOnly); } /** For testing only */ public ApplicationBuilder(ApplicationPackage app, DeployLogger deployLogger) { this(app, new MockFileRegistry(), deployLogger, new TestProperties(), new RankProfileRegistry(), new QueryProfileRegistry()); } /** For testing only */ public ApplicationBuilder(ApplicationPackage app, DeployLogger deployLogger, RankProfileRegistry rankProfileRegistry) { this(app, new MockFileRegistry(), deployLogger, new TestProperties(), rankProfileRegistry, new QueryProfileRegistry()); } /** For testing only */ 
public ApplicationBuilder(RankProfileRegistry rankProfileRegistry) { this(rankProfileRegistry, new QueryProfileRegistry()); } /** For testing only */ public ApplicationBuilder(RankProfileRegistry rankProfileRegistry, QueryProfileRegistry queryProfileRegistry) { this(rankProfileRegistry, queryProfileRegistry, new TestProperties()); } public ApplicationBuilder(RankProfileRegistry rankProfileRegistry, QueryProfileRegistry queryProfileRegistry, ModelContext.Properties properties) { this(MockApplicationPackage.createEmpty(), new MockFileRegistry(), new BaseDeployLogger(), properties, rankProfileRegistry, queryProfileRegistry); } public ApplicationBuilder(ApplicationPackage app, FileRegistry fileRegistry, DeployLogger deployLogger, ModelContext.Properties properties, RankProfileRegistry rankProfileRegistry, QueryProfileRegistry queryProfileRegistry) { this(app, fileRegistry, deployLogger, properties, rankProfileRegistry, queryProfileRegistry, false); } private ApplicationBuilder(ApplicationPackage applicationPackage, FileRegistry fileRegistry, DeployLogger deployLogger, ModelContext.Properties properties, RankProfileRegistry rankProfileRegistry, QueryProfileRegistry queryProfileRegistry, boolean documentsOnly) { this.applicationPackage = applicationPackage; this.rankProfileRegistry = rankProfileRegistry; this.queryProfileRegistry = queryProfileRegistry; this.fileRegistry = fileRegistry; this.deployLogger = deployLogger; this.properties = properties; this.documentsOnly = documentsOnly; for (NamedReader reader : applicationPackage.getSchemas()) addSchema(reader); } /** * Adds a schema to this application. 
* * @param fileName the name of the file to import * @return the name of the imported object * @throws IOException thrown if the file can not be read for some reason * @throws ParseException thrown if the file does not contain a valid search definition */ public Schema addSchemaFile(String fileName) throws IOException, ParseException { File file = new File(fileName); return addSchema(IOUtils.readFile(file)); } /** * Reads and parses the schema string provided by the given reader. Once all schemas have been * imported, call {@link * * @param reader the reader whose content to import */ public void addSchema(NamedReader reader) { try { String schemaName = addSchema(IOUtils.readAll(reader)).getName(); String schemaFileName = stripSuffix(reader.getName(), ApplicationPackage.SD_NAME_SUFFIX); if ( ! schemaFileName.equals(schemaName)) { throw new IllegalArgumentException("The file containing schema '" + schemaName + "' must be named '" + schemaName + ApplicationPackage.SD_NAME_SUFFIX + "', not " + reader.getName()); } } catch (ParseException e) { throw new IllegalArgumentException("Could not parse schema file '" + reader.getName() + "'", e); } catch (IOException e) { throw new IllegalArgumentException("Could not read schema file '" + reader.getName() + "'", e); } finally { closeIgnoreException(reader.getReader()); } } private static String stripSuffix(String readerName, String suffix) { if ( ! readerName.endsWith(suffix)) throw new IllegalArgumentException("Schema '" + readerName + "' does not end with " + suffix); return readerName.substring(0, readerName.length() - suffix.length()); } /** * Adds a schema to this * * @param schemaString the content of the schema */ public Schema addSchema(String schemaString) throws ParseException { return add(createSchema(schemaString)); } /** * Registers the given schema to the application to be built during {@link * {@link Schema} object is considered to be "raw" if it has not already been processed. 
This is the case for most * programmatically constructed schemas used in unit tests. * * @param schema the object to import * @throws IllegalArgumentException if the given search object has already been processed */ public Schema add(Schema schema) { if (schema.getName() == null) throw new IllegalArgumentException("Schema has no name"); schemas.add(schema); return schema; } private Schema createSchema(String schemaString) throws ParseException { Schema schema = parseSchema(schemaString); addRankProfileFiles(schema); return schema; } private Schema parseSchema(String schemaString) throws ParseException { SimpleCharStream stream = new SimpleCharStream(schemaString); try { return parserOf(stream).schema(documentTypeManager); } catch (TokenMgrException e) { throw new ParseException("Unknown symbol: " + e.getMessage()); } catch (ParseException pe) { throw new ParseException(stream.formatException(Exceptions.toMessageString(pe))); } } /** Parses the rank profile of the given reader and adds it to the rank profile registry for this schema. */ private void parseRankProfile(NamedReader reader, Schema schema) { try { SimpleCharStream stream = new SimpleCharStream(IOUtils.readAll(reader.getReader())); try { parserOf(stream).rankProfile(schema); } catch (TokenMgrException e) { throw new ParseException("Unknown symbol: " + e.getMessage()); } catch (ParseException pe) { throw new ParseException(stream.formatException(Exceptions.toMessageString(pe))); } } catch (IOException e) { throw new IllegalArgumentException("Could not read rank profile " + reader.getName(), e); } catch (ParseException e) { throw new IllegalArgumentException("Could not parse rank profile " + reader.getName(), e); } } private SDParser parserOf(SimpleCharStream stream) { return new SDParser(stream, applicationPackage, fileRegistry, deployLogger, properties, rankProfileRegistry, documentsOnly); } /** * Processes and finalizes the schemas of this. 
* * @throws IllegalStateException thrown if this method has already been called */ public Application build(boolean validate) { if (application != null) throw new IllegalStateException("Application already built"); application = new Application(applicationPackage, schemas, rankProfileRegistry, new QueryProfiles(queryProfileRegistry, deployLogger), properties, documentsOnly, validate, processorsToSkip, deployLogger); return application; } /** Returns a modifiable set of processors we should skip for these schemas. Useful for testing. */ public Set<Class<? extends Processor>> processorsToSkip() { return processorsToSkip; } /** * Convenience method to call {@link * built. This method will never return null. * * @return the built object * @throws IllegalStateException if there is not exactly one search. */ public Schema getSchema() { if (application == null) throw new IllegalStateException("Application not built"); if (application.schemas().size() != 1) throw new IllegalStateException("This call only works if we have 1 schema. Schemas: " + application.schemas().values()); return application.schemas().values().stream().findAny().get(); } public DocumentModel getModel() { return application.documentModel(); } /** * Returns the built {@link Schema} object that has the given name. If the name is unknown, this method will simply * return null. * * @param name the name of the schema to return, * or null to return the only one or throw an exception if there are multiple to choose from * @return the built object, or null if none with this name * @throws IllegalStateException if {@link */ public Schema getSchema(String name) { if (application == null) throw new IllegalStateException("Application not built"); if (name == null) return getSchema(); return application.schemas().get(name); } public Application application() { return application; } /** * Convenience method to return a list of all built {@link Schema} objects. 
* * @return the list of built searches */ public List<Schema> getSchemaList() { return new ArrayList<>(application.schemas().values()); } /** * Convenience factory method to import and build a {@link Schema} object from a string. * * @param sd the string to build from * @return the built {@link ApplicationBuilder} object * @throws ParseException thrown if there is a problem parsing the string */ public static ApplicationBuilder createFromString(String sd) throws ParseException { return createFromString(sd, new BaseDeployLogger()); } public static ApplicationBuilder createFromString(String sd, DeployLogger logger) throws ParseException { ApplicationBuilder builder = new ApplicationBuilder(logger); builder.addSchema(sd); builder.build(true); return builder; } public static ApplicationBuilder createFromStrings(DeployLogger logger, String ... schemas) throws ParseException { ApplicationBuilder builder = new ApplicationBuilder(logger); for (var schema : schemas) builder.addSchema(schema); builder.build(true); return builder; } /** * Convenience factory method to import and build a {@link Schema} object from a file. Only for testing. * * @param fileName the file to build from * @return the built {@link ApplicationBuilder} object * @throws IOException if there was a problem reading the file. * @throws ParseException if there was a problem parsing the file content. */ public static ApplicationBuilder createFromFile(String fileName) throws IOException, ParseException { return createFromFile(fileName, new BaseDeployLogger()); } /** * Convenience factory methdd to create a SearchBuilder from multiple SD files. Only for testing. 
*/ public static ApplicationBuilder createFromFiles(Collection<String> fileNames) throws IOException, ParseException { return createFromFiles(fileNames, new BaseDeployLogger()); } public static ApplicationBuilder createFromFile(String fileName, DeployLogger logger) throws IOException, ParseException { return createFromFile(fileName, logger, new RankProfileRegistry(), new QueryProfileRegistry()); } private static ApplicationBuilder createFromFiles(Collection<String> fileNames, DeployLogger logger) throws IOException, ParseException { return createFromFiles(fileNames, new MockFileRegistry(), logger, new TestProperties(), new RankProfileRegistry(), new QueryProfileRegistry()); } /** * Convenience factory method to import and build a {@link Schema} object from a file. * * @param fileName the file to build from. * @param deployLogger logger for deploy messages. * @param rankProfileRegistry registry for rank profiles. * @return the built {@link ApplicationBuilder} object. * @throws IOException if there was a problem reading the file. * @throws ParseException if there was a problem parsing the file content. */ private static ApplicationBuilder createFromFile(String fileName, DeployLogger deployLogger, RankProfileRegistry rankProfileRegistry, QueryProfileRegistry queryprofileRegistry) throws IOException, ParseException { return createFromFiles(Collections.singletonList(fileName), new MockFileRegistry(), deployLogger, new TestProperties(), rankProfileRegistry, queryprofileRegistry); } /** * Convenience factory methdd to create a SearchBuilder from multiple SD files.. 
*/ private static ApplicationBuilder createFromFiles(Collection<String> fileNames, FileRegistry fileRegistry, DeployLogger deployLogger, ModelContext.Properties properties, RankProfileRegistry rankProfileRegistry, QueryProfileRegistry queryprofileRegistry) throws IOException, ParseException { ApplicationBuilder builder = new ApplicationBuilder(MockApplicationPackage.createEmpty(), fileRegistry, deployLogger, properties, rankProfileRegistry, queryprofileRegistry); for (String fileName : fileNames) { builder.addSchemaFile(fileName); } builder.build(true); return builder; } public static ApplicationBuilder createFromDirectory(String dir, FileRegistry fileRegistry, DeployLogger logger, ModelContext.Properties properties) throws IOException, ParseException { return createFromDirectory(dir, fileRegistry, logger, properties, new RankProfileRegistry()); } public static ApplicationBuilder createFromDirectory(String dir, FileRegistry fileRegistry, DeployLogger logger, ModelContext.Properties properties, RankProfileRegistry rankProfileRegistry) throws IOException, ParseException { return createFromDirectory(dir, fileRegistry, logger, properties, rankProfileRegistry, createQueryProfileRegistryFromDirectory(dir)); } private static ApplicationBuilder createFromDirectory(String dir, FileRegistry fileRegistry, DeployLogger logger, ModelContext.Properties properties, RankProfileRegistry rankProfileRegistry, QueryProfileRegistry queryProfileRegistry) throws IOException, ParseException { return createFromDirectory(dir, MockApplicationPackage.fromSearchDefinitionAndRootDirectory(dir), fileRegistry, logger, properties, rankProfileRegistry, queryProfileRegistry); } private static ApplicationBuilder createFromDirectory(String dir, ApplicationPackage applicationPackage, FileRegistry fileRegistry, DeployLogger deployLogger, ModelContext.Properties properties, RankProfileRegistry rankProfileRegistry, QueryProfileRegistry queryProfileRegistry) throws IOException, ParseException { 
ApplicationBuilder builder = new ApplicationBuilder(applicationPackage, fileRegistry, deployLogger, properties, rankProfileRegistry, queryProfileRegistry); for (var i = Files.list(new File(dir).toPath()).filter(p -> p.getFileName().toString().endsWith(".sd")).iterator(); i.hasNext(); ) { builder.addSchemaFile(i.next().toString()); } builder.build(true); return builder; } private static QueryProfileRegistry createQueryProfileRegistryFromDirectory(String dir) { File queryProfilesDir = new File(dir, "query-profiles"); if ( ! queryProfilesDir.exists()) return new QueryProfileRegistry(); return new QueryProfileXMLReader().read(queryProfilesDir.toString()); } /** * Convenience factory method to import and build a {@link Schema} object from a file. Only for testing. * * @param fileName the file to build from * @return the built {@link Schema} object * @throws IOException thrown if there was a problem reading the file * @throws ParseException thrown if there was a problem parsing the file content */ public static Schema buildFromFile(String fileName) throws IOException, ParseException { return buildFromFile(fileName, new BaseDeployLogger(), new RankProfileRegistry(), new QueryProfileRegistry()); } /** * Convenience factory method to import and build a {@link Schema} object from a file. * * @param fileName the file to build from * @param rankProfileRegistry registry for rank profiles * @return the built {@link Schema} object * @throws IOException thrown if there was a problem reading the file * @throws ParseException thrown if there was a problem parsing the file content */ public static Schema buildFromFile(String fileName, RankProfileRegistry rankProfileRegistry, QueryProfileRegistry queryProfileRegistry) throws IOException, ParseException { return buildFromFile(fileName, new BaseDeployLogger(), rankProfileRegistry, queryProfileRegistry); } /** * Convenience factory method to import and build a {@link Schema} from a file. 
* * @param fileName the file to build from * @param deployLogger logger for deploy messages * @param rankProfileRegistry registry for rank profiles * @return the built {@link Schema} object * @throws IOException thrown if there was a problem reading the file * @throws ParseException thrown if there was a problem parsing the file content */ public static Schema buildFromFile(String fileName, DeployLogger deployLogger, RankProfileRegistry rankProfileRegistry, QueryProfileRegistry queryProfileRegistry) throws IOException, ParseException { return createFromFile(fileName, deployLogger, rankProfileRegistry, queryProfileRegistry).getSchema(); } /** * Convenience factory method to import and build a {@link Schema} object from a raw object. * * @param rawSchema the raw object to build from * @return the built {@link ApplicationBuilder} object * @see */ public static ApplicationBuilder createFromRawSchema(Schema rawSchema, RankProfileRegistry rankProfileRegistry, QueryProfileRegistry queryProfileRegistry) { ApplicationBuilder builder = new ApplicationBuilder(rankProfileRegistry, queryProfileRegistry); builder.add(rawSchema); builder.build(true); return builder; } /** * Convenience factory method to import and build a {@link Schema} object from a raw object. 
* * @param rawSchema the raw object to build from * @return the built {@link Schema} object * @see */ public static Schema buildFromRawSchema(Schema rawSchema, RankProfileRegistry rankProfileRegistry, QueryProfileRegistry queryProfileRegistry) { return createFromRawSchema(rawSchema, rankProfileRegistry, queryProfileRegistry).getSchema(); } public RankProfileRegistry getRankProfileRegistry() { return rankProfileRegistry; } public QueryProfileRegistry getQueryProfileRegistry() { return queryProfileRegistry; } public ModelContext.Properties getProperties() { return properties; } public DeployLogger getDeployLogger() { return deployLogger; } @SuppressWarnings("EmptyCatchBlock") private static void closeIgnoreException(Reader reader) { try { reader.close(); } catch(Exception e) {} } }
Yes, but maybe it'll just cause confusion. I added it.
private void addRankProfileFiles(Schema schema) { if (applicationPackage == null) return; Path rankProfilePath = ApplicationPackage.SCHEMAS_DIR.append(schema.getName()); for (NamedReader reader : applicationPackage.getFiles(rankProfilePath, ".profile")) { parseRankProfile(reader, schema); } }
Path rankProfilePath = ApplicationPackage.SCHEMAS_DIR.append(schema.getName());
private void addRankProfileFiles(Schema schema) { if (applicationPackage == null) return; Path legacyRankProfilePath = ApplicationPackage.SEARCH_DEFINITIONS_DIR.append(schema.getName()); for (NamedReader reader : applicationPackage.getFiles(legacyRankProfilePath, ".profile")) parseRankProfile(reader, schema); Path rankProfilePath = ApplicationPackage.SCHEMAS_DIR.append(schema.getName()); for (NamedReader reader : applicationPackage.getFiles(rankProfilePath, ".profile")) parseRankProfile(reader, schema); }
class ApplicationBuilder { private final ApplicationPackage applicationPackage; private final List<Schema> schemas = new ArrayList<>(); private final DocumentTypeManager documentTypeManager = new DocumentTypeManager(); private final RankProfileRegistry rankProfileRegistry; private final QueryProfileRegistry queryProfileRegistry; private final FileRegistry fileRegistry; private final DeployLogger deployLogger; private final ModelContext.Properties properties; /** True to build the document aspect only, skipping instantiation of rank profiles */ private final boolean documentsOnly; private Application application; private final Set<Class<? extends Processor>> processorsToSkip = new HashSet<>(); /** For testing only */ public ApplicationBuilder() { this(new RankProfileRegistry(), new QueryProfileRegistry()); } /** For testing only */ public ApplicationBuilder(DeployLogger deployLogger) { this(MockApplicationPackage.createEmpty(), deployLogger); } /** For testing only */ public ApplicationBuilder(DeployLogger deployLogger, RankProfileRegistry rankProfileRegistry) { this(MockApplicationPackage.createEmpty(), deployLogger, rankProfileRegistry); } /** Used for generating documents for typed access to document fields in Java */ public ApplicationBuilder(boolean documentsOnly) { this(MockApplicationPackage.createEmpty(), new MockFileRegistry(), new BaseDeployLogger(), new TestProperties(), new RankProfileRegistry(), new QueryProfileRegistry(), documentsOnly); } /** For testing only */ public ApplicationBuilder(ApplicationPackage app, DeployLogger deployLogger) { this(app, new MockFileRegistry(), deployLogger, new TestProperties(), new RankProfileRegistry(), new QueryProfileRegistry()); } /** For testing only */ public ApplicationBuilder(ApplicationPackage app, DeployLogger deployLogger, RankProfileRegistry rankProfileRegistry) { this(app, new MockFileRegistry(), deployLogger, new TestProperties(), rankProfileRegistry, new QueryProfileRegistry()); } /** For testing only */ 
public ApplicationBuilder(RankProfileRegistry rankProfileRegistry) { this(rankProfileRegistry, new QueryProfileRegistry()); } /** For testing only */ public ApplicationBuilder(RankProfileRegistry rankProfileRegistry, QueryProfileRegistry queryProfileRegistry) { this(rankProfileRegistry, queryProfileRegistry, new TestProperties()); } public ApplicationBuilder(RankProfileRegistry rankProfileRegistry, QueryProfileRegistry queryProfileRegistry, ModelContext.Properties properties) { this(MockApplicationPackage.createEmpty(), new MockFileRegistry(), new BaseDeployLogger(), properties, rankProfileRegistry, queryProfileRegistry); } public ApplicationBuilder(ApplicationPackage app, FileRegistry fileRegistry, DeployLogger deployLogger, ModelContext.Properties properties, RankProfileRegistry rankProfileRegistry, QueryProfileRegistry queryProfileRegistry) { this(app, fileRegistry, deployLogger, properties, rankProfileRegistry, queryProfileRegistry, false); } private ApplicationBuilder(ApplicationPackage applicationPackage, FileRegistry fileRegistry, DeployLogger deployLogger, ModelContext.Properties properties, RankProfileRegistry rankProfileRegistry, QueryProfileRegistry queryProfileRegistry, boolean documentsOnly) { this.applicationPackage = applicationPackage; this.rankProfileRegistry = rankProfileRegistry; this.queryProfileRegistry = queryProfileRegistry; this.fileRegistry = fileRegistry; this.deployLogger = deployLogger; this.properties = properties; this.documentsOnly = documentsOnly; for (NamedReader reader : applicationPackage.getSchemas()) addSchema(reader); } /** * Adds a schema to this application. 
* * @param fileName the name of the file to import * @return the name of the imported object * @throws IOException thrown if the file can not be read for some reason * @throws ParseException thrown if the file does not contain a valid search definition */ public Schema addSchemaFile(String fileName) throws IOException, ParseException { File file = new File(fileName); return addSchema(IOUtils.readFile(file)); } /** * Reads and parses the schema string provided by the given reader. Once all schemas have been * imported, call {@link * * @param reader the reader whose content to import */ public void addSchema(NamedReader reader) { try { String schemaName = addSchema(IOUtils.readAll(reader)).getName(); String schemaFileName = stripSuffix(reader.getName(), ApplicationPackage.SD_NAME_SUFFIX); if ( ! schemaFileName.equals(schemaName)) { throw new IllegalArgumentException("The file containing schema '" + schemaName + "' must be named '" + schemaName + ApplicationPackage.SD_NAME_SUFFIX + "', not " + reader.getName()); } } catch (ParseException e) { throw new IllegalArgumentException("Could not parse schema file '" + reader.getName() + "'", e); } catch (IOException e) { throw new IllegalArgumentException("Could not read schema file '" + reader.getName() + "'", e); } finally { closeIgnoreException(reader.getReader()); } } private static String stripSuffix(String readerName, String suffix) { if ( ! readerName.endsWith(suffix)) throw new IllegalArgumentException("Schema '" + readerName + "' does not end with " + suffix); return readerName.substring(0, readerName.length() - suffix.length()); } /** * Adds a schema to this * * @param schemaString the content of the schema */ public Schema addSchema(String schemaString) throws ParseException { return add(createSchema(schemaString)); } /** * Registers the given schema to the application to be built during {@link * {@link Schema} object is considered to be "raw" if it has not already been processed. 
This is the case for most * programmatically constructed schemas used in unit tests. * * @param schema the object to import * @throws IllegalArgumentException if the given search object has already been processed */ public Schema add(Schema schema) { if (schema.getName() == null) throw new IllegalArgumentException("Schema has no name"); schemas.add(schema); return schema; } private Schema createSchema(String schemaString) throws ParseException { Schema schema = parseSchema(schemaString); addRankProfileFiles(schema); return schema; } private Schema parseSchema(String schemaString) throws ParseException { SimpleCharStream stream = new SimpleCharStream(schemaString); try { return new SDParser(stream, applicationPackage, fileRegistry, deployLogger, properties, rankProfileRegistry, documentsOnly) .schema(documentTypeManager); } catch (TokenMgrException e) { throw new ParseException("Unknown symbol: " + e.getMessage()); } catch (ParseException pe) { throw new ParseException(stream.formatException(Exceptions.toMessageString(pe))); } } /** Parses the rank profile of the given reader and adds it to the rank profile registry for this schema. */ private void parseRankProfile(NamedReader reader, Schema schema) { try { SimpleCharStream stream = new SimpleCharStream(IOUtils.readAll(reader.getReader())); try { new SDParser(stream, applicationPackage, fileRegistry, deployLogger, properties, rankProfileRegistry, documentsOnly) .rankProfile(schema); } catch (TokenMgrException e) { throw new ParseException("Unknown symbol: " + e.getMessage()); } catch (ParseException pe) { throw new ParseException(stream.formatException(Exceptions.toMessageString(pe))); } } catch (IOException e) { throw new IllegalArgumentException("Could not read rank profile " + reader.getName(), e); } catch (ParseException e) { throw new IllegalArgumentException("Could not parse rank profile " + reader.getName(), e); } } /** * Processes and finalizes the schemas of this. 
* * @throws IllegalStateException thrown if this method has already been called */ public Application build(boolean validate) { if (application != null) throw new IllegalStateException("Application already built"); application = new Application(applicationPackage, schemas, rankProfileRegistry, new QueryProfiles(queryProfileRegistry, deployLogger), properties, documentsOnly, validate, processorsToSkip, deployLogger); return application; } /** Returns a modifiable set of processors we should skip for these schemas. Useful for testing. */ public Set<Class<? extends Processor>> processorsToSkip() { return processorsToSkip; } /** * Convenience method to call {@link * built. This method will never return null. * * @return the built object * @throws IllegalStateException if there is not exactly one search. */ public Schema getSchema() { if (application == null) throw new IllegalStateException("Application not built"); if (application.schemas().size() != 1) throw new IllegalStateException("This call only works if we have 1 schema. Schemas: " + application.schemas().values()); return application.schemas().values().stream().findAny().get(); } public DocumentModel getModel() { return application.documentModel(); } /** * Returns the built {@link Schema} object that has the given name. If the name is unknown, this method will simply * return null. * * @param name the name of the schema to return, * or null to return the only one or throw an exception if there are multiple to choose from * @return the built object, or null if none with this name * @throws IllegalStateException if {@link */ public Schema getSchema(String name) { if (application == null) throw new IllegalStateException("Application not built"); if (name == null) return getSchema(); return application.schemas().get(name); } public Application application() { return application; } /** * Convenience method to return a list of all built {@link Schema} objects. 
* * @return the list of built searches */ public List<Schema> getSchemaList() { return new ArrayList<>(application.schemas().values()); } /** * Convenience factory method to import and build a {@link Schema} object from a string. * * @param sd the string to build from * @return the built {@link ApplicationBuilder} object * @throws ParseException thrown if there is a problem parsing the string */ public static ApplicationBuilder createFromString(String sd) throws ParseException { return createFromString(sd, new BaseDeployLogger()); } public static ApplicationBuilder createFromString(String sd, DeployLogger logger) throws ParseException { ApplicationBuilder builder = new ApplicationBuilder(logger); builder.addSchema(sd); builder.build(true); return builder; } public static ApplicationBuilder createFromStrings(DeployLogger logger, String ... schemas) throws ParseException { ApplicationBuilder builder = new ApplicationBuilder(logger); for (var schema : schemas) builder.addSchema(schema); builder.build(true); return builder; } /** * Convenience factory method to import and build a {@link Schema} object from a file. Only for testing. * * @param fileName the file to build from * @return the built {@link ApplicationBuilder} object * @throws IOException if there was a problem reading the file. * @throws ParseException if there was a problem parsing the file content. */ public static ApplicationBuilder createFromFile(String fileName) throws IOException, ParseException { return createFromFile(fileName, new BaseDeployLogger()); } /** * Convenience factory methdd to create a SearchBuilder from multiple SD files. Only for testing. 
*/ public static ApplicationBuilder createFromFiles(Collection<String> fileNames) throws IOException, ParseException { return createFromFiles(fileNames, new BaseDeployLogger()); } public static ApplicationBuilder createFromFile(String fileName, DeployLogger logger) throws IOException, ParseException { return createFromFile(fileName, logger, new RankProfileRegistry(), new QueryProfileRegistry()); } private static ApplicationBuilder createFromFiles(Collection<String> fileNames, DeployLogger logger) throws IOException, ParseException { return createFromFiles(fileNames, new MockFileRegistry(), logger, new TestProperties(), new RankProfileRegistry(), new QueryProfileRegistry()); } /** * Convenience factory method to import and build a {@link Schema} object from a file. * * @param fileName the file to build from. * @param deployLogger logger for deploy messages. * @param rankProfileRegistry registry for rank profiles. * @return the built {@link ApplicationBuilder} object. * @throws IOException if there was a problem reading the file. * @throws ParseException if there was a problem parsing the file content. */ private static ApplicationBuilder createFromFile(String fileName, DeployLogger deployLogger, RankProfileRegistry rankProfileRegistry, QueryProfileRegistry queryprofileRegistry) throws IOException, ParseException { return createFromFiles(Collections.singletonList(fileName), new MockFileRegistry(), deployLogger, new TestProperties(), rankProfileRegistry, queryprofileRegistry); } /** * Convenience factory methdd to create a SearchBuilder from multiple SD files.. 
*/ private static ApplicationBuilder createFromFiles(Collection<String> fileNames, FileRegistry fileRegistry, DeployLogger deployLogger, ModelContext.Properties properties, RankProfileRegistry rankProfileRegistry, QueryProfileRegistry queryprofileRegistry) throws IOException, ParseException { ApplicationBuilder builder = new ApplicationBuilder(MockApplicationPackage.createEmpty(), fileRegistry, deployLogger, properties, rankProfileRegistry, queryprofileRegistry); for (String fileName : fileNames) { builder.addSchemaFile(fileName); } builder.build(true); return builder; } public static ApplicationBuilder createFromDirectory(String dir, FileRegistry fileRegistry, DeployLogger logger, ModelContext.Properties properties) throws IOException, ParseException { return createFromDirectory(dir, fileRegistry, logger, properties, new RankProfileRegistry()); } public static ApplicationBuilder createFromDirectory(String dir, FileRegistry fileRegistry, DeployLogger logger, ModelContext.Properties properties, RankProfileRegistry rankProfileRegistry) throws IOException, ParseException { return createFromDirectory(dir, fileRegistry, logger, properties, rankProfileRegistry, createQueryProfileRegistryFromDirectory(dir)); } private static ApplicationBuilder createFromDirectory(String dir, FileRegistry fileRegistry, DeployLogger logger, ModelContext.Properties properties, RankProfileRegistry rankProfileRegistry, QueryProfileRegistry queryProfileRegistry) throws IOException, ParseException { return createFromDirectory(dir, MockApplicationPackage.fromSearchDefinitionAndRootDirectory(dir), fileRegistry, logger, properties, rankProfileRegistry, queryProfileRegistry); } private static ApplicationBuilder createFromDirectory(String dir, ApplicationPackage applicationPackage, FileRegistry fileRegistry, DeployLogger deployLogger, ModelContext.Properties properties, RankProfileRegistry rankProfileRegistry, QueryProfileRegistry queryProfileRegistry) throws IOException, ParseException { 
ApplicationBuilder builder = new ApplicationBuilder(applicationPackage, fileRegistry, deployLogger, properties, rankProfileRegistry, queryProfileRegistry); for (var i = Files.list(new File(dir).toPath()).filter(p -> p.getFileName().toString().endsWith(".sd")).iterator(); i.hasNext(); ) { builder.addSchemaFile(i.next().toString()); } builder.build(true); return builder; } private static QueryProfileRegistry createQueryProfileRegistryFromDirectory(String dir) { File queryProfilesDir = new File(dir, "query-profiles"); if ( ! queryProfilesDir.exists()) return new QueryProfileRegistry(); return new QueryProfileXMLReader().read(queryProfilesDir.toString()); } /** * Convenience factory method to import and build a {@link Schema} object from a file. Only for testing. * * @param fileName the file to build from * @return the built {@link Schema} object * @throws IOException thrown if there was a problem reading the file * @throws ParseException thrown if there was a problem parsing the file content */ public static Schema buildFromFile(String fileName) throws IOException, ParseException { return buildFromFile(fileName, new BaseDeployLogger(), new RankProfileRegistry(), new QueryProfileRegistry()); } /** * Convenience factory method to import and build a {@link Schema} object from a file. * * @param fileName the file to build from * @param rankProfileRegistry registry for rank profiles * @return the built {@link Schema} object * @throws IOException thrown if there was a problem reading the file * @throws ParseException thrown if there was a problem parsing the file content */ public static Schema buildFromFile(String fileName, RankProfileRegistry rankProfileRegistry, QueryProfileRegistry queryProfileRegistry) throws IOException, ParseException { return buildFromFile(fileName, new BaseDeployLogger(), rankProfileRegistry, queryProfileRegistry); } /** * Convenience factory method to import and build a {@link Schema} from a file. 
* * @param fileName the file to build from * @param deployLogger logger for deploy messages * @param rankProfileRegistry registry for rank profiles * @return the built {@link Schema} object * @throws IOException thrown if there was a problem reading the file * @throws ParseException thrown if there was a problem parsing the file content */ public static Schema buildFromFile(String fileName, DeployLogger deployLogger, RankProfileRegistry rankProfileRegistry, QueryProfileRegistry queryProfileRegistry) throws IOException, ParseException { return createFromFile(fileName, deployLogger, rankProfileRegistry, queryProfileRegistry).getSchema(); } /** * Convenience factory method to import and build a {@link Schema} object from a raw object. * * @param rawSchema the raw object to build from * @return the built {@link ApplicationBuilder} object * @see */ public static ApplicationBuilder createFromRawSchema(Schema rawSchema, RankProfileRegistry rankProfileRegistry, QueryProfileRegistry queryProfileRegistry) { ApplicationBuilder builder = new ApplicationBuilder(rankProfileRegistry, queryProfileRegistry); builder.add(rawSchema); builder.build(true); return builder; } /** * Convenience factory method to import and build a {@link Schema} object from a raw object. 
* * @param rawSchema the raw object to build from * @return the built {@link Schema} object * @see */ public static Schema buildFromRawSchema(Schema rawSchema, RankProfileRegistry rankProfileRegistry, QueryProfileRegistry queryProfileRegistry) { return createFromRawSchema(rawSchema, rankProfileRegistry, queryProfileRegistry).getSchema(); } public RankProfileRegistry getRankProfileRegistry() { return rankProfileRegistry; } public QueryProfileRegistry getQueryProfileRegistry() { return queryProfileRegistry; } public ModelContext.Properties getProperties() { return properties; } public DeployLogger getDeployLogger() { return deployLogger; } @SuppressWarnings("EmptyCatchBlock") private static void closeIgnoreException(Reader reader) { try { reader.close(); } catch(Exception e) {} } }
class ApplicationBuilder { private final ApplicationPackage applicationPackage; private final List<Schema> schemas = new ArrayList<>(); private final DocumentTypeManager documentTypeManager = new DocumentTypeManager(); private final RankProfileRegistry rankProfileRegistry; private final QueryProfileRegistry queryProfileRegistry; private final FileRegistry fileRegistry; private final DeployLogger deployLogger; private final ModelContext.Properties properties; /** True to build the document aspect only, skipping instantiation of rank profiles */ private final boolean documentsOnly; private Application application; private final Set<Class<? extends Processor>> processorsToSkip = new HashSet<>(); /** For testing only */ public ApplicationBuilder() { this(new RankProfileRegistry(), new QueryProfileRegistry()); } /** For testing only */ public ApplicationBuilder(DeployLogger deployLogger) { this(MockApplicationPackage.createEmpty(), deployLogger); } /** For testing only */ public ApplicationBuilder(DeployLogger deployLogger, RankProfileRegistry rankProfileRegistry) { this(MockApplicationPackage.createEmpty(), deployLogger, rankProfileRegistry); } /** Used for generating documents for typed access to document fields in Java */ public ApplicationBuilder(boolean documentsOnly) { this(MockApplicationPackage.createEmpty(), new MockFileRegistry(), new BaseDeployLogger(), new TestProperties(), new RankProfileRegistry(), new QueryProfileRegistry(), documentsOnly); } /** For testing only */ public ApplicationBuilder(ApplicationPackage app, DeployLogger deployLogger) { this(app, new MockFileRegistry(), deployLogger, new TestProperties(), new RankProfileRegistry(), new QueryProfileRegistry()); } /** For testing only */ public ApplicationBuilder(ApplicationPackage app, DeployLogger deployLogger, RankProfileRegistry rankProfileRegistry) { this(app, new MockFileRegistry(), deployLogger, new TestProperties(), rankProfileRegistry, new QueryProfileRegistry()); } /** For testing only */ 
public ApplicationBuilder(RankProfileRegistry rankProfileRegistry) { this(rankProfileRegistry, new QueryProfileRegistry()); } /** For testing only */ public ApplicationBuilder(RankProfileRegistry rankProfileRegistry, QueryProfileRegistry queryProfileRegistry) { this(rankProfileRegistry, queryProfileRegistry, new TestProperties()); } public ApplicationBuilder(RankProfileRegistry rankProfileRegistry, QueryProfileRegistry queryProfileRegistry, ModelContext.Properties properties) { this(MockApplicationPackage.createEmpty(), new MockFileRegistry(), new BaseDeployLogger(), properties, rankProfileRegistry, queryProfileRegistry); } public ApplicationBuilder(ApplicationPackage app, FileRegistry fileRegistry, DeployLogger deployLogger, ModelContext.Properties properties, RankProfileRegistry rankProfileRegistry, QueryProfileRegistry queryProfileRegistry) { this(app, fileRegistry, deployLogger, properties, rankProfileRegistry, queryProfileRegistry, false); } private ApplicationBuilder(ApplicationPackage applicationPackage, FileRegistry fileRegistry, DeployLogger deployLogger, ModelContext.Properties properties, RankProfileRegistry rankProfileRegistry, QueryProfileRegistry queryProfileRegistry, boolean documentsOnly) { this.applicationPackage = applicationPackage; this.rankProfileRegistry = rankProfileRegistry; this.queryProfileRegistry = queryProfileRegistry; this.fileRegistry = fileRegistry; this.deployLogger = deployLogger; this.properties = properties; this.documentsOnly = documentsOnly; for (NamedReader reader : applicationPackage.getSchemas()) addSchema(reader); } /** * Adds a schema to this application. 
* * @param fileName the name of the file to import * @return the name of the imported object * @throws IOException thrown if the file can not be read for some reason * @throws ParseException thrown if the file does not contain a valid search definition */ public Schema addSchemaFile(String fileName) throws IOException, ParseException { File file = new File(fileName); return addSchema(IOUtils.readFile(file)); } /** * Reads and parses the schema string provided by the given reader. Once all schemas have been * imported, call {@link * * @param reader the reader whose content to import */ public void addSchema(NamedReader reader) { try { String schemaName = addSchema(IOUtils.readAll(reader)).getName(); String schemaFileName = stripSuffix(reader.getName(), ApplicationPackage.SD_NAME_SUFFIX); if ( ! schemaFileName.equals(schemaName)) { throw new IllegalArgumentException("The file containing schema '" + schemaName + "' must be named '" + schemaName + ApplicationPackage.SD_NAME_SUFFIX + "', not " + reader.getName()); } } catch (ParseException e) { throw new IllegalArgumentException("Could not parse schema file '" + reader.getName() + "'", e); } catch (IOException e) { throw new IllegalArgumentException("Could not read schema file '" + reader.getName() + "'", e); } finally { closeIgnoreException(reader.getReader()); } } private static String stripSuffix(String readerName, String suffix) { if ( ! readerName.endsWith(suffix)) throw new IllegalArgumentException("Schema '" + readerName + "' does not end with " + suffix); return readerName.substring(0, readerName.length() - suffix.length()); } /** * Adds a schema to this * * @param schemaString the content of the schema */ public Schema addSchema(String schemaString) throws ParseException { return add(createSchema(schemaString)); } /** * Registers the given schema to the application to be built during {@link * {@link Schema} object is considered to be "raw" if it has not already been processed. 
This is the case for most * programmatically constructed schemas used in unit tests. * * @param schema the object to import * @throws IllegalArgumentException if the given search object has already been processed */ public Schema add(Schema schema) { if (schema.getName() == null) throw new IllegalArgumentException("Schema has no name"); schemas.add(schema); return schema; } private Schema createSchema(String schemaString) throws ParseException { Schema schema = parseSchema(schemaString); addRankProfileFiles(schema); return schema; } private Schema parseSchema(String schemaString) throws ParseException { SimpleCharStream stream = new SimpleCharStream(schemaString); try { return parserOf(stream).schema(documentTypeManager); } catch (TokenMgrException e) { throw new ParseException("Unknown symbol: " + e.getMessage()); } catch (ParseException pe) { throw new ParseException(stream.formatException(Exceptions.toMessageString(pe))); } } /** Parses the rank profile of the given reader and adds it to the rank profile registry for this schema. */ private void parseRankProfile(NamedReader reader, Schema schema) { try { SimpleCharStream stream = new SimpleCharStream(IOUtils.readAll(reader.getReader())); try { parserOf(stream).rankProfile(schema); } catch (TokenMgrException e) { throw new ParseException("Unknown symbol: " + e.getMessage()); } catch (ParseException pe) { throw new ParseException(stream.formatException(Exceptions.toMessageString(pe))); } } catch (IOException e) { throw new IllegalArgumentException("Could not read rank profile " + reader.getName(), e); } catch (ParseException e) { throw new IllegalArgumentException("Could not parse rank profile " + reader.getName(), e); } } private SDParser parserOf(SimpleCharStream stream) { return new SDParser(stream, applicationPackage, fileRegistry, deployLogger, properties, rankProfileRegistry, documentsOnly); } /** * Processes and finalizes the schemas of this. 
* * @throws IllegalStateException thrown if this method has already been called */ public Application build(boolean validate) { if (application != null) throw new IllegalStateException("Application already built"); application = new Application(applicationPackage, schemas, rankProfileRegistry, new QueryProfiles(queryProfileRegistry, deployLogger), properties, documentsOnly, validate, processorsToSkip, deployLogger); return application; } /** Returns a modifiable set of processors we should skip for these schemas. Useful for testing. */ public Set<Class<? extends Processor>> processorsToSkip() { return processorsToSkip; } /** * Convenience method to call {@link * built. This method will never return null. * * @return the built object * @throws IllegalStateException if there is not exactly one search. */ public Schema getSchema() { if (application == null) throw new IllegalStateException("Application not built"); if (application.schemas().size() != 1) throw new IllegalStateException("This call only works if we have 1 schema. Schemas: " + application.schemas().values()); return application.schemas().values().stream().findAny().get(); } public DocumentModel getModel() { return application.documentModel(); } /** * Returns the built {@link Schema} object that has the given name. If the name is unknown, this method will simply * return null. * * @param name the name of the schema to return, * or null to return the only one or throw an exception if there are multiple to choose from * @return the built object, or null if none with this name * @throws IllegalStateException if {@link */ public Schema getSchema(String name) { if (application == null) throw new IllegalStateException("Application not built"); if (name == null) return getSchema(); return application.schemas().get(name); } public Application application() { return application; } /** * Convenience method to return a list of all built {@link Schema} objects. 
* * @return the list of built searches */ public List<Schema> getSchemaList() { return new ArrayList<>(application.schemas().values()); } /** * Convenience factory method to import and build a {@link Schema} object from a string. * * @param sd the string to build from * @return the built {@link ApplicationBuilder} object * @throws ParseException thrown if there is a problem parsing the string */ public static ApplicationBuilder createFromString(String sd) throws ParseException { return createFromString(sd, new BaseDeployLogger()); } public static ApplicationBuilder createFromString(String sd, DeployLogger logger) throws ParseException { ApplicationBuilder builder = new ApplicationBuilder(logger); builder.addSchema(sd); builder.build(true); return builder; } public static ApplicationBuilder createFromStrings(DeployLogger logger, String ... schemas) throws ParseException { ApplicationBuilder builder = new ApplicationBuilder(logger); for (var schema : schemas) builder.addSchema(schema); builder.build(true); return builder; } /** * Convenience factory method to import and build a {@link Schema} object from a file. Only for testing. * * @param fileName the file to build from * @return the built {@link ApplicationBuilder} object * @throws IOException if there was a problem reading the file. * @throws ParseException if there was a problem parsing the file content. */ public static ApplicationBuilder createFromFile(String fileName) throws IOException, ParseException { return createFromFile(fileName, new BaseDeployLogger()); } /** * Convenience factory methdd to create a SearchBuilder from multiple SD files. Only for testing. 
*/ public static ApplicationBuilder createFromFiles(Collection<String> fileNames) throws IOException, ParseException { return createFromFiles(fileNames, new BaseDeployLogger()); } public static ApplicationBuilder createFromFile(String fileName, DeployLogger logger) throws IOException, ParseException { return createFromFile(fileName, logger, new RankProfileRegistry(), new QueryProfileRegistry()); } private static ApplicationBuilder createFromFiles(Collection<String> fileNames, DeployLogger logger) throws IOException, ParseException { return createFromFiles(fileNames, new MockFileRegistry(), logger, new TestProperties(), new RankProfileRegistry(), new QueryProfileRegistry()); } /** * Convenience factory method to import and build a {@link Schema} object from a file. * * @param fileName the file to build from. * @param deployLogger logger for deploy messages. * @param rankProfileRegistry registry for rank profiles. * @return the built {@link ApplicationBuilder} object. * @throws IOException if there was a problem reading the file. * @throws ParseException if there was a problem parsing the file content. */ private static ApplicationBuilder createFromFile(String fileName, DeployLogger deployLogger, RankProfileRegistry rankProfileRegistry, QueryProfileRegistry queryprofileRegistry) throws IOException, ParseException { return createFromFiles(Collections.singletonList(fileName), new MockFileRegistry(), deployLogger, new TestProperties(), rankProfileRegistry, queryprofileRegistry); } /** * Convenience factory methdd to create a SearchBuilder from multiple SD files.. 
*/ private static ApplicationBuilder createFromFiles(Collection<String> fileNames, FileRegistry fileRegistry, DeployLogger deployLogger, ModelContext.Properties properties, RankProfileRegistry rankProfileRegistry, QueryProfileRegistry queryprofileRegistry) throws IOException, ParseException { ApplicationBuilder builder = new ApplicationBuilder(MockApplicationPackage.createEmpty(), fileRegistry, deployLogger, properties, rankProfileRegistry, queryprofileRegistry); for (String fileName : fileNames) { builder.addSchemaFile(fileName); } builder.build(true); return builder; } public static ApplicationBuilder createFromDirectory(String dir, FileRegistry fileRegistry, DeployLogger logger, ModelContext.Properties properties) throws IOException, ParseException { return createFromDirectory(dir, fileRegistry, logger, properties, new RankProfileRegistry()); } public static ApplicationBuilder createFromDirectory(String dir, FileRegistry fileRegistry, DeployLogger logger, ModelContext.Properties properties, RankProfileRegistry rankProfileRegistry) throws IOException, ParseException { return createFromDirectory(dir, fileRegistry, logger, properties, rankProfileRegistry, createQueryProfileRegistryFromDirectory(dir)); } private static ApplicationBuilder createFromDirectory(String dir, FileRegistry fileRegistry, DeployLogger logger, ModelContext.Properties properties, RankProfileRegistry rankProfileRegistry, QueryProfileRegistry queryProfileRegistry) throws IOException, ParseException { return createFromDirectory(dir, MockApplicationPackage.fromSearchDefinitionAndRootDirectory(dir), fileRegistry, logger, properties, rankProfileRegistry, queryProfileRegistry); } private static ApplicationBuilder createFromDirectory(String dir, ApplicationPackage applicationPackage, FileRegistry fileRegistry, DeployLogger deployLogger, ModelContext.Properties properties, RankProfileRegistry rankProfileRegistry, QueryProfileRegistry queryProfileRegistry) throws IOException, ParseException { 
ApplicationBuilder builder = new ApplicationBuilder(applicationPackage, fileRegistry, deployLogger, properties, rankProfileRegistry, queryProfileRegistry); for (var i = Files.list(new File(dir).toPath()).filter(p -> p.getFileName().toString().endsWith(".sd")).iterator(); i.hasNext(); ) { builder.addSchemaFile(i.next().toString()); } builder.build(true); return builder; } private static QueryProfileRegistry createQueryProfileRegistryFromDirectory(String dir) { File queryProfilesDir = new File(dir, "query-profiles"); if ( ! queryProfilesDir.exists()) return new QueryProfileRegistry(); return new QueryProfileXMLReader().read(queryProfilesDir.toString()); } /** * Convenience factory method to import and build a {@link Schema} object from a file. Only for testing. * * @param fileName the file to build from * @return the built {@link Schema} object * @throws IOException thrown if there was a problem reading the file * @throws ParseException thrown if there was a problem parsing the file content */ public static Schema buildFromFile(String fileName) throws IOException, ParseException { return buildFromFile(fileName, new BaseDeployLogger(), new RankProfileRegistry(), new QueryProfileRegistry()); } /** * Convenience factory method to import and build a {@link Schema} object from a file. * * @param fileName the file to build from * @param rankProfileRegistry registry for rank profiles * @return the built {@link Schema} object * @throws IOException thrown if there was a problem reading the file * @throws ParseException thrown if there was a problem parsing the file content */ public static Schema buildFromFile(String fileName, RankProfileRegistry rankProfileRegistry, QueryProfileRegistry queryProfileRegistry) throws IOException, ParseException { return buildFromFile(fileName, new BaseDeployLogger(), rankProfileRegistry, queryProfileRegistry); } /** * Convenience factory method to import and build a {@link Schema} from a file. 
* * @param fileName the file to build from * @param deployLogger logger for deploy messages * @param rankProfileRegistry registry for rank profiles * @return the built {@link Schema} object * @throws IOException thrown if there was a problem reading the file * @throws ParseException thrown if there was a problem parsing the file content */ public static Schema buildFromFile(String fileName, DeployLogger deployLogger, RankProfileRegistry rankProfileRegistry, QueryProfileRegistry queryProfileRegistry) throws IOException, ParseException { return createFromFile(fileName, deployLogger, rankProfileRegistry, queryProfileRegistry).getSchema(); } /** * Convenience factory method to import and build a {@link Schema} object from a raw object. * * @param rawSchema the raw object to build from * @return the built {@link ApplicationBuilder} object * @see */ public static ApplicationBuilder createFromRawSchema(Schema rawSchema, RankProfileRegistry rankProfileRegistry, QueryProfileRegistry queryProfileRegistry) { ApplicationBuilder builder = new ApplicationBuilder(rankProfileRegistry, queryProfileRegistry); builder.add(rawSchema); builder.build(true); return builder; } /** * Convenience factory method to import and build a {@link Schema} object from a raw object. 
* * @param rawSchema the raw object to build from * @return the built {@link Schema} object * @see */ public static Schema buildFromRawSchema(Schema rawSchema, RankProfileRegistry rankProfileRegistry, QueryProfileRegistry queryProfileRegistry) { return createFromRawSchema(rawSchema, rankProfileRegistry, queryProfileRegistry).getSchema(); } public RankProfileRegistry getRankProfileRegistry() { return rankProfileRegistry; } public QueryProfileRegistry getQueryProfileRegistry() { return queryProfileRegistry; } public ModelContext.Properties getProperties() { return properties; } public DeployLogger getDeployLogger() { return deployLogger; } @SuppressWarnings("EmptyCatchBlock") private static void closeIgnoreException(Reader reader) { try { reader.close(); } catch(Exception e) {} } }
Oops!
/**
 * Orders application versions by ascending build number.
 * Versions without a build number sort before versions with one; when both have a build
 * number, a directly deployed version sorts before a pipeline-built one, and otherwise
 * the lower build number sorts first.
 *
 * NOTE(review): this ordering considers build number and deployedDirectly only, while
 * equals() also considers commit() — confirm that compareTo need not be consistent with equals.
 *
 * @param o the version to compare against; must not be null
 * @return a negative integer, zero, or a positive integer per the Comparable contract
 */
public int compareTo(ApplicationVersion o) {
    if (buildNumber().isEmpty() || o.buildNumber().isEmpty())
        // Fix: use the buildNumber() accessor consistently instead of reading o's field directly.
        return Boolean.compare(buildNumber().isPresent(), o.buildNumber().isPresent());

    if (deployedDirectly != o.deployedDirectly)
        // Equivalent to Boolean.compare(!deployedDirectly, !o.deployedDirectly):
        // the directly deployed version orders first.
        return Boolean.compare(o.deployedDirectly, deployedDirectly);

    return Long.compare(buildNumber().getAsLong(), o.buildNumber().getAsLong());
}
if (deployedDirectly != o.deployedDirectly)
/**
 * Compares application versions: build-number-less versions first, then directly deployed
 * versions before pipeline-built ones, then ascending build number.
 *
 * @param o the other version; must not be null
 * @return a negative, zero, or positive integer per the Comparable contract
 */
public int compareTo(ApplicationVersion o) {
    boolean thisHasBuild = buildNumber().isPresent();
    boolean otherHasBuild = o.buildNumber.isPresent();

    // If either side lacks a build number, presence alone decides the order.
    if ( ! thisHasBuild || ! otherHasBuild)
        return Boolean.compare(thisHasBuild, otherHasBuild);

    // Both have build numbers: a directly deployed version orders before a pipeline-built one.
    if (deployedDirectly != o.deployedDirectly)
        return Boolean.compare( ! deployedDirectly, ! o.deployedDirectly);

    long thisBuild = buildNumber().getAsLong();
    long otherBuild = o.buildNumber().getAsLong();
    return Long.compare(thisBuild, otherBuild);
}
class ApplicationVersion implements Comparable<ApplicationVersion> { /** * Used in cases where application version cannot be determined, such as manual deployments (e.g. in dev * environment) */ public static final ApplicationVersion unknown = new ApplicationVersion(Optional.empty(), OptionalLong.empty(), Optional.empty(), Optional.empty(), Optional.empty(), Optional.empty(), Optional.empty(), true, Optional.empty()); private static final String majorVersion = "1.0"; private final Optional<SourceRevision> source; private final Optional<String> authorEmail; private final OptionalLong buildNumber; private final Optional<Version> compileVersion; private final Optional<Instant> buildTime; private final Optional<String> sourceUrl; private final Optional<String> commit; private final boolean deployedDirectly; private final Optional<String> bundleHash; /** Public for serialisation only. */ public ApplicationVersion(Optional<SourceRevision> source, OptionalLong buildNumber, Optional<String> authorEmail, Optional<Version> compileVersion, Optional<Instant> buildTime, Optional<String> sourceUrl, Optional<String> commit, boolean deployedDirectly, Optional<String> bundleHash) { if (buildNumber.isEmpty() && ( source.isPresent() || authorEmail.isPresent() || compileVersion.isPresent() || buildTime.isPresent() || sourceUrl.isPresent() || commit.isPresent())) throw new IllegalArgumentException("Build number must be present if any other attribute is"); if (buildNumber.isPresent() && buildNumber.getAsLong() <= 0) throw new IllegalArgumentException("Build number must be > 0"); if (commit.isPresent() && commit.get().length() > 128) throw new IllegalArgumentException("Commit may not be longer than 128 characters"); if (authorEmail.isPresent() && ! 
authorEmail.get().matches("[^@]+@[^@]+")) throw new IllegalArgumentException("Invalid author email '" + authorEmail.get() + "'."); if (compileVersion.isPresent() && compileVersion.get().equals(Version.emptyVersion)) throw new IllegalArgumentException("The empty version is not a legal compile version."); this.source = source; this.buildNumber = buildNumber; this.authorEmail = authorEmail; this.compileVersion = compileVersion; this.buildTime = buildTime; this.sourceUrl = Objects.requireNonNull(sourceUrl, "sourceUrl cannot be null"); this.commit = Objects.requireNonNull(commit, "commit cannot be null"); this.deployedDirectly = deployedDirectly; this.bundleHash = bundleHash; } /** Create an application package version from a completed build, without an author email */ public static ApplicationVersion from(SourceRevision source, long buildNumber) { return new ApplicationVersion(Optional.of(source), OptionalLong.of(buildNumber), Optional.empty(), Optional.empty(), Optional.empty(), Optional.empty(), Optional.empty(), false, Optional.empty()); } /** Creates a version from a completed build, an author email, and build meta data. */ public static ApplicationVersion from(SourceRevision source, long buildNumber, String authorEmail, Version compileVersion, Instant buildTime) { return new ApplicationVersion(Optional.of(source), OptionalLong.of(buildNumber), Optional.of(authorEmail), Optional.of(compileVersion), Optional.of(buildTime), Optional.empty(), Optional.empty(), false, Optional.empty()); } /** Creates a version from a completed build, an author email, and build meta data. 
*/ public static ApplicationVersion from(Optional<SourceRevision> source, long buildNumber, Optional<String> authorEmail, Optional<Version> compileVersion, Optional<Instant> buildTime, Optional<String> sourceUrl, Optional<String> commit, boolean deployedDirectly, Optional<String> bundleHash) { return new ApplicationVersion(source, OptionalLong.of(buildNumber), authorEmail, compileVersion, buildTime, sourceUrl, commit, deployedDirectly, bundleHash); } /** Returns a unique identifier for this version or "unknown" if version is not known */ public String id() { if (isUnknown()) return "unknown"; return source.map(SourceRevision::commit).map(ApplicationVersion::abbreviateCommit) .or(this::commit) .map(commit -> String.format("%s.%d-%s", majorVersion, buildNumber.getAsLong(), commit)) .orElseGet(() -> majorVersion + "." + buildNumber.getAsLong()); } /** * Returns information about the source of this revision, or empty if the source is not know/defined * (which is the case for command-line deployment from developers, but never for deployment jobs) */ public Optional<SourceRevision> source() { return source; } /** Returns the build number that built this version */ public OptionalLong buildNumber() { return buildNumber; } /** Returns the email of the author of commit of this version, if known */ public Optional<String> authorEmail() { return authorEmail; } /** Returns the Vespa version this package was compiled against, if known. */ public Optional<Version> compileVersion() { return compileVersion; } /** Returns the time this package was built, if known. */ public Optional<Instant> buildTime() { return buildTime; } /** Returns the hash of app package except deployment/build-meta data */ public Optional<String> bundleHash() { return bundleHash; } /** Returns the source URL for this application version. 
*/ public Optional<String> sourceUrl() { return sourceUrl.or(() -> source.map(source -> { String repository = source.repository(); if (repository.startsWith("git@")) repository = "https: if (repository.endsWith(".git")) repository = repository.substring(0, repository.length() - 4); return repository + "/tree/" + source.commit(); })); } /** Returns the commit name of this application version. */ public Optional<String> commit() { return commit.or(() -> source.map(SourceRevision::commit)); } /** Returns whether this is unknown */ public boolean isUnknown() { return this.equals(unknown); } /** Returns whether the application package for this version was deployed directly to zone */ public boolean isDeployedDirectly() { return deployedDirectly; } @Override public boolean equals(Object o) { if (this == o) return true; if ( ! (o instanceof ApplicationVersion)) return false; ApplicationVersion that = (ApplicationVersion) o; return Objects.equals(buildNumber, that.buildNumber) && Objects.equals(commit(), that.commit()) && deployedDirectly == that.deployedDirectly; } @Override public int hashCode() { return Objects.hash(buildNumber, commit(), deployedDirectly); } @Override public String toString() { return "Application package version: " + id() + source.map(s -> ", " + s.toString()).orElse("") + authorEmail.map(e -> ", by " + e).orElse("") + compileVersion.map(v -> ", built against " + v).orElse("") + buildTime.map(t -> " at " + t).orElse("") ; } /** Abbreviate given commit hash to 9 characters */ private static String abbreviateCommit(String hash) { return hash.length() <= 9 ? hash : hash.substring(0, 9); } @Override }
class ApplicationVersion implements Comparable<ApplicationVersion> { /** * Used in cases where application version cannot be determined, such as manual deployments (e.g. in dev * environment) */ public static final ApplicationVersion unknown = new ApplicationVersion(Optional.empty(), OptionalLong.empty(), Optional.empty(), Optional.empty(), Optional.empty(), Optional.empty(), Optional.empty(), true, Optional.empty()); private static final String majorVersion = "1.0"; private final Optional<SourceRevision> source; private final Optional<String> authorEmail; private final OptionalLong buildNumber; private final Optional<Version> compileVersion; private final Optional<Instant> buildTime; private final Optional<String> sourceUrl; private final Optional<String> commit; private final boolean deployedDirectly; private final Optional<String> bundleHash; /** Public for serialisation only. */ public ApplicationVersion(Optional<SourceRevision> source, OptionalLong buildNumber, Optional<String> authorEmail, Optional<Version> compileVersion, Optional<Instant> buildTime, Optional<String> sourceUrl, Optional<String> commit, boolean deployedDirectly, Optional<String> bundleHash) { if (buildNumber.isEmpty() && ( source.isPresent() || authorEmail.isPresent() || compileVersion.isPresent() || buildTime.isPresent() || sourceUrl.isPresent() || commit.isPresent())) throw new IllegalArgumentException("Build number must be present if any other attribute is"); if (buildNumber.isPresent() && buildNumber.getAsLong() <= 0) throw new IllegalArgumentException("Build number must be > 0"); if (commit.isPresent() && commit.get().length() > 128) throw new IllegalArgumentException("Commit may not be longer than 128 characters"); if (authorEmail.isPresent() && ! 
authorEmail.get().matches("[^@]+@[^@]+")) throw new IllegalArgumentException("Invalid author email '" + authorEmail.get() + "'."); if (compileVersion.isPresent() && compileVersion.get().equals(Version.emptyVersion)) throw new IllegalArgumentException("The empty version is not a legal compile version."); this.source = source; this.buildNumber = buildNumber; this.authorEmail = authorEmail; this.compileVersion = compileVersion; this.buildTime = buildTime; this.sourceUrl = Objects.requireNonNull(sourceUrl, "sourceUrl cannot be null"); this.commit = Objects.requireNonNull(commit, "commit cannot be null"); this.deployedDirectly = deployedDirectly; this.bundleHash = bundleHash; } /** Create an application package version from a completed build, without an author email */ public static ApplicationVersion from(SourceRevision source, long buildNumber) { return new ApplicationVersion(Optional.of(source), OptionalLong.of(buildNumber), Optional.empty(), Optional.empty(), Optional.empty(), Optional.empty(), Optional.empty(), false, Optional.empty()); } /** Creates a version from a completed build, an author email, and build meta data. */ public static ApplicationVersion from(SourceRevision source, long buildNumber, String authorEmail, Version compileVersion, Instant buildTime) { return new ApplicationVersion(Optional.of(source), OptionalLong.of(buildNumber), Optional.of(authorEmail), Optional.of(compileVersion), Optional.of(buildTime), Optional.empty(), Optional.empty(), false, Optional.empty()); } /** Creates a version from a completed build, an author email, and build meta data. 
*/ public static ApplicationVersion from(Optional<SourceRevision> source, long buildNumber, Optional<String> authorEmail, Optional<Version> compileVersion, Optional<Instant> buildTime, Optional<String> sourceUrl, Optional<String> commit, boolean deployedDirectly, Optional<String> bundleHash) { return new ApplicationVersion(source, OptionalLong.of(buildNumber), authorEmail, compileVersion, buildTime, sourceUrl, commit, deployedDirectly, bundleHash); } /** Returns a unique identifier for this version or "unknown" if version is not known */ public String id() { if (isUnknown()) return "unknown"; return source.map(SourceRevision::commit).map(ApplicationVersion::abbreviateCommit) .or(this::commit) .map(commit -> String.format("%s.%d-%s", majorVersion, buildNumber.getAsLong(), commit)) .orElseGet(() -> majorVersion + "." + buildNumber.getAsLong()); } /** * Returns information about the source of this revision, or empty if the source is not know/defined * (which is the case for command-line deployment from developers, but never for deployment jobs) */ public Optional<SourceRevision> source() { return source; } /** Returns the build number that built this version */ public OptionalLong buildNumber() { return buildNumber; } /** Returns the email of the author of commit of this version, if known */ public Optional<String> authorEmail() { return authorEmail; } /** Returns the Vespa version this package was compiled against, if known. */ public Optional<Version> compileVersion() { return compileVersion; } /** Returns the time this package was built, if known. */ public Optional<Instant> buildTime() { return buildTime; } /** Returns the hash of app package except deployment/build-meta data */ public Optional<String> bundleHash() { return bundleHash; } /** Returns the source URL for this application version. 
*/ public Optional<String> sourceUrl() { return sourceUrl.or(() -> source.map(source -> { String repository = source.repository(); if (repository.startsWith("git@")) repository = "https: if (repository.endsWith(".git")) repository = repository.substring(0, repository.length() - 4); return repository + "/tree/" + source.commit(); })); } /** Returns the commit name of this application version. */ public Optional<String> commit() { return commit.or(() -> source.map(SourceRevision::commit)); } /** Returns whether this is unknown */ public boolean isUnknown() { return this.equals(unknown); } /** Returns whether the application package for this version was deployed directly to zone */ public boolean isDeployedDirectly() { return deployedDirectly; } @Override public boolean equals(Object o) { if (this == o) return true; if ( ! (o instanceof ApplicationVersion)) return false; ApplicationVersion that = (ApplicationVersion) o; return Objects.equals(buildNumber, that.buildNumber) && Objects.equals(commit(), that.commit()) && deployedDirectly == that.deployedDirectly; } @Override public int hashCode() { return Objects.hash(buildNumber, commit(), deployedDirectly); } @Override public String toString() { return "Application package version: " + id() + source.map(s -> ", " + s.toString()).orElse("") + authorEmail.map(e -> ", by " + e).orElse("") + compileVersion.map(v -> ", built against " + v).orElse("") + buildTime.map(t -> " at " + t).orElse("") ; } /** Abbreviate given commit hash to 9 characters */ private static String abbreviateCommit(String hash) { return hash.length() <= 9 ? hash : hash.substring(0, 9); } @Override }
Possibly double that as we do prepare, then prepareWithLocks.
/**
 * Tells whether a tenant node may currently be allocated to the given host.
 *
 * @param host                the candidate host
 * @param dynamicProvisioning whether this zone provisions hosts dynamically; when true,
 *                            ready and provisioned hosts are acceptable in addition to active ones
 * @return true if the host can receive a tenant node allocation
 */
public boolean canAllocateTenantNodeTo(Node host, boolean dynamicProvisioning) {
    // The host type must be capable of running tenant nodes at all.
    if ( ! host.type().canRun(NodeType.tenant)) return false;

    // Hosts on their way out are not allocation targets.
    if (host.status().wantToRetire()) return false;

    boolean membershipRetired = host.allocation()
                                    .map(allocation -> allocation.membership().retired())
                                    .orElse(false);
    if (membershipRetired) return false;

    if (suspended(host)) return false;

    Node.State state = host.state();
    if ( ! dynamicProvisioning)
        return state == Node.State.active;

    // With dynamic provisioning, hosts that are not yet active may also be used.
    return state == Node.State.active || state == Node.State.ready || state == Node.State.provisioned;
}
if (suspended(host)) return false;
/**
 * Returns whether the given host is currently a valid target for allocating a tenant node.
 *
 * @param host                the candidate host
 * @param dynamicProvisioning whether this zone provisions hosts dynamically; when true,
 *                            hosts that are not yet active (ready, provisioned) may also
 *                            receive allocations
 * @return true if a tenant node may be allocated to the host
 */
public boolean canAllocateTenantNodeTo(Node host, boolean dynamicProvisioning) {
    // The host's type must be able to run tenant nodes at all.
    if ( ! host.type().canRun(NodeType.tenant)) return false;
    // Hosts marked for retirement should not take new allocations ...
    if (host.status().wantToRetire()) return false;
    // ... nor should hosts whose own cluster membership is retired.
    if (host.allocation().map(alloc -> alloc.membership().retired()).orElse(false)) return false;
    // NOTE(review): presumably this reflects orchestrator suspension — confirm semantics of suspended().
    if (suspended(host)) return false;
    // With dynamic provisioning, not-yet-active hosts are also usable; otherwise only active ones.
    if (dynamicProvisioning)
        return EnumSet.of(Node.State.active, Node.State.ready, Node.State.provisioned).contains(host.state());
    else
        return host.state() == Node.State.active;
}
/**
 * The nodes in the node repository and their state transitions.
 *
 * All mutations go through the CuratorDatabaseClient; callers either hold a lock
 * obtained from this class (lock/lockUnallocated/lockAndGet) or rely on the method
 * itself to acquire one. Methods taking a NestedTransaction stage writes that only
 * take effect when the transaction commits.
 */
class Nodes {

    private static final Logger log = Logger.getLogger(Nodes.class.getName());

    private final CuratorDatabaseClient db;
    private final Zone zone;
    private final Clock clock;
    private final Orchestrator orchestrator;

    public Nodes(CuratorDatabaseClient db, Zone zone, Clock clock, Orchestrator orchestrator) {
        this.zone = zone;
        this.clock = clock;
        this.db = db;
        this.orchestrator = orchestrator;
    }

    /** Read and write all nodes to make sure they are stored in the latest version of the serialized format */
    public void rewrite() {
        Instant start = clock.instant();
        int nodesWritten = 0;
        for (Node.State state : Node.State.values()) {
            List<Node> nodes = db.readNodes(state);
            // Writing the nodes back re-serializes them in the current format.
            db.writeTo(state, nodes, Agent.system, Optional.empty());
            nodesWritten += nodes.size();
        }
        Instant end = clock.instant();
        log.log(Level.INFO, String.format("Rewrote %d nodes in %s", nodesWritten, Duration.between(start, end)));
    }

    /**
     * Finds and returns the node with the hostname in any of the given states, or empty if not found
     *
     * @param hostname the full host name of the node
     * @param inState the states the node may be in. If no states are given, it will be returned from any state
     * @return the node, or empty if it was not found in any of the given states
     */
    public Optional<Node> node(String hostname, Node.State... inState) {
        return db.readNode(hostname, inState);
    }

    /**
     * Returns a list of nodes in this repository in any of the given states
     *
     * @param inState the states to return nodes from. If no states are given, all nodes of the given type are returned
     */
    public NodeList list(Node.State... inState) {
        return NodeList.copyOf(db.readNodes(inState));
    }

    /** Returns a locked list of all nodes in this repository */
    public LockedNodeList list(Mutex lock) {
        return new LockedNodeList(list().asList(), lock);
    }

    /**
     * Returns whether the zone managed by this node repository seems to be working.
     * If too many nodes are not responding, there is probably some zone-wide issue
     * and we should probably refrain from making changes to it.
     */
    public boolean isWorking() {
        NodeList activeNodes = list(Node.State.active);
        if (activeNodes.size() <= 5) return true; // too few nodes to draw a conclusion
        NodeList downNodes = activeNodes.down();
        // Healthy iff at most 20% of the active nodes are down.
        return ! ( (double)downNodes.size() / (double)activeNodes.size() > 0.2 );
    }

    /** Adds a list of newly created reserved nodes to the node repository */
    public List<Node> addReservedNodes(LockedNodeList nodes) {
        for (Node node : nodes) {
            if ( node.flavor().getType() != Flavor.Type.DOCKER_CONTAINER)
                illegal("Cannot add " + node + ": This is not a child node");
            if (node.allocation().isEmpty())
                illegal("Cannot add " + node + ": Child nodes need to be allocated");
            Optional<Node> existing = node(node.hostname());
            if (existing.isPresent())
                illegal("Cannot add " + node + ": A node with this name already exists (" +
                        existing.get() + ", " + existing.get().history() + "). Node to be added: " +
                        node + ", " + node.history());
        }
        return db.addNodesInState(nodes.asList(), Node.State.reserved, Agent.system);
    }

    /**
     * Adds a list of (newly created) nodes to the node repository as provisioned nodes.
     * If any of the nodes already exists in the deprovisioned state, the new node will be merged
     * with the history of that node.
     */
    public List<Node> addNodes(List<Node> nodes, Agent agent) {
        try (Mutex lock = lockUnallocated()) {
            List<Node> nodesToAdd =  new ArrayList<>();
            List<Node> nodesToRemove = new ArrayList<>();
            for (int i = 0; i < nodes.size(); i++) {
                var node = nodes.get(i);

                // Check for duplicates among the arguments themselves.
                for (int j = 0; j < i; j++) {
                    if (node.equals(nodes.get(j)))
                        illegal("Cannot add nodes: " + node + " is duplicated in the argument list");
                }

                Optional<Node> existing = node(node.hostname());
                if (existing.isPresent()) {
                    if (existing.get().state() != Node.State.deprovisioned)
                        illegal("Cannot add " + node + ": A node with this name already exists");
                    // Carry over history, reports, fail count and firmware info from the deprovisioned node.
                    node = node.with(existing.get().history());
                    node = node.with(existing.get().reports());
                    node = node.with(node.status().withFailCount(existing.get().status().failCount()));
                    if (existing.get().status().firmwareVerifiedAt().isPresent())
                        node = node.with(node.status().withFirmwareVerifiedAt(existing.get().status().firmwareVerifiedAt().get()));
                    boolean rebuilding = existing.get().status().wantToRebuild();
                    if (rebuilding) {
                        node = node.with(node.status().withWantToRetire(existing.get().status().wantToRetire(), false, rebuilding));
                    }
                    nodesToRemove.add(existing.get());
                }
                nodesToAdd.add(node);
            }
            // Add and remove atomically in one transaction.
            NestedTransaction transaction = new NestedTransaction();
            List<Node> resultingNodes = db.addNodesInState(IP.Config.verify(nodesToAdd, list(lock)), Node.State.provisioned, agent, transaction);
            db.removeNodes(nodesToRemove, transaction);
            transaction.commit();
            return resultingNodes;
        }
    }

    /** Sets a list of nodes ready and returns the nodes in the ready state */
    public List<Node> setReady(List<Node> nodes, Agent agent, String reason) {
        try (Mutex lock = lockUnallocated()) {
            List<Node> nodesWithResetFields = nodes.stream()
                    .map(node -> {
                        if (node.state() != Node.State.provisioned && node.state() != Node.State.dirty)
                            illegal("Can not set " + node + " ready. It is not provisioned or dirty.");
                        // Clear any retirement flags before readying.
                        return node.withWantToRetire(false, false, false, Agent.system, clock.instant());
                    })
                    .collect(Collectors.toList());
            return db.writeTo(Node.State.ready, nodesWithResetFields, agent, Optional.of(reason));
        }
    }

    public Node setReady(String hostname, Agent agent, String reason) {
        Node nodeToReady = requireNode(hostname);
        if (nodeToReady.state() == Node.State.ready) return nodeToReady; // already ready: no-op
        return setReady(List.of(nodeToReady), agent, reason).get(0);
    }

    /** Reserve nodes. This method does <b>not</b> lock the node repository */
    public List<Node> reserve(List<Node> nodes) {
        return db.writeTo(Node.State.reserved, nodes, Agent.application, Optional.empty());
    }

    /** Activate nodes. This method does <b>not</b> lock the node repository */
    public List<Node> activate(List<Node> nodes, NestedTransaction transaction) {
        return db.writeTo(Node.State.active, nodes, Agent.application, Optional.empty(), transaction);
    }

    /**
     * Sets a list of nodes to have their allocation removable (active to inactive) in the node repository.
     *
     * @param application the application the nodes belong to
     * @param nodes the nodes to make removable. These nodes MUST be in the active state.
     */
    public void setRemovable(ApplicationId application, List<Node> nodes) {
        try (Mutex lock = lock(application)) {
            List<Node> removableNodes = nodes.stream()
                    .map(node -> node.with(node.allocation().get().removable(true)))
                    .collect(Collectors.toList());
            write(removableNodes, lock);
        }
    }

    /**
     * Deactivates these nodes in a transaction and returns the nodes in the new state which will hold if the
     * transaction commits.
     */
    public List<Node> deactivate(List<Node> nodes, ApplicationTransaction transaction) {
        // Outside production (or in CD systems), deactivation deallocates everything directly.
        if ( ! zone.environment().isProduction() || zone.system().isCd())
            return deallocate(nodes, Agent.application, "Deactivated by application", transaction.nested());

        // In production: stateless nodes are deallocated, stateful nodes are kept inactive.
        var stateless = NodeList.copyOf(nodes).stateless();
        var stateful  = NodeList.copyOf(nodes).stateful();
        List<Node> written = new ArrayList<>();
        written.addAll(deallocate(stateless.asList(), Agent.application, "Deactivated by application", transaction.nested()));
        written.addAll(db.writeTo(Node.State.inactive, stateful.asList(), Agent.application, Optional.empty(), transaction.nested()));
        return written;
    }

    /**
     * Fails these nodes in a transaction and returns the nodes in the new state which will hold if the
     * transaction commits.
     */
    public List<Node> fail(List<Node> nodes, ApplicationTransaction transaction) {
        return fail(nodes, Agent.application, "Failed by application", transaction.nested());
    }

    public List<Node> fail(List<Node> nodes, Agent agent, String reason) {
        NestedTransaction transaction = new NestedTransaction();
        nodes = fail(nodes, agent, reason, transaction);
        transaction.commit();
        return nodes;
    }

    private List<Node> fail(List<Node> nodes, Agent agent, String reason, NestedTransaction transaction) {
        // Clear the wantToFail flag as the nodes are now moved to failed.
        nodes = nodes.stream()
                     .map(n -> n.withWantToFail(false, agent, clock.instant()))
                     .collect(Collectors.toList());
        return db.writeTo(Node.State.failed, nodes, agent, Optional.of(reason), transaction);
    }

    /** Move nodes to the dirty state */
    public List<Node> deallocate(List<Node> nodes, Agent agent, String reason) {
        return performOn(NodeList.copyOf(nodes), (node, lock) -> deallocate(node, agent, reason));
    }

    public List<Node> deallocateRecursively(String hostname, Agent agent, String reason) {
        Node nodeToDirty = node(hostname).orElseThrow(() ->
                new IllegalArgumentException("Could not deallocate " + hostname + ": Node not found"));

        // For hosts, include all children; skip nodes that are already dirty.
        List<Node> nodesToDirty =
                (nodeToDirty.type().isHost() ?
                 Stream.concat(list().childrenOf(hostname).asList().stream(), Stream.of(nodeToDirty)) :
                 Stream.of(nodeToDirty))
                .filter(node -> node.state() != Node.State.dirty)
                .collect(Collectors.toList());

        List<String> hostnamesNotAllowedToDirty = nodesToDirty.stream()
                .filter(node -> node.state() != Node.State.provisioned)
                .filter(node -> node.state() != Node.State.failed)
                .filter(node -> node.state() != Node.State.parked)
                .filter(node -> node.state() != Node.State.breakfixed)
                .map(Node::hostname)
                .collect(Collectors.toList());
        if ( ! hostnamesNotAllowedToDirty.isEmpty())
            illegal("Could not deallocate " + nodeToDirty + ": " + hostnamesNotAllowedToDirty + " are not in states [provisioned, failed, parked, breakfixed]");

        return nodesToDirty.stream().map(node -> deallocate(node, agent, reason)).collect(Collectors.toList());
    }

    /**
     * Set a node dirty  or parked, allowed if it is in the provisioned, inactive, failed or parked state.
     * Use this to clean newly provisioned nodes or to recycle failed nodes which have been repaired or put on hold.
     */
    public Node deallocate(Node node, Agent agent, String reason) {
        NestedTransaction transaction = new NestedTransaction();
        Node deallocated = deallocate(node, agent, reason, transaction);
        transaction.commit();
        return deallocated;
    }

    public List<Node> deallocate(List<Node> nodes, Agent agent, String reason, NestedTransaction transaction) {
        return nodes.stream().map(node -> deallocate(node, agent, reason, transaction)).collect(Collectors.toList());
    }

    public Node deallocate(Node node, Agent agent, String reason, NestedTransaction transaction) {
        // Some nodes are parked (kept aside) rather than dirtied; see parkOnDeallocationOf.
        if (parkOnDeallocationOf(node, agent)) {
            return park(node.hostname(), false, agent, reason, transaction);
        } else {
            return db.writeTo(Node.State.dirty, List.of(node), agent, Optional.of(reason), transaction).get(0);
        }
    }

    /**
     * Fails this node and returns it in its new state.
     *
     * @return the node in its new state
     * @throws NoSuchNodeException if the node is not found
     */
    public Node fail(String hostname, Agent agent, String reason) {
        return fail(hostname, true, agent, reason);
    }

    public Node fail(String hostname, boolean keepAllocation, Agent agent, String reason) {
        return move(hostname, Node.State.failed, agent, keepAllocation, Optional.of(reason));
    }

    /**
     * Fails all the nodes that are children of hostname before finally failing the hostname itself.
     * Non-active nodes are failed immediately, while active nodes are marked as wantToFail.
     * The host is failed if it has no active nodes and marked wantToFail if it has.
     *
     * @return all the nodes that were changed by this request
     */
    public List<Node> failOrMarkRecursively(String hostname, Agent agent, String reason) {
        NodeList children = list().childrenOf(hostname);
        List<Node> changed = performOn(children, (node, lock) -> failOrMark(node, agent, reason, lock));

        if (children.state(Node.State.active).isEmpty())
            changed.add(move(hostname, Node.State.failed, agent, true, Optional.of(reason)));
        else
            changed.addAll(performOn(NodeList.of(node(hostname).orElseThrow()), (node, lock) -> failOrMark(node, agent, reason, lock)));

        return changed;
    }

    private Node failOrMark(Node node, Agent agent, String reason, Mutex lock) {
        if (node.state() == Node.State.active) {
            // Active nodes are only marked; the actual failing happens later elsewhere.
            node = node.withWantToFail(true, agent, clock.instant());
            write(node, lock);
            return node;
        } else {
            return move(node.hostname(), Node.State.failed, agent, true, Optional.of(reason));
        }
    }

    /**
     * Parks this node and returns it in its new state.
     *
     * @return the node in its new state
     * @throws NoSuchNodeException if the node is not found
     */
    public Node park(String hostname, boolean keepAllocation, Agent agent, String reason) {
        NestedTransaction transaction = new NestedTransaction();
        Node parked = park(hostname, keepAllocation, agent, reason, transaction);
        transaction.commit();
        return parked;
    }

    private Node park(String hostname, boolean keepAllocation, Agent agent, String reason, NestedTransaction transaction) {
        return move(hostname, Node.State.parked, agent, keepAllocation, Optional.of(reason), transaction);
    }

    /**
     * Parks all the nodes that are children of hostname before finally parking the hostname itself.
     *
     * @return List of all the parked nodes in their new state
     */
    public List<Node> parkRecursively(String hostname, Agent agent, String reason) {
        return moveRecursively(hostname, Node.State.parked, agent, Optional.of(reason));
    }

    /**
     * Moves a previously failed or parked node back to the active state.
     *
     * @return the node in its new state
     * @throws NoSuchNodeException if the node is not found
     */
    public Node reactivate(String hostname, Agent agent, String reason) {
        return move(hostname, Node.State.active, agent, true, Optional.of(reason));
    }

    /**
     * Moves a host to breakfixed state, removing any children.
     */
    public List<Node> breakfixRecursively(String hostname, Agent agent, String reason) {
        Node node = requireNode(hostname);
        try (Mutex lock = lockUnallocated()) {
            requireBreakfixable(node);
            NestedTransaction transaction = new NestedTransaction();
            List<Node> removed = removeChildren(node, false, transaction);
            removed.add(move(node.hostname(), Node.State.breakfixed, agent, true, Optional.of(reason), transaction));
            transaction.commit();
            return removed;
        }
    }

    private List<Node> moveRecursively(String hostname, Node.State toState, Agent agent, Optional<String> reason) {
        NestedTransaction transaction = new NestedTransaction();
        // Children are moved first, then the host itself, all in one transaction.
        List<Node> moved = list().childrenOf(hostname).asList().stream()
                                 .map(child -> move(child.hostname(), toState, agent, true, reason, transaction))
                                 .collect(Collectors.toList());
        moved.add(move(hostname, toState, agent, true, reason, transaction));
        transaction.commit();
        return moved;
    }

    /** Move a node to given state */
    private Node move(String hostname, Node.State toState, Agent agent, boolean keepAllocation, Optional<String> reason) {
        NestedTransaction transaction = new NestedTransaction();
        Node moved = move(hostname, toState, agent, keepAllocation, reason, transaction);
        transaction.commit();
        return moved;
    }

    /** Move a node to given state as part of a transaction */
    private Node move(String hostname, Node.State toState, Agent agent, boolean keepAllocation, Optional<String> reason, NestedTransaction transaction) {
        // Take the lock and read a fresh copy of the node under it.
        try (NodeMutex lock = lockAndGetRequired(hostname)) {
            Node node = lock.node();
            if (toState == Node.State.active) {
                if (node.allocation().isEmpty()) illegal("Could not set " + node + " active: It has no allocation");
                if (!keepAllocation) illegal("Could not set " + node + " active: Requested to discard allocation");
                // Guard against two active nodes holding the same cluster/index position.
                for (Node currentActive : list(Node.State.active).owner(node.allocation().get().owner())) {
                    if (node.allocation().get().membership().cluster().equals(currentActive.allocation().get().membership().cluster()) &&
                        node.allocation().get().membership().index() == currentActive.allocation().get().membership().index())
                        illegal("Could not set " + node + " active: Same cluster and index as " + currentActive);
                }
            }
            if (!keepAllocation && node.allocation().isPresent()) {
                node = node.withoutAllocation();
            }
            if (toState == Node.State.deprovisioned) {
                node = node.with(IP.Config.EMPTY); // deprovisioned nodes give up their IP addresses
            }
            return db.writeTo(toState, List.of(node), agent, reason, transaction).get(0);
        }
    }

    /*
     * This method is used by the REST API to handle readying nodes for new allocations. For Linux
     * containers this will remove the node from node repository, otherwise the node will be moved to state ready.
     */
    public Node markNodeAvailableForNewAllocation(String hostname, Agent agent, String reason) {
        Node node = requireNode(hostname);
        if (node.flavor().getType() == Flavor.Type.DOCKER_CONTAINER && node.type() == NodeType.tenant) {
            if (node.state() != Node.State.dirty)
                illegal("Cannot make " + node + " available for new allocation as it is not in state [dirty]");
            return removeRecursively(node, true).get(0);
        }

        if (node.state() == Node.State.ready) return node;

        // Refuse to ready a node whose (parent) host has hard failures.
        Node parentHost = node.parentHostname().flatMap(this::node).orElse(node);
        List<String> failureReasons = NodeFailer.reasonsToFailHost(parentHost);
        if ( ! failureReasons.isEmpty())
            illegal(node + " cannot be readied because it has hard failures: " + failureReasons);

        return setReady(List.of(node), agent, reason).get(0);
    }

    /**
     * Removes all the nodes that are children of hostname before finally removing the hostname itself.
     *
     * @return a List of all the nodes that have been removed or (for hosts) deprovisioned
     */
    public List<Node> removeRecursively(String hostname) {
        Node node = requireNode(hostname);
        return removeRecursively(node, false);
    }

    public List<Node> removeRecursively(Node node, boolean force) {
        try (Mutex lock = lockUnallocated()) {
            requireRemovable(node, false, force);
            NestedTransaction transaction = new NestedTransaction();
            final List<Node> removed;
            if (!node.type().isHost()) {
                removed = List.of(node);
                db.removeNodes(removed, transaction);
            } else {
                removed = removeChildren(node, force, transaction);
                if (zone.getCloud().dynamicProvisioning()) {
                    // Dynamically provisioned hosts disappear entirely.
                    db.removeNodes(List.of(node), transaction);
                } else {
                    // Statically provisioned hosts are kept around as deprovisioned.
                    move(node.hostname(), Node.State.deprovisioned, Agent.system, false, Optional.empty(), transaction);
                }
                removed.add(node);
            }
            transaction.commit();
            return removed;
        }
    }

    /** Forgets a deprovisioned node. This removes all traces of the node in the node repository. */
    public void forget(Node node) {
        if (node.state() != Node.State.deprovisioned)
            throw new IllegalArgumentException(node + " must be deprovisioned before it can be forgotten");
        if (node.status().wantToRebuild())
            throw new IllegalArgumentException(node + " is rebuilding and cannot be forgotten");
        NestedTransaction transaction = new NestedTransaction();
        db.removeNodes(List.of(node), transaction);
        transaction.commit();
    }

    private List<Node> removeChildren(Node node, boolean force, NestedTransaction transaction) {
        List<Node> children = list().childrenOf(node).asList();
        children.forEach(child -> requireRemovable(child, true, force));
        db.removeNodes(children, transaction);
        return new ArrayList<>(children);
    }

    /**
     * Throws if the given node cannot be removed. Removal is allowed if:
     *  - Tenant node:
     *    - non-recursively: node is unallocated
     *    - recursively: node is unallocated or node is in failed|parked
     *  - Host node: iff in state provisioned|failed|parked
     *  - Child node:
     *    - non-recursively: node in state ready
     *    - recursively: child is in state provisioned|failed|parked|dirty|ready
     */
    private void requireRemovable(Node node, boolean removingRecursively, boolean force) {
        if (force) return;

        if (node.type() == NodeType.tenant && node.allocation().isPresent()) {
            EnumSet<Node.State> removableStates = EnumSet.of(Node.State.failed, Node.State.parked);
            if (!removingRecursively || !removableStates.contains(node.state()))
                illegal(node + " is currently allocated and cannot be removed while in " + node.state());
        }

        final Set<Node.State> removableStates;
        if (node.type().isHost()) {
            removableStates = EnumSet.of(Node.State.provisioned, Node.State.failed, Node.State.parked);
        } else {
            removableStates = removingRecursively
                    ? EnumSet.of(Node.State.provisioned, Node.State.failed, Node.State.parked, Node.State.dirty, Node.State.ready)
                    : EnumSet.of(Node.State.ready);
        }
        if (!removableStates.contains(node.state()))
            illegal(node + " can not be removed while in " + node.state());
    }

    /**
     * Throws if given node cannot be breakfixed.
     * Breakfix is allowed if the following is true:
     *  - Node is tenant host
     *  - Node is in zone without dynamic provisioning
     *  - Node is in parked or failed state
     */
    private void requireBreakfixable(Node node) {
        if (zone.getCloud().dynamicProvisioning()) {
            illegal("Can not breakfix in zone: " + zone);
        }
        if (node.type() != NodeType.host) {
            illegal(node + " can not be breakfixed as it is not a tenant host");
        }
        Set<Node.State> legalStates = EnumSet.of(Node.State.failed, Node.State.parked);
        if (! legalStates.contains(node.state())) {
            illegal(node + " can not be removed as it is not in the states " + legalStates);
        }
    }

    /**
     * Increases the restart generation of the active nodes matching given filter.
     *
     * @return the nodes in their new state
     */
    public List<Node> restartActive(Predicate<Node> filter) {
        return restart(NodeFilter.in(Set.of(Node.State.active)).and(filter));
    }

    /**
     * Increases the restart generation of the any nodes matching given filter.
     *
     * @return the nodes in their new state
     */
    public List<Node> restart(Predicate<Node> filter) {
        return performOn(filter, (node, lock) -> write(node.withRestart(node.allocation().get().restartGeneration().withIncreasedWanted()), lock));
    }

    /**
     * Increases the reboot generation of the nodes matching the filter.
     *
     * @return the nodes in their new state
     */
    public List<Node> reboot(Predicate<Node> filter) {
        return performOn(filter, (node, lock) -> write(node.withReboot(node.status().reboot().withIncreasedWanted()), lock));
    }

    /**
     * Set target OS version of all nodes matching given filter.
     *
     * @return the nodes in their new state
     */
    public List<Node> upgradeOs(Predicate<Node> filter, Optional<Version> version) {
        return performOn(filter, (node, lock) -> {
            var newStatus = node.status().withOsVersion(node.status().osVersion().withWanted(version));
            return write(node.with(newStatus), lock);
        });
    }

    /** Retire nodes matching given filter */
    public List<Node> retire(Predicate<Node> filter, Agent agent, Instant instant) {
        return performOn(filter, (node, lock) -> write(node.withWantToRetire(true, agent, instant), lock));
    }

    /** Retire and deprovision given host and all of its children */
    public List<Node> deprovision(String hostname, Agent agent, Instant instant) {
        return decommission(hostname, DecommissionOperation.deprovision, agent, instant);
    }

    /** Retire and rebuild given host and all of its children */
    public List<Node> rebuild(String hostname, Agent agent, Instant instant) {
        return decommission(hostname, DecommissionOperation.rebuild, agent, instant);
    }

    private List<Node> decommission(String hostname, DecommissionOperation op, Agent agent, Instant instant) {
        Optional<NodeMutex> nodeMutex = lockAndGet(hostname);
        if (nodeMutex.isEmpty()) return List.of(); // node disappeared: nothing to do
        Node host = nodeMutex.get().node();
        if (!host.type().isHost()) throw new IllegalArgumentException("Cannot " + op + " non-host " + host);

        List<Node> result;
        boolean wantToDeprovision = op == DecommissionOperation.deprovision;
        boolean wantToRebuild = op == DecommissionOperation.rebuild;
        try (NodeMutex lock = nodeMutex.get(); Mutex allocationLock = lockUnallocated()) {
            // Re-read the host under both locks before mutating it and its children.
            host = lock.node();
            result = performOn(list(allocationLock).childrenOf(host), (node, nodeLock) -> {
                Node newNode = node.withWantToRetire(true, wantToDeprovision, wantToRebuild, agent, instant);
                return write(newNode, nodeLock);
            });
            Node newHost = host.withWantToRetire(true, wantToDeprovision, wantToRebuild, agent, instant);
            result.add(write(newHost, lock));
        }
        return result;
    }

    /**
     * Writes this node after it has changed some internal state but NOT changed its state field.
     * This does NOT lock the node repository implicitly, but callers are expected to already hold the lock.
     *
     * @param lock already acquired lock
     * @return the written node for convenience
     */
    public Node write(Node node, Mutex lock) { return write(List.of(node), lock).get(0); }

    /**
     * Writes these nodes after they have changed some internal state but NOT changed their state field.
     * This does NOT lock the node repository implicitly, but callers are expected to already hold the lock.
     *
     * @param lock already acquired lock
     * @return the written nodes for convenience
     */
    public List<Node> write(List<Node> nodes, @SuppressWarnings("unused") Mutex lock) {
        return db.writeTo(nodes, Agent.system, Optional.empty());
    }

    private List<Node> performOn(Predicate<Node> filter, BiFunction<Node, Mutex, Node> action) {
        return performOn(list().matching(filter), action);
    }

    /**
     * Performs an operation requiring locking on all nodes matching some filter.
     *
     * @param action the action to perform
     * @return the set of nodes on which the action was performed, as they became as a result of the operation
     */
    private List<Node> performOn(NodeList nodes, BiFunction<Node, Mutex, Node> action) {
        List<Node> unallocatedNodes = new ArrayList<>();
        ListMap<ApplicationId, Node> allocatedNodes = new ListMap<>();

        // Group the nodes by whether they are allocated, so each group is processed under the right lock.
        for (Node node : nodes) {
            if (node.allocation().isPresent())
                allocatedNodes.put(node.allocation().get().owner(), node);
            else
                unallocatedNodes.add(node);
        }

        // perform operation while holding locks
        List<Node> resultingNodes = new ArrayList<>();
        try (Mutex lock = lockUnallocated()) {
            for (Node node : unallocatedNodes) {
                // Re-read each node under the lock; it may have been removed meanwhile.
                Optional<Node> currentNode = db.readNode(node.hostname());
                if (currentNode.isEmpty()) continue;
                resultingNodes.add(action.apply(currentNode.get(), lock));
            }
        }
        for (Map.Entry<ApplicationId, List<Node>> applicationNodes : allocatedNodes.entrySet()) {
            try (Mutex lock = lock(applicationNodes.getKey())) {
                for (Node node : applicationNodes.getValue()) {
                    Optional<Node> currentNode = db.readNode(node.hostname());
                    if (currentNode.isEmpty()) continue;
                    resultingNodes.add(action.apply(currentNode.get(), lock));
                }
            }
        }
        return resultingNodes;
    }

    public boolean canAllocateTenantNodeTo(Node host) {
        return canAllocateTenantNodeTo(host, zone.getCloud().dynamicProvisioning());
    }

    /** Returns whether the orchestrator reports this node as suspended; false if the orchestrator does not know it. */
    public boolean suspended(Node node) {
        try {
            return orchestrator.getNodeStatus(new HostName(node.hostname())).isSuspended();
        } catch (HostNameNotFoundException e) {
            // Treat it as not suspended
            return false;
        }
    }

    /** Create a lock which provides exclusive rights to making changes to the given application */
    public Mutex lock(ApplicationId application) { return db.lock(application); }

    /** Create a lock with a timeout which provides exclusive rights to making changes to the given application */
    public Mutex lock(ApplicationId application, Duration timeout) { return db.lock(application, timeout); }

    /** Create a lock which provides exclusive rights to modifying unallocated nodes */
    public Mutex lockUnallocated() { return db.lockInactive(); }

    /** Returns the unallocated/application lock, and the node acquired under that lock. */
    public Optional<NodeMutex> lockAndGet(Node node) {
        Node staleNode = node;

        // Retry because the node's owner (and hence the lock to take) may change between read and lock.
        final int maxRetries = 4;
        for (int i = 0; i < maxRetries; ++i) {
            Mutex lockToClose = lock(staleNode);
            try {
                Optional<Node> freshNode = node(staleNode.hostname(), staleNode.state());
                if (freshNode.isEmpty()) {
                    freshNode = node(staleNode.hostname());
                    if (freshNode.isEmpty()) {
                        return Optional.empty();
                    }
                }

                // The lock is valid only if ownership did not change while taking it.
                if (Objects.equals(freshNode.get().allocation().map(Allocation::owner),
                                   staleNode.allocation().map(Allocation::owner))) {
                    NodeMutex nodeMutex = new NodeMutex(freshNode.get(), lockToClose);
                    lockToClose = null; // ownership of the lock transferred to nodeMutex
                    return Optional.of(nodeMutex);
                }

                // Ownership changed: release and retry with the fresher node.
                staleNode = freshNode.get();
            } finally {
                if (lockToClose != null) lockToClose.close();
            }
        }

        throw new IllegalStateException("Giving up (after " + maxRetries + " attempts) " +
                                        "fetching an up to date node under lock: " + node.hostname());
    }

    /** Returns the unallocated/application lock, and the node acquired under that lock. */
    public Optional<NodeMutex> lockAndGet(String hostname) { return node(hostname).flatMap(this::lockAndGet); }

    /** Returns the unallocated/application lock, and the node acquired under that lock. */
    public NodeMutex lockAndGetRequired(Node node) {
        return lockAndGet(node).orElseThrow(() -> new NoSuchNodeException("No node with hostname '" + node.hostname() + "'"));
    }

    /** Returns the unallocated/application lock, and the node acquired under that lock. */
    public NodeMutex lockAndGetRequired(String hostname) {
        return lockAndGet(hostname).orElseThrow(() -> new NoSuchNodeException("No node with hostname '" + hostname + "'"));
    }

    private Mutex lock(Node node) {
        return node.allocation().isPresent() ? lock(node.allocation().get().owner()) : lockUnallocated();
    }

    private Node requireNode(String hostname) {
        return node(hostname).orElseThrow(() -> new NoSuchNodeException("No node with hostname '" + hostname + "'"));
    }

    private void illegal(String message) {
        throw new IllegalArgumentException(message);
    }

    /** Returns whether node should be parked when deallocated by given agent */
    private static boolean parkOnDeallocationOf(Node node, Agent agent) {
        if (node.state() == Node.State.parked) return false;
        if (agent == Agent.operator) return false;
        if (!node.type().isHost() && node.status().wantToDeprovision()) return false;
        boolean retirementRequestedByOperator = node.status().wantToRetire() &&
                                                node.history().event(History.Event.Type.wantToRetire)
                                                    .map(History.Event::agent)
                                                    .map(a -> a == Agent.operator)
                                                    .orElse(false);
        return node.status().wantToDeprovision() || node.status().wantToRebuild() || retirementRequestedByOperator;
    }

    /** The different ways a host can be decommissioned */
    private enum DecommissionOperation {
        deprovision,
        rebuild,
    }

}
class Nodes { private static final Logger log = Logger.getLogger(Nodes.class.getName()); private final CuratorDatabaseClient db; private final Zone zone; private final Clock clock; private final Orchestrator orchestrator; public Nodes(CuratorDatabaseClient db, Zone zone, Clock clock, Orchestrator orchestrator) { this.zone = zone; this.clock = clock; this.db = db; this.orchestrator = orchestrator; } /** Read and write all nodes to make sure they are stored in the latest version of the serialized format */ public void rewrite() { Instant start = clock.instant(); int nodesWritten = 0; for (Node.State state : Node.State.values()) { List<Node> nodes = db.readNodes(state); db.writeTo(state, nodes, Agent.system, Optional.empty()); nodesWritten += nodes.size(); } Instant end = clock.instant(); log.log(Level.INFO, String.format("Rewrote %d nodes in %s", nodesWritten, Duration.between(start, end))); } /** * Finds and returns the node with the hostname in any of the given states, or empty if not found * * @param hostname the full host name of the node * @param inState the states the node may be in. If no states are given, it will be returned from any state * @return the node, or empty if it was not found in any of the given states */ public Optional<Node> node(String hostname, Node.State... inState) { return db.readNode(hostname, inState); } /** * Returns a list of nodes in this repository in any of the given states * * @param inState the states to return nodes from. If no states are given, all nodes of the given type are returned */ public NodeList list(Node.State... inState) { return NodeList.copyOf(db.readNodes(inState)); } /** Returns a locked list of all nodes in this repository */ public LockedNodeList list(Mutex lock) { return new LockedNodeList(list().asList(), lock); } /** * Returns whether the zone managed by this node repository seems to be working. 
* If too many nodes are not responding, there is probably some zone-wide issue * and we should probably refrain from making changes to it. */ public boolean isWorking() { NodeList activeNodes = list(Node.State.active); if (activeNodes.size() <= 5) return true; NodeList downNodes = activeNodes.down(); return ! ( (double)downNodes.size() / (double)activeNodes.size() > 0.2 ); } /** Adds a list of newly created reserved nodes to the node repository */ public List<Node> addReservedNodes(LockedNodeList nodes) { for (Node node : nodes) { if ( node.flavor().getType() != Flavor.Type.DOCKER_CONTAINER) illegal("Cannot add " + node + ": This is not a child node"); if (node.allocation().isEmpty()) illegal("Cannot add " + node + ": Child nodes need to be allocated"); Optional<Node> existing = node(node.hostname()); if (existing.isPresent()) illegal("Cannot add " + node + ": A node with this name already exists (" + existing.get() + ", " + existing.get().history() + "). Node to be added: " + node + ", " + node.history()); } return db.addNodesInState(nodes.asList(), Node.State.reserved, Agent.system); } /** * Adds a list of (newly created) nodes to the node repository as provisioned nodes. * If any of the nodes already exists in the deprovisioned state, the new node will be merged * with the history of that node. 
*/ public List<Node> addNodes(List<Node> nodes, Agent agent) { try (Mutex lock = lockUnallocated()) { List<Node> nodesToAdd = new ArrayList<>(); List<Node> nodesToRemove = new ArrayList<>(); for (int i = 0; i < nodes.size(); i++) { var node = nodes.get(i); for (int j = 0; j < i; j++) { if (node.equals(nodes.get(j))) illegal("Cannot add nodes: " + node + " is duplicated in the argument list"); } Optional<Node> existing = node(node.hostname()); if (existing.isPresent()) { if (existing.get().state() != Node.State.deprovisioned) illegal("Cannot add " + node + ": A node with this name already exists"); node = node.with(existing.get().history()); node = node.with(existing.get().reports()); node = node.with(node.status().withFailCount(existing.get().status().failCount())); if (existing.get().status().firmwareVerifiedAt().isPresent()) node = node.with(node.status().withFirmwareVerifiedAt(existing.get().status().firmwareVerifiedAt().get())); boolean rebuilding = existing.get().status().wantToRebuild(); if (rebuilding) { node = node.with(node.status().withWantToRetire(existing.get().status().wantToRetire(), false, rebuilding)); } nodesToRemove.add(existing.get()); } nodesToAdd.add(node); } NestedTransaction transaction = new NestedTransaction(); List<Node> resultingNodes = db.addNodesInState(IP.Config.verify(nodesToAdd, list(lock)), Node.State.provisioned, agent, transaction); db.removeNodes(nodesToRemove, transaction); transaction.commit(); return resultingNodes; } } /** Sets a list of nodes ready and returns the nodes in the ready state */ public List<Node> setReady(List<Node> nodes, Agent agent, String reason) { try (Mutex lock = lockUnallocated()) { List<Node> nodesWithResetFields = nodes.stream() .map(node -> { if (node.state() != Node.State.provisioned && node.state() != Node.State.dirty) illegal("Can not set " + node + " ready. 
It is not provisioned or dirty."); return node.withWantToRetire(false, false, false, Agent.system, clock.instant()); }) .collect(Collectors.toList()); return db.writeTo(Node.State.ready, nodesWithResetFields, agent, Optional.of(reason)); } } public Node setReady(String hostname, Agent agent, String reason) { Node nodeToReady = requireNode(hostname); if (nodeToReady.state() == Node.State.ready) return nodeToReady; return setReady(List.of(nodeToReady), agent, reason).get(0); } /** Reserve nodes. This method does <b>not</b> lock the node repository */ public List<Node> reserve(List<Node> nodes) { return db.writeTo(Node.State.reserved, nodes, Agent.application, Optional.empty()); } /** Activate nodes. This method does <b>not</b> lock the node repository */ public List<Node> activate(List<Node> nodes, NestedTransaction transaction) { return db.writeTo(Node.State.active, nodes, Agent.application, Optional.empty(), transaction); } /** * Sets a list of nodes to have their allocation removable (active to inactive) in the node repository. * * @param application the application the nodes belong to * @param nodes the nodes to make removable. These nodes MUST be in the active state. */ public void setRemovable(ApplicationId application, List<Node> nodes) { try (Mutex lock = lock(application)) { List<Node> removableNodes = nodes.stream() .map(node -> node.with(node.allocation().get().removable(true))) .collect(Collectors.toList()); write(removableNodes, lock); } } /** * Deactivates these nodes in a transaction and returns the nodes in the new state which will hold if the * transaction commits. */ public List<Node> deactivate(List<Node> nodes, ApplicationTransaction transaction) { if ( ! 
zone.environment().isProduction() || zone.system().isCd()) return deallocate(nodes, Agent.application, "Deactivated by application", transaction.nested()); var stateless = NodeList.copyOf(nodes).stateless(); var stateful = NodeList.copyOf(nodes).stateful(); List<Node> written = new ArrayList<>(); written.addAll(deallocate(stateless.asList(), Agent.application, "Deactivated by application", transaction.nested())); written.addAll(db.writeTo(Node.State.inactive, stateful.asList(), Agent.application, Optional.empty(), transaction.nested())); return written; } /** * Fails these nodes in a transaction and returns the nodes in the new state which will hold if the * transaction commits. */ public List<Node> fail(List<Node> nodes, ApplicationTransaction transaction) { return fail(nodes, Agent.application, "Failed by application", transaction.nested()); } public List<Node> fail(List<Node> nodes, Agent agent, String reason) { NestedTransaction transaction = new NestedTransaction(); nodes = fail(nodes, agent, reason, transaction); transaction.commit(); return nodes; } private List<Node> fail(List<Node> nodes, Agent agent, String reason, NestedTransaction transaction) { nodes = nodes.stream() .map(n -> n.withWantToFail(false, agent, clock.instant())) .collect(Collectors.toList()); return db.writeTo(Node.State.failed, nodes, agent, Optional.of(reason), transaction); } /** Move nodes to the dirty state */ public List<Node> deallocate(List<Node> nodes, Agent agent, String reason) { return performOn(NodeList.copyOf(nodes), (node, lock) -> deallocate(node, agent, reason)); } public List<Node> deallocateRecursively(String hostname, Agent agent, String reason) { Node nodeToDirty = node(hostname).orElseThrow(() -> new IllegalArgumentException("Could not deallocate " + hostname + ": Node not found")); List<Node> nodesToDirty = (nodeToDirty.type().isHost() ? 
Stream.concat(list().childrenOf(hostname).asList().stream(), Stream.of(nodeToDirty)) : Stream.of(nodeToDirty)) .filter(node -> node.state() != Node.State.dirty) .collect(Collectors.toList()); List<String> hostnamesNotAllowedToDirty = nodesToDirty.stream() .filter(node -> node.state() != Node.State.provisioned) .filter(node -> node.state() != Node.State.failed) .filter(node -> node.state() != Node.State.parked) .filter(node -> node.state() != Node.State.breakfixed) .map(Node::hostname) .collect(Collectors.toList()); if ( ! hostnamesNotAllowedToDirty.isEmpty()) illegal("Could not deallocate " + nodeToDirty + ": " + hostnamesNotAllowedToDirty + " are not in states [provisioned, failed, parked, breakfixed]"); return nodesToDirty.stream().map(node -> deallocate(node, agent, reason)).collect(Collectors.toList()); } /** * Set a node dirty or parked, allowed if it is in the provisioned, inactive, failed or parked state. * Use this to clean newly provisioned nodes or to recycle failed nodes which have been repaired or put on hold. */ public Node deallocate(Node node, Agent agent, String reason) { NestedTransaction transaction = new NestedTransaction(); Node deallocated = deallocate(node, agent, reason, transaction); transaction.commit(); return deallocated; } public List<Node> deallocate(List<Node> nodes, Agent agent, String reason, NestedTransaction transaction) { return nodes.stream().map(node -> deallocate(node, agent, reason, transaction)).collect(Collectors.toList()); } public Node deallocate(Node node, Agent agent, String reason, NestedTransaction transaction) { if (parkOnDeallocationOf(node, agent)) { return park(node.hostname(), false, agent, reason, transaction); } else { return db.writeTo(Node.State.dirty, List.of(node), agent, Optional.of(reason), transaction).get(0); } } /** * Fails this node and returns it in its new state. 
* * @return the node in its new state * @throws NoSuchNodeException if the node is not found */ public Node fail(String hostname, Agent agent, String reason) { return fail(hostname, true, agent, reason); } public Node fail(String hostname, boolean keepAllocation, Agent agent, String reason) { return move(hostname, Node.State.failed, agent, keepAllocation, Optional.of(reason)); } /** * Fails all the nodes that are children of hostname before finally failing the hostname itself. * Non-active nodes are failed immediately, while active nodes are marked as wantToFail. * The host is failed if it has no active nodes and marked wantToFail if it has. * * @return all the nodes that were changed by this request */ public List<Node> failOrMarkRecursively(String hostname, Agent agent, String reason) { NodeList children = list().childrenOf(hostname); List<Node> changed = performOn(children, (node, lock) -> failOrMark(node, agent, reason, lock)); if (children.state(Node.State.active).isEmpty()) changed.add(move(hostname, Node.State.failed, agent, true, Optional.of(reason))); else changed.addAll(performOn(NodeList.of(node(hostname).orElseThrow()), (node, lock) -> failOrMark(node, agent, reason, lock))); return changed; } private Node failOrMark(Node node, Agent agent, String reason, Mutex lock) { if (node.state() == Node.State.active) { node = node.withWantToFail(true, agent, clock.instant()); write(node, lock); return node; } else { return move(node.hostname(), Node.State.failed, agent, true, Optional.of(reason)); } } /** * Parks this node and returns it in its new state. 
* * @return the node in its new state * @throws NoSuchNodeException if the node is not found */ public Node park(String hostname, boolean keepAllocation, Agent agent, String reason) { NestedTransaction transaction = new NestedTransaction(); Node parked = park(hostname, keepAllocation, agent, reason, transaction); transaction.commit(); return parked; } private Node park(String hostname, boolean keepAllocation, Agent agent, String reason, NestedTransaction transaction) { return move(hostname, Node.State.parked, agent, keepAllocation, Optional.of(reason), transaction); } /** * Parks all the nodes that are children of hostname before finally parking the hostname itself. * * @return List of all the parked nodes in their new state */ public List<Node> parkRecursively(String hostname, Agent agent, String reason) { return moveRecursively(hostname, Node.State.parked, agent, Optional.of(reason)); } /** * Moves a previously failed or parked node back to the active state. * * @return the node in its new state * @throws NoSuchNodeException if the node is not found */ public Node reactivate(String hostname, Agent agent, String reason) { return move(hostname, Node.State.active, agent, true, Optional.of(reason)); } /** * Moves a host to breakfixed state, removing any children. 
*/ public List<Node> breakfixRecursively(String hostname, Agent agent, String reason) { Node node = requireNode(hostname); try (Mutex lock = lockUnallocated()) { requireBreakfixable(node); NestedTransaction transaction = new NestedTransaction(); List<Node> removed = removeChildren(node, false, transaction); removed.add(move(node.hostname(), Node.State.breakfixed, agent, true, Optional.of(reason), transaction)); transaction.commit(); return removed; } } private List<Node> moveRecursively(String hostname, Node.State toState, Agent agent, Optional<String> reason) { NestedTransaction transaction = new NestedTransaction(); List<Node> moved = list().childrenOf(hostname).asList().stream() .map(child -> move(child.hostname(), toState, agent, true, reason, transaction)) .collect(Collectors.toList()); moved.add(move(hostname, toState, agent, true, reason, transaction)); transaction.commit(); return moved; } /** Move a node to given state */ private Node move(String hostname, Node.State toState, Agent agent, boolean keepAllocation, Optional<String> reason) { NestedTransaction transaction = new NestedTransaction(); Node moved = move(hostname, toState, agent, keepAllocation, reason, transaction); transaction.commit(); return moved; } /** Move a node to given state as part of a transaction */ private Node move(String hostname, Node.State toState, Agent agent, boolean keepAllocation, Optional<String> reason, NestedTransaction transaction) { try (NodeMutex lock = lockAndGetRequired(hostname)) { Node node = lock.node(); if (toState == Node.State.active) { if (node.allocation().isEmpty()) illegal("Could not set " + node + " active: It has no allocation"); if (!keepAllocation) illegal("Could not set " + node + " active: Requested to discard allocation"); for (Node currentActive : list(Node.State.active).owner(node.allocation().get().owner())) { if (node.allocation().get().membership().cluster().equals(currentActive.allocation().get().membership().cluster()) && 
node.allocation().get().membership().index() == currentActive.allocation().get().membership().index()) illegal("Could not set " + node + " active: Same cluster and index as " + currentActive); } } if (!keepAllocation && node.allocation().isPresent()) { node = node.withoutAllocation(); } if (toState == Node.State.deprovisioned) { node = node.with(IP.Config.EMPTY); } return db.writeTo(toState, List.of(node), agent, reason, transaction).get(0); } } /* * This method is used by the REST API to handle readying nodes for new allocations. For Linux * containers this will remove the node from node repository, otherwise the node will be moved to state ready. */ public Node markNodeAvailableForNewAllocation(String hostname, Agent agent, String reason) { Node node = requireNode(hostname); if (node.flavor().getType() == Flavor.Type.DOCKER_CONTAINER && node.type() == NodeType.tenant) { if (node.state() != Node.State.dirty) illegal("Cannot make " + node + " available for new allocation as it is not in state [dirty]"); return removeRecursively(node, true).get(0); } if (node.state() == Node.State.ready) return node; Node parentHost = node.parentHostname().flatMap(this::node).orElse(node); List<String> failureReasons = NodeFailer.reasonsToFailHost(parentHost); if ( ! failureReasons.isEmpty()) illegal(node + " cannot be readied because it has hard failures: " + failureReasons); return setReady(List.of(node), agent, reason).get(0); } /** * Removes all the nodes that are children of hostname before finally removing the hostname itself. 
* * @return a List of all the nodes that have been removed or (for hosts) deprovisioned */ public List<Node> removeRecursively(String hostname) { Node node = requireNode(hostname); return removeRecursively(node, false); } public List<Node> removeRecursively(Node node, boolean force) { try (Mutex lock = lockUnallocated()) { requireRemovable(node, false, force); NestedTransaction transaction = new NestedTransaction(); final List<Node> removed; if (!node.type().isHost()) { removed = List.of(node); db.removeNodes(removed, transaction); } else { removed = removeChildren(node, force, transaction); if (zone.getCloud().dynamicProvisioning()) { db.removeNodes(List.of(node), transaction); } else { move(node.hostname(), Node.State.deprovisioned, Agent.system, false, Optional.empty(), transaction); } removed.add(node); } transaction.commit(); return removed; } } /** Forgets a deprovisioned node. This removes all traces of the node in the node repository. */ public void forget(Node node) { if (node.state() != Node.State.deprovisioned) throw new IllegalArgumentException(node + " must be deprovisioned before it can be forgotten"); if (node.status().wantToRebuild()) throw new IllegalArgumentException(node + " is rebuilding and cannot be forgotten"); NestedTransaction transaction = new NestedTransaction(); db.removeNodes(List.of(node), transaction); transaction.commit(); } private List<Node> removeChildren(Node node, boolean force, NestedTransaction transaction) { List<Node> children = list().childrenOf(node).asList(); children.forEach(child -> requireRemovable(child, true, force)); db.removeNodes(children, transaction); return new ArrayList<>(children); } /** * Throws if the given node cannot be removed. 
Removal is allowed if: * - Tenant node: * - non-recursively: node is unallocated * - recursively: node is unallocated or node is in failed|parked * - Host node: iff in state provisioned|failed|parked * - Child node: * - non-recursively: node in state ready * - recursively: child is in state provisioned|failed|parked|dirty|ready */ private void requireRemovable(Node node, boolean removingRecursively, boolean force) { if (force) return; if (node.type() == NodeType.tenant && node.allocation().isPresent()) { EnumSet<Node.State> removableStates = EnumSet.of(Node.State.failed, Node.State.parked); if (!removingRecursively || !removableStates.contains(node.state())) illegal(node + " is currently allocated and cannot be removed while in " + node.state()); } final Set<Node.State> removableStates; if (node.type().isHost()) { removableStates = EnumSet.of(Node.State.provisioned, Node.State.failed, Node.State.parked); } else { removableStates = removingRecursively ? EnumSet.of(Node.State.provisioned, Node.State.failed, Node.State.parked, Node.State.dirty, Node.State.ready) : EnumSet.of(Node.State.ready); } if (!removableStates.contains(node.state())) illegal(node + " can not be removed while in " + node.state()); } /** * Throws if given node cannot be breakfixed. * Breakfix is allowed if the following is true: * - Node is tenant host * - Node is in zone without dynamic provisioning * - Node is in parked or failed state */ private void requireBreakfixable(Node node) { if (zone.getCloud().dynamicProvisioning()) { illegal("Can not breakfix in zone: " + zone); } if (node.type() != NodeType.host) { illegal(node + " can not be breakfixed as it is not a tenant host"); } Set<Node.State> legalStates = EnumSet.of(Node.State.failed, Node.State.parked); if (! legalStates.contains(node.state())) { illegal(node + " can not be removed as it is not in the states " + legalStates); } } /** * Increases the restart generation of the active nodes matching given filter. 
* * @return the nodes in their new state */ public List<Node> restartActive(Predicate<Node> filter) { return restart(NodeFilter.in(Set.of(Node.State.active)).and(filter)); } /** * Increases the restart generation of the any nodes matching given filter. * * @return the nodes in their new state */ public List<Node> restart(Predicate<Node> filter) { return performOn(filter, (node, lock) -> write(node.withRestart(node.allocation().get().restartGeneration().withIncreasedWanted()), lock)); } /** * Increases the reboot generation of the nodes matching the filter. * * @return the nodes in their new state */ public List<Node> reboot(Predicate<Node> filter) { return performOn(filter, (node, lock) -> write(node.withReboot(node.status().reboot().withIncreasedWanted()), lock)); } /** * Set target OS version of all nodes matching given filter. * * @return the nodes in their new state */ public List<Node> upgradeOs(Predicate<Node> filter, Optional<Version> version) { return performOn(filter, (node, lock) -> { var newStatus = node.status().withOsVersion(node.status().osVersion().withWanted(version)); return write(node.with(newStatus), lock); }); } /** Retire nodes matching given filter */ public List<Node> retire(Predicate<Node> filter, Agent agent, Instant instant) { return performOn(filter, (node, lock) -> write(node.withWantToRetire(true, agent, instant), lock)); } /** Retire and deprovision given host and all of its children */ public List<Node> deprovision(String hostname, Agent agent, Instant instant) { return decommission(hostname, DecommissionOperation.deprovision, agent, instant); } /** Retire and rebuild given host and all of its children */ public List<Node> rebuild(String hostname, Agent agent, Instant instant) { return decommission(hostname, DecommissionOperation.rebuild, agent, instant); } private List<Node> decommission(String hostname, DecommissionOperation op, Agent agent, Instant instant) { Optional<NodeMutex> nodeMutex = lockAndGet(hostname); if 
(nodeMutex.isEmpty()) return List.of(); Node host = nodeMutex.get().node(); if (!host.type().isHost()) throw new IllegalArgumentException("Cannot " + op + " non-host " + host); List<Node> result; boolean wantToDeprovision = op == DecommissionOperation.deprovision; boolean wantToRebuild = op == DecommissionOperation.rebuild; try (NodeMutex lock = nodeMutex.get(); Mutex allocationLock = lockUnallocated()) { host = lock.node(); result = performOn(list(allocationLock).childrenOf(host), (node, nodeLock) -> { Node newNode = node.withWantToRetire(true, wantToDeprovision, wantToRebuild, agent, instant); return write(newNode, nodeLock); }); Node newHost = host.withWantToRetire(true, wantToDeprovision, wantToRebuild, agent, instant); result.add(write(newHost, lock)); } return result; } /** * Writes this node after it has changed some internal state but NOT changed its state field. * This does NOT lock the node repository implicitly, but callers are expected to already hold the lock. * * @param lock already acquired lock * @return the written node for convenience */ public Node write(Node node, Mutex lock) { return write(List.of(node), lock).get(0); } /** * Writes these nodes after they have changed some internal state but NOT changed their state field. * This does NOT lock the node repository implicitly, but callers are expected to already hold the lock. * * @param lock already acquired lock * @return the written nodes for convenience */ public List<Node> write(List<Node> nodes, @SuppressWarnings("unused") Mutex lock) { return db.writeTo(nodes, Agent.system, Optional.empty()); } private List<Node> performOn(Predicate<Node> filter, BiFunction<Node, Mutex, Node> action) { return performOn(list().matching(filter), action); } /** * Performs an operation requiring locking on all nodes matching some filter. 
* * @param action the action to perform * @return the set of nodes on which the action was performed, as they became as a result of the operation */ private List<Node> performOn(NodeList nodes, BiFunction<Node, Mutex, Node> action) { List<Node> unallocatedNodes = new ArrayList<>(); ListMap<ApplicationId, Node> allocatedNodes = new ListMap<>(); for (Node node : nodes) { if (node.allocation().isPresent()) allocatedNodes.put(node.allocation().get().owner(), node); else unallocatedNodes.add(node); } List<Node> resultingNodes = new ArrayList<>(); try (Mutex lock = lockUnallocated()) { for (Node node : unallocatedNodes) { Optional<Node> currentNode = db.readNode(node.hostname()); if (currentNode.isEmpty()) continue; resultingNodes.add(action.apply(currentNode.get(), lock)); } } for (Map.Entry<ApplicationId, List<Node>> applicationNodes : allocatedNodes.entrySet()) { try (Mutex lock = lock(applicationNodes.getKey())) { for (Node node : applicationNodes.getValue()) { Optional<Node> currentNode = db.readNode(node.hostname()); if (currentNode.isEmpty()) continue; resultingNodes.add(action.apply(currentNode.get(), lock)); } } } return resultingNodes; } public boolean canAllocateTenantNodeTo(Node host) { return canAllocateTenantNodeTo(host, zone.getCloud().dynamicProvisioning()); } public boolean suspended(Node node) { try { return orchestrator.getNodeStatus(new HostName(node.hostname())).isSuspended(); } catch (HostNameNotFoundException e) { return false; } } /** Create a lock which provides exclusive rights to making changes to the given application */ public Mutex lock(ApplicationId application) { return db.lock(application); } /** Create a lock with a timeout which provides exclusive rights to making changes to the given application */ public Mutex lock(ApplicationId application, Duration timeout) { return db.lock(application, timeout); } /** Create a lock which provides exclusive rights to modifying unallocated nodes */ public Mutex lockUnallocated() { return 
db.lockInactive(); } /** Returns the unallocated/application lock, and the node acquired under that lock. */ public Optional<NodeMutex> lockAndGet(Node node) { Node staleNode = node; final int maxRetries = 4; for (int i = 0; i < maxRetries; ++i) { Mutex lockToClose = lock(staleNode); try { Optional<Node> freshNode = node(staleNode.hostname(), staleNode.state()); if (freshNode.isEmpty()) { freshNode = node(staleNode.hostname()); if (freshNode.isEmpty()) { return Optional.empty(); } } if (Objects.equals(freshNode.get().allocation().map(Allocation::owner), staleNode.allocation().map(Allocation::owner))) { NodeMutex nodeMutex = new NodeMutex(freshNode.get(), lockToClose); lockToClose = null; return Optional.of(nodeMutex); } staleNode = freshNode.get(); } finally { if (lockToClose != null) lockToClose.close(); } } throw new IllegalStateException("Giving up (after " + maxRetries + " attempts) " + "fetching an up to date node under lock: " + node.hostname()); } /** Returns the unallocated/application lock, and the node acquired under that lock. */ public Optional<NodeMutex> lockAndGet(String hostname) { return node(hostname).flatMap(this::lockAndGet); } /** Returns the unallocated/application lock, and the node acquired under that lock. */ public NodeMutex lockAndGetRequired(Node node) { return lockAndGet(node).orElseThrow(() -> new NoSuchNodeException("No node with hostname '" + node.hostname() + "'")); } /** Returns the unallocated/application lock, and the node acquired under that lock. */ public NodeMutex lockAndGetRequired(String hostname) { return lockAndGet(hostname).orElseThrow(() -> new NoSuchNodeException("No node with hostname '" + hostname + "'")); } private Mutex lock(Node node) { return node.allocation().isPresent() ? 
lock(node.allocation().get().owner()) : lockUnallocated(); } private Node requireNode(String hostname) { return node(hostname).orElseThrow(() -> new NoSuchNodeException("No node with hostname '" + hostname + "'")); } private void illegal(String message) { throw new IllegalArgumentException(message); } /** Returns whether node should be parked when deallocated by given agent */ private static boolean parkOnDeallocationOf(Node node, Agent agent) { if (node.state() == Node.State.parked) return false; if (agent == Agent.operator) return false; if (!node.type().isHost() && node.status().wantToDeprovision()) return false; boolean retirementRequestedByOperator = node.status().wantToRetire() && node.history().event(History.Event.Type.wantToRetire) .map(History.Event::agent) .map(a -> a == Agent.operator) .orElse(false); return node.status().wantToDeprovision() || node.status().wantToRebuild() || retirementRequestedByOperator; } /** The different ways a host can be decommissioned */ private enum DecommissionOperation { deprovision, rebuild, } }
Hmm, these have a separate counter per job ... Urgh :@
public LockedApplication storeWithUpdatedConfig(LockedApplication application, ApplicationPackage applicationPackage) { applicationPackageValidator.validate(application.get(), applicationPackage, clock.instant()); application = application.with(applicationPackage.deploymentSpec()); application = application.with(applicationPackage.validationOverrides()); var existingInstances = application.get().instances().keySet(); var declaredInstances = applicationPackage.deploymentSpec().instanceNames(); for (var name : declaredInstances) if ( ! existingInstances.contains(name)) application = withNewInstance(application, application.get().id().instance(name)); for (InstanceName name : existingInstances) { application = withoutDeletedDeployments(application, name); } controller.jobController().deploymentStatus(application.get()); for (Notification notification : controller.notificationsDb().listNotifications(NotificationSource.from(application.get().id()), true)) { if ( ! notification.source().instance().map(declaredInstances::contains).orElse(false)) controller.notificationsDb().removeNotifications(notification.source()); if (notification.source().instance().isPresent() && ! notification.source().zoneId().map(application.get().require(notification.source().instance().get()).deployments()::containsKey).orElse(false)) controller.notificationsDb().removeNotifications(notification.source()); } var oldestVersionByDirectlyDeployed = application.get() .instances() .values() .stream() .flatMap(instance -> instance.deployments().values().stream()) .map(Deployment::applicationVersion) .collect(toMap( ApplicationVersion::isDeployedDirectly, Function.identity(), (v1, v2) -> v1.compareTo(v2) < 0 ? 
v1 : v2 )); var olderVersions = application.get().versions() .stream() .filter(version -> version.compareTo(oldestVersionByDirectlyDeployed.getOrDefault(version.isDeployedDirectly(), ApplicationVersion.unknown)) < 0) .sorted() .collect(Collectors.toList()); for (int i = 0; i < olderVersions.size() - 1; i++) { application = application.withoutVersion(olderVersions.get(i)); } store(application); return application; }
(v1, v2) -> v1.compareTo(v2) < 0 ? v1 : v2
public LockedApplication storeWithUpdatedConfig(LockedApplication application, ApplicationPackage applicationPackage) { applicationPackageValidator.validate(application.get(), applicationPackage, clock.instant()); application = application.with(applicationPackage.deploymentSpec()); application = application.with(applicationPackage.validationOverrides()); var existingInstances = application.get().instances().keySet(); var declaredInstances = applicationPackage.deploymentSpec().instanceNames(); for (var name : declaredInstances) if ( ! existingInstances.contains(name)) application = withNewInstance(application, application.get().id().instance(name)); for (InstanceName name : existingInstances) { application = withoutDeletedDeployments(application, name); } controller.jobController().deploymentStatus(application.get()); for (Notification notification : controller.notificationsDb().listNotifications(NotificationSource.from(application.get().id()), true)) { if ( ! notification.source().instance().map(declaredInstances::contains).orElse(false)) controller.notificationsDb().removeNotifications(notification.source()); if (notification.source().instance().isPresent() && ! 
notification.source().zoneId().map(application.get().require(notification.source().instance().get()).deployments()::containsKey).orElse(false)) controller.notificationsDb().removeNotifications(notification.source()); } var oldestDeployedVersion = application.get() .instances() .values() .stream() .flatMap(instance -> instance.deployments().values().stream()) .map(Deployment::applicationVersion) .filter(version -> !version.isDeployedDirectly()) .sorted() .findFirst() .orElse(ApplicationVersion.unknown); var olderVersions = application.get().versions() .stream() .filter(version -> version.compareTo(oldestDeployedVersion) < 0) .sorted() .collect(Collectors.toList()); for (int i = 0; i < olderVersions.size() - 1; i++) { application = application.withoutVersion(olderVersions.get(i)); } store(application); return application; }
class ApplicationController { private static final Logger log = Logger.getLogger(ApplicationController.class.getName()); /** The controller owning this */ private final Controller controller; /** For persistence */ private final CuratorDb curator; private final ArtifactRepository artifactRepository; private final ApplicationStore applicationStore; private final AccessControl accessControl; private final ConfigServer configServer; private final Clock clock; private final DeploymentTrigger deploymentTrigger; private final ApplicationPackageValidator applicationPackageValidator; private final EndpointCertificates endpointCertificates; private final StringFlag dockerImageRepoFlag; private final BillingController billingController; ApplicationController(Controller controller, CuratorDb curator, AccessControl accessControl, Clock clock, FlagSource flagSource, BillingController billingController) { this.controller = Objects.requireNonNull(controller); this.curator = Objects.requireNonNull(curator); this.accessControl = Objects.requireNonNull(accessControl); this.configServer = controller.serviceRegistry().configServer(); this.clock = Objects.requireNonNull(clock); this.billingController = Objects.requireNonNull(billingController); artifactRepository = controller.serviceRegistry().artifactRepository(); applicationStore = controller.serviceRegistry().applicationStore(); dockerImageRepoFlag = PermanentFlags.DOCKER_IMAGE_REPO.bindTo(flagSource); deploymentTrigger = new DeploymentTrigger(controller, clock); applicationPackageValidator = new ApplicationPackageValidator(controller); endpointCertificates = new EndpointCertificates(controller, controller.serviceRegistry().endpointCertificateProvider(), controller.serviceRegistry().endpointCertificateValidator()); Once.after(Duration.ofMinutes(1), () -> { Instant start = clock.instant(); int count = 0; for (TenantAndApplicationId id : curator.readApplicationIds()) { lockApplicationIfPresent(id, application -> { for (InstanceName 
instance : application.get().deploymentSpec().instanceNames()) if (!application.get().instances().containsKey(instance)) application = withNewInstance(application, id.instance(instance)); store(application); }); count++; } log.log(Level.INFO, Text.format("Wrote %d applications in %s", count, Duration.between(start, clock.instant()))); }); } /** Returns the application with the given id, or null if it is not present */ public Optional<Application> getApplication(TenantAndApplicationId id) { return curator.readApplication(id); } /** Returns the instance with the given id, or null if it is not present */ public Optional<Instance> getInstance(ApplicationId id) { return getApplication(TenantAndApplicationId.from(id)).flatMap(application -> application.get(id.instance())); } /** * Triggers reindexing for the given document types in the given clusters, for the given application. * * If no clusters are given, reindexing is triggered for the entire application; otherwise * if no documents types are given, reindexing is triggered for all given clusters; otherwise * reindexing is triggered for the cartesian product of the given clusters and document types. */ public void reindex(ApplicationId id, ZoneId zoneId, List<String> clusterNames, List<String> documentTypes, boolean indexedOnly, Double speed) { configServer.reindex(new DeploymentId(id, zoneId), clusterNames, documentTypes, indexedOnly, speed); } /** Returns the reindexing status for the given application in the given zone. */ public ApplicationReindexing applicationReindexing(ApplicationId id, ZoneId zoneId) { return configServer.getReindexing(new DeploymentId(id, zoneId)); } /** Enables reindexing for the given application in the given zone. */ public void enableReindexing(ApplicationId id, ZoneId zoneId) { configServer.enableReindexing(new DeploymentId(id, zoneId)); } /** Disables reindexing for the given application in the given zone. 
*/ public void disableReindexing(ApplicationId id, ZoneId zoneId) { configServer.disableReindexing(new DeploymentId(id, zoneId)); } /** * Returns the application with the given id * * @throws IllegalArgumentException if it does not exist */ public Application requireApplication(TenantAndApplicationId id) { return getApplication(id).orElseThrow(() -> new IllegalArgumentException(id + " not found")); } /** * Returns the instance with the given id * * @throws IllegalArgumentException if it does not exist */ public Instance requireInstance(ApplicationId id) { return getInstance(id).orElseThrow(() -> new IllegalArgumentException(id + " not found")); } /** Returns a snapshot of all applications */ public List<Application> asList() { return curator.readApplications(false); } /** * Returns a snapshot of all readable applications. Unlike {@link ApplicationController * applications that cannot currently be read (e.g. due to serialization issues) and may return an incomplete * snapshot. * * This should only be used in cases where acting on a subset of applications is better than none. */ public List<Application> readable() { return curator.readApplications(true); } /** Returns the ID of all known applications. */ public List<TenantAndApplicationId> idList() { return curator.readApplicationIds(); } /** Returns a snapshot of all applications of a tenant */ public List<Application> asList(TenantName tenant) { return curator.readApplications(tenant); } public ArtifactRepository artifacts() { return artifactRepository; } public ApplicationStore applicationStore() { return applicationStore; } /** Returns all currently reachable content clusters among the given deployments. 
*/ public Map<ZoneId, List<String>> reachableContentClustersByZone(Collection<DeploymentId> ids) { Map<ZoneId, List<String>> clusters = new TreeMap<>(Comparator.comparing(ZoneId::value)); for (DeploymentId id : ids) if (isHealthy(id)) clusters.put(id.zoneId(), List.copyOf(configServer.getContentClusters(id))); return Collections.unmodifiableMap(clusters); } /** Reads the oldest installed platform for the given application and zone from job history, or a node repo. */ private Optional<Version> oldestInstalledPlatform(JobStatus job) { Version oldest = null; for (Run run : job.runs().descendingMap().values()) { Version version = run.versions().targetPlatform(); if (oldest == null || version.isBefore(oldest)) oldest = version; if (run.status() == RunStatus.success) return Optional.of(oldest); } return oldestInstalledPlatform(job.id()); } /** Reads the oldest installed platform for the given application and zone from the node repo of that zone. */ private Optional<Version> oldestInstalledPlatform(JobId job) { return configServer.nodeRepository().list(job.type().zone(controller.system()), NodeFilter.all() .applications(job.application()) .states(active, reserved)) .stream() .map(Node::currentVersion) .filter(version -> ! version.isEmpty()) .min(naturalOrder()); } /** Returns the oldest Vespa version installed on any active or reserved production node for the given application. */ public Version oldestInstalledPlatform(TenantAndApplicationId id) { return controller.jobController().deploymentStatus(requireApplication(id)).jobs() .production().asList().stream() .map(this::oldestInstalledPlatform) .flatMap(Optional::stream) .min(naturalOrder()) .orElse(controller.readSystemVersion()); } /** * Creates a new application for an existing tenant. 
* * @throws IllegalArgumentException if the application already exists */ public Application createApplication(TenantAndApplicationId id, Credentials credentials) { try (Lock lock = lock(id)) { if (getApplication(id).isPresent()) throw new IllegalArgumentException("Could not create '" + id + "': Application already exists"); if (getApplication(dashToUnderscore(id)).isPresent()) throw new IllegalArgumentException("Could not create '" + id + "': Application " + dashToUnderscore(id) + " already exists"); com.yahoo.vespa.hosted.controller.api.identifiers.ApplicationId.validate(id.application().value()); if (controller.tenants().get(id.tenant()).isEmpty()) throw new IllegalArgumentException("Could not create '" + id + "': This tenant does not exist"); accessControl.createApplication(id, credentials); LockedApplication locked = new LockedApplication(new Application(id, clock.instant()), lock); store(locked); log.info("Created " + locked); return locked.get(); } } /** * Creates a new instance for an existing application. * * @throws IllegalArgumentException if the instance already exists, or has an invalid instance name. 
*/ public void createInstance(ApplicationId id) { lockApplicationOrThrow(TenantAndApplicationId.from(id), application -> { store(withNewInstance(application, id)); }); } /** Returns given application with a new instance */ public LockedApplication withNewInstance(LockedApplication application, ApplicationId instance) { if (instance.instance().isTester()) throw new IllegalArgumentException("'" + instance + "' is a tester application!"); InstanceId.validate(instance.instance().value()); if (getInstance(instance).isPresent()) throw new IllegalArgumentException("Could not create '" + instance + "': Instance already exists"); if (getInstance(dashToUnderscore(instance)).isPresent()) throw new IllegalArgumentException("Could not create '" + instance + "': Instance " + dashToUnderscore(instance) + " already exists"); log.info("Created " + instance); return application.withNewInstance(instance.instance()); } /** Deploys an application package for an existing application instance. */ public ActivateResult deploy(JobId job, boolean deploySourceVersions) { if (job.application().instance().isTester()) throw new IllegalArgumentException("'" + job.application() + "' is a tester application!"); TenantAndApplicationId applicationId = TenantAndApplicationId.from(job.application()); ZoneId zone = job.type().zone(controller.system()); DeploymentId deployment = new DeploymentId(job.application(), zone); try (Lock deploymentLock = lockForDeployment(job.application(), zone)) { Set<ContainerEndpoint> containerEndpoints; Optional<EndpointCertificateMetadata> endpointCertificateMetadata; Run run = controller.jobController().last(job) .orElseThrow(() -> new IllegalStateException("No known run of '" + job + "'")); if (run.hasEnded()) throw new IllegalStateException("No deployment expected for " + job + " now, as no job is running"); Version platform = run.versions().sourcePlatform().filter(__ -> deploySourceVersions).orElse(run.versions().targetPlatform()); ApplicationVersion revision = 
run.versions().sourceApplication().filter(__ -> deploySourceVersions).orElse(run.versions().targetApplication()); ApplicationPackage applicationPackage = new ApplicationPackage(applicationStore.get(deployment, revision)); try (Lock lock = lock(applicationId)) { LockedApplication application = new LockedApplication(requireApplication(applicationId), lock); Instance instance = application.get().require(job.application().instance()); if ( ! applicationPackage.trustedCertificates().isEmpty() && run.testerCertificate().isPresent()) applicationPackage = applicationPackage.withTrustedCertificate(run.testerCertificate().get()); endpointCertificateMetadata = endpointCertificates.getMetadata(instance, zone, applicationPackage.deploymentSpec()); containerEndpoints = controller.routing().of(deployment).prepare(application); } ActivateResult result = deploy(job.application(), applicationPackage, zone, platform, containerEndpoints, endpointCertificateMetadata, run.isDryRun()); var quotaUsage = deploymentQuotaUsage(zone, job.application()); NotificationSource source = zone.environment().isManuallyDeployed() ? 
NotificationSource.from(deployment) : NotificationSource.from(applicationId); @SuppressWarnings("deprecation") List<String> warnings = Optional.ofNullable(result.prepareResponse().log) .map(logs -> logs.stream() .filter(log -> log.applicationPackage) .filter(log -> LogLevel.parse(log.level).intValue() >= Level.WARNING.intValue()) .map(log -> log.message) .sorted() .distinct() .collect(Collectors.toList())) .orElseGet(List::of); if (warnings.isEmpty()) controller.notificationsDb().removeNotification(source, Notification.Type.applicationPackage); else controller.notificationsDb().setNotification(source, Notification.Type.applicationPackage, Notification.Level.warning, warnings); lockApplicationOrThrow(applicationId, application -> store(application.with(job.application().instance(), instance -> instance.withNewDeployment(zone, revision, platform, clock.instant(), warningsFrom(result), quotaUsage)))); return result; } } /** Stores the deployment spec and validation overrides from the application package, and runs cleanup. 
*/ /** Deploy a system application to given zone */ public void deploy(SystemApplication application, ZoneId zone, Version version, boolean allowDowngrade) { if (application.hasApplicationPackage()) { deploySystemApplicationPackage(application, zone, version); } else { configServer.nodeRepository().upgrade(zone, application.nodeType(), version, allowDowngrade); } } /** Deploy a system application to given zone */ public ActivateResult deploySystemApplicationPackage(SystemApplication application, ZoneId zone, Version version) { if (application.hasApplicationPackage()) { ApplicationPackage applicationPackage = new ApplicationPackage( artifactRepository.getSystemApplicationPackage(application.id(), zone, version) ); return deploy(application.id(), applicationPackage, zone, version, Set.of(), /* No application cert */ Optional.empty(), false); } else { throw new RuntimeException("This system application does not have an application package: " + application.id().toShortString()); } } /** Deploys the given tester application to the given zone. 
*/ public ActivateResult deployTester(TesterId tester, ApplicationPackage applicationPackage, ZoneId zone, Version platform) { return deploy(tester.id(), applicationPackage, zone, platform, Set.of(), /* No application cert for tester*/ Optional.empty(), false); } private ActivateResult deploy(ApplicationId application, ApplicationPackage applicationPackage, ZoneId zone, Version platform, Set<ContainerEndpoint> endpoints, Optional<EndpointCertificateMetadata> endpointCertificateMetadata, boolean dryRun) { DeploymentId deployment = new DeploymentId(application, zone); try { Optional<DockerImage> dockerImageRepo = Optional.ofNullable( dockerImageRepoFlag .with(FetchVector.Dimension.ZONE_ID, zone.value()) .with(FetchVector.Dimension.APPLICATION_ID, application.serializedForm()) .value()) .filter(s -> !s.isBlank()) .map(DockerImage::fromString); Optional<AthenzDomain> domain = controller.tenants().get(application.tenant()) .filter(tenant-> tenant instanceof AthenzTenant) .map(tenant -> ((AthenzTenant)tenant).domain()); if (zone.environment().isManuallyDeployed()) controller.applications().applicationStore().putMeta(deployment, clock.instant(), applicationPackage.metaDataZip()); Quota deploymentQuota = DeploymentQuotaCalculator.calculate(billingController.getQuota(application.tenant()), asList(application.tenant()), application, zone, applicationPackage.deploymentSpec()); List<TenantSecretStore> tenantSecretStores = controller.tenants() .get(application.tenant()) .filter(tenant-> tenant instanceof CloudTenant) .map(tenant -> ((CloudTenant) tenant).tenantSecretStores()) .orElse(List.of()); List<X509Certificate> operatorCertificates = controller.supportAccess().activeGrantsFor(deployment).stream() .map(SupportAccessGrant::certificate) .collect(toList()); ConfigServer.PreparedApplication preparedApplication = configServer.deploy(new DeploymentData(application, zone, applicationPackage.zippedContent(), platform, endpoints, endpointCertificateMetadata, dockerImageRepo, 
domain, deploymentQuota, tenantSecretStores, operatorCertificates, dryRun)); return new ActivateResult(new RevisionId(applicationPackage.hash()), preparedApplication.prepareResponse(), applicationPackage.zippedContent().length); } finally { if ( ! application.instance().isTester()) { controller.routing().of(deployment).configure(applicationPackage.deploymentSpec()); } } } private LockedApplication withoutDeletedDeployments(LockedApplication application, InstanceName instance) { DeploymentSpec deploymentSpec = application.get().deploymentSpec(); List<ZoneId> deploymentsToRemove = application.get().require(instance).productionDeployments().values().stream() .map(Deployment::zone) .filter(zone -> deploymentSpec.instance(instance).isEmpty() || ! deploymentSpec.requireInstance(instance).deploysTo(zone.environment(), zone.region())) .collect(toList()); if (deploymentsToRemove.isEmpty()) return application; if ( ! application.get().validationOverrides().allows(ValidationId.deploymentRemoval, clock.instant())) throw new IllegalArgumentException(ValidationId.deploymentRemoval.value() + ": " + application.get().require(instance) + " is deployed in " + deploymentsToRemove.stream() .map(zone -> zone.region().value()) .collect(joining(", ")) + ", but does not include " + (deploymentsToRemove.size() > 1 ? "these zones" : "this zone") + " in deployment.xml. " + ValidationOverrides.toAllowMessage(ValidationId.deploymentRemoval)); boolean removeInstance = ! deploymentSpec.instanceNames().contains(instance) && application.get().require(instance).deployments().size() == deploymentsToRemove.size(); for (ZoneId zone : deploymentsToRemove) application = deactivate(application, instance, zone); if (removeInstance) application = application.without(instance); return application; } /** * Deletes the the given application. All known instances of the applications will be deleted. 
* * @throws IllegalArgumentException if the application has deployments or the caller is not authorized */ public void deleteApplication(TenantAndApplicationId id, Credentials credentials) { deleteApplication(id, Optional.of(credentials)); } public void deleteApplication(TenantAndApplicationId id, Optional<Credentials> credentials) { lockApplicationOrThrow(id, application -> { var deployments = application.get().instances().values().stream() .filter(instance -> ! instance.deployments().isEmpty()) .collect(toMap(instance -> instance.name(), instance -> instance.deployments().keySet().stream() .map(ZoneId::toString) .collect(joining(", ")))); if ( ! deployments.isEmpty()) throw new IllegalArgumentException("Could not delete '" + application + "': It has active deployments: " + deployments); for (Instance instance : application.get().instances().values()) { controller.routing().removeEndpointsInDns(application.get(), instance.name()); application = application.without(instance.name()); } applicationStore.removeAll(id.tenant(), id.application()); applicationStore.removeAllTesters(id.tenant(), id.application()); applicationStore.putMetaTombstone(id.tenant(), id.application(), clock.instant()); credentials.ifPresent(creds -> accessControl.deleteApplication(id, creds)); curator.removeApplication(id); controller.jobController().collectGarbage(); controller.notificationsDb().removeNotifications(NotificationSource.from(id)); log.info("Deleted " + id); }); } /** * Deletes the the given application instance. * * @throws IllegalArgumentException if the application has deployments or the caller is not authorized * @throws NotExistsException if the instance does not exist */ public void deleteInstance(ApplicationId instanceId) { if (getInstance(instanceId).isEmpty()) throw new NotExistsException("Could not delete instance '" + instanceId + "': Instance not found"); lockApplicationOrThrow(TenantAndApplicationId.from(instanceId), application -> { if ( ! 
application.get().require(instanceId.instance()).deployments().isEmpty()) throw new IllegalArgumentException("Could not delete '" + application + "': It has active deployments in: " + application.get().require(instanceId.instance()).deployments().keySet().stream().map(ZoneId::toString) .sorted().collect(joining(", "))); if ( ! application.get().deploymentSpec().equals(DeploymentSpec.empty) && application.get().deploymentSpec().instanceNames().contains(instanceId.instance())) throw new IllegalArgumentException("Can not delete '" + instanceId + "', which is specified in 'deployment.xml'; remove it there first"); controller.routing().removeEndpointsInDns(application.get(), instanceId.instance()); curator.writeApplication(application.without(instanceId.instance()).get()); controller.jobController().collectGarbage(); controller.notificationsDb().removeNotifications(NotificationSource.from(instanceId)); log.info("Deleted " + instanceId); }); } /** * Replace any previous version of this application by this instance * * @param application a locked application to store */ public void store(LockedApplication application) { curator.writeApplication(application.get()); } /** * Acquire a locked application to modify and store, if there is an application with the given id. * * @param applicationId ID of the application to lock and get. * @param action Function which acts on the locked application. */ public void lockApplicationIfPresent(TenantAndApplicationId applicationId, Consumer<LockedApplication> action) { try (Lock lock = lock(applicationId)) { getApplication(applicationId).map(application -> new LockedApplication(application, lock)).ifPresent(action); } } /** * Acquire a locked application to modify and store, or throw an exception if no application has the given id. * * @param applicationId ID of the application to lock and require. * @param action Function which acts on the locked application. * @throws IllegalArgumentException when application does not exist. 
*/ public void lockApplicationOrThrow(TenantAndApplicationId applicationId, Consumer<LockedApplication> action) { try (Lock lock = lock(applicationId)) { action.accept(new LockedApplication(requireApplication(applicationId), lock)); } } /** * Tells config server to schedule a restart of all nodes in this deployment * * @param restartFilter Variables to filter which nodes to restart. */ public void restart(DeploymentId deploymentId, RestartFilter restartFilter) { configServer.restart(deploymentId, restartFilter); } /** * Asks the config server whether this deployment is currently healthy, i.e., serving traffic as usual. * If this cannot be ascertained, we must assumed it is not. */ public boolean isHealthy(DeploymentId deploymentId) { try { return ! isSuspended(deploymentId); } catch (RuntimeException e) { log.log(Level.WARNING, "Failed getting suspension status of " + deploymentId + ": " + Exceptions.toMessageString(e)); return false; } } /** * Asks the config server whether this deployment is currently <i>suspended</i>: * Not in a state where it should receive traffic. */ public boolean isSuspended(DeploymentId deploymentId) { return configServer.isSuspended(deploymentId); } /** Sets suspension status of the given deployment in its zone. 
*/ public void setSuspension(DeploymentId deploymentId, boolean suspend) { configServer.setSuspension(deploymentId, suspend); } /** Deactivate application in the given zone */ public void deactivate(ApplicationId id, ZoneId zone) { lockApplicationOrThrow(TenantAndApplicationId.from(id), application -> store(deactivate(application, id.instance(), zone))); } /** * Deactivates a locked application without storing it * * @return the application with the deployment in the given zone removed */ private LockedApplication deactivate(LockedApplication application, InstanceName instanceName, ZoneId zone) { DeploymentId id = new DeploymentId(application.get().id().instance(instanceName), zone); try { configServer.deactivate(id); } finally { controller.routing().of(id).configure(application.get().deploymentSpec()); if (zone.environment().isManuallyDeployed()) applicationStore.putMetaTombstone(id, clock.instant()); if (!zone.environment().isTest()) controller.notificationsDb().removeNotifications(NotificationSource.from(id)); } return application.with(instanceName, instance -> instance.withoutDeploymentIn(zone)); } public DeploymentTrigger deploymentTrigger() { return deploymentTrigger; } /** * Returns a lock which provides exclusive rights to changing this application. * Any operation which stores an application need to first acquire this lock, then read, modify * and store the application, and finally release (close) the lock. */ Lock lock(TenantAndApplicationId application) { return curator.lock(application); } /** * Returns a lock which provides exclusive rights to deploying this application to the given zone. */ private Lock lockForDeployment(ApplicationId application, ZoneId zone) { return curator.lockForDeployment(application, zone); } /** * Verifies that the application can be deployed to the tenant, following these rules: * * 1. Verify that the Athenz service can be launched by the config server * 2. 
If the principal is given, verify that the principal is tenant admin or admin of the tenant domain * 3. If the principal is not given, verify that the Athenz domain of the tenant equals Athenz domain given in deployment.xml * * @param tenantName tenant where application should be deployed * @param applicationPackage application package * @param deployer principal initiating the deployment, possibly empty */ public void verifyApplicationIdentityConfiguration(TenantName tenantName, Optional<InstanceName> instanceName, Optional<ZoneId> zoneId, ApplicationPackage applicationPackage, Optional<Principal> deployer) { Optional<AthenzDomain> identityDomain = applicationPackage.deploymentSpec().athenzDomain() .map(domain -> new AthenzDomain(domain.value())); if(identityDomain.isEmpty()) { return; } if(! (accessControl instanceof AthenzFacade)) { throw new IllegalArgumentException("Athenz domain and service specified in deployment.xml, but not supported by system."); } verifyAllowedLaunchAthenzService(applicationPackage.deploymentSpec()); Optional<AthenzUser> athenzUser = getUser(deployer); if (athenzUser.isPresent()) { var zone = zoneId.orElseThrow(() -> new IllegalArgumentException("Unable to evaluate access, no zone provided in deployment")); var serviceToLaunch = instanceName .flatMap(instance -> applicationPackage.deploymentSpec().instance(instance)) .flatMap(instanceSpec -> instanceSpec.athenzService(zone.environment(), zone.region())) .or(() -> applicationPackage.deploymentSpec().athenzService()) .map(service -> new AthenzService(identityDomain.get(), service.value())); if(serviceToLaunch.isPresent()) { if ( ! ((AthenzFacade) accessControl).canLaunch(athenzUser.get(), serviceToLaunch.get()) && ! ((AthenzFacade) accessControl).hasTenantAdminAccess(athenzUser.get(), identityDomain.get()) ) { throw new IllegalArgumentException("User " + athenzUser.get().getFullName() + " is not allowed to launch " + "service " + serviceToLaunch.get().getFullName() + ". 
" + "Please reach out to the domain admin."); } } else { throw new IllegalArgumentException("Athenz domain configured, but no service defined for deployment to " + zone.value()); } } else { Tenant tenant = controller.tenants().require(tenantName); AthenzDomain tenantDomain = ((AthenzTenant) tenant).domain(); if ( ! Objects.equals(tenantDomain, identityDomain.get())) throw new IllegalArgumentException("Athenz domain in deployment.xml: [" + identityDomain.get().getName() + "] " + "must match tenant domain: [" + tenantDomain.getName() + "]"); } } private TenantAndApplicationId dashToUnderscore(TenantAndApplicationId id) { return TenantAndApplicationId.from(id.tenant().value(), id.application().value().replaceAll("-", "_")); } private ApplicationId dashToUnderscore(ApplicationId id) { return dashToUnderscore(TenantAndApplicationId.from(id)).instance(id.instance()); } private QuotaUsage deploymentQuotaUsage(ZoneId zoneId, ApplicationId applicationId) { var application = configServer.nodeRepository().getApplication(zoneId, applicationId); return DeploymentQuotaCalculator.calculateQuotaUsage(application); } /* * Get the AthenzUser from this principal or Optional.empty if this does not represent a user. */ private Optional<AthenzUser> getUser(Optional<Principal> deployer) { return deployer .filter(AthenzPrincipal.class::isInstance) .map(AthenzPrincipal.class::cast) .map(AthenzPrincipal::getIdentity) .filter(AthenzUser.class::isInstance) .map(AthenzUser.class::cast); } /* * Verifies that the configured athenz service (if any) can be launched. 
*/ private void verifyAllowedLaunchAthenzService(DeploymentSpec deploymentSpec) { deploymentSpec.athenzDomain().ifPresent(domain -> { controller.zoneRegistry().zones().reachable().ids().forEach(zone -> { AthenzIdentity configServerAthenzIdentity = controller.zoneRegistry().getConfigServerHttpsIdentity(zone); deploymentSpec.athenzService().ifPresent(service -> { verifyAthenzServiceCanBeLaunchedBy(configServerAthenzIdentity, new AthenzService(domain.value(), service.value())); }); deploymentSpec.instances().forEach(spec -> { spec.athenzService(zone.environment(), zone.region()).ifPresent(service -> { verifyAthenzServiceCanBeLaunchedBy(configServerAthenzIdentity, new AthenzService(domain.value(), service.value())); }); }); }); }); } private void verifyAthenzServiceCanBeLaunchedBy(AthenzIdentity configServerAthenzIdentity, AthenzService athenzService) { if ( ! ((AthenzFacade) accessControl).canLaunch(configServerAthenzIdentity, athenzService)) throw new IllegalArgumentException("Not allowed to launch Athenz service " + athenzService.getFullName()); } /** Returns the latest known version within the given major, which is not newer than the system version. */ public Optional<Version> lastCompatibleVersion(int targetMajorVersion) { VersionStatus versions = controller.readVersionStatus(); Version systemVersion = controller.systemVersion(versions); return versions.versions().stream() .map(VespaVersion::versionNumber) .filter(version -> version.getMajor() == targetMajorVersion) .filter(version -> ! 
version.isAfter(systemVersion)) .max(naturalOrder()); } /** Extract deployment warnings metric from deployment result */ private static Map<DeploymentMetrics.Warning, Integer> warningsFrom(ActivateResult result) { if (result.prepareResponse().log == null) return Map.of(); Map<DeploymentMetrics.Warning, Integer> warnings = new HashMap<>(); for (Log log : result.prepareResponse().log) { if (!"warn".equalsIgnoreCase(log.level) && !"warning".equalsIgnoreCase(log.level)) continue; warnings.merge(DeploymentMetrics.Warning.all, 1, Integer::sum); } return Map.copyOf(warnings); } }
class ApplicationController { private static final Logger log = Logger.getLogger(ApplicationController.class.getName()); /** The controller owning this */ private final Controller controller; /** For persistence */ private final CuratorDb curator; private final ArtifactRepository artifactRepository; private final ApplicationStore applicationStore; private final AccessControl accessControl; private final ConfigServer configServer; private final Clock clock; private final DeploymentTrigger deploymentTrigger; private final ApplicationPackageValidator applicationPackageValidator; private final EndpointCertificates endpointCertificates; private final StringFlag dockerImageRepoFlag; private final BillingController billingController; ApplicationController(Controller controller, CuratorDb curator, AccessControl accessControl, Clock clock, FlagSource flagSource, BillingController billingController) { this.controller = Objects.requireNonNull(controller); this.curator = Objects.requireNonNull(curator); this.accessControl = Objects.requireNonNull(accessControl); this.configServer = controller.serviceRegistry().configServer(); this.clock = Objects.requireNonNull(clock); this.billingController = Objects.requireNonNull(billingController); artifactRepository = controller.serviceRegistry().artifactRepository(); applicationStore = controller.serviceRegistry().applicationStore(); dockerImageRepoFlag = PermanentFlags.DOCKER_IMAGE_REPO.bindTo(flagSource); deploymentTrigger = new DeploymentTrigger(controller, clock); applicationPackageValidator = new ApplicationPackageValidator(controller); endpointCertificates = new EndpointCertificates(controller, controller.serviceRegistry().endpointCertificateProvider(), controller.serviceRegistry().endpointCertificateValidator()); Once.after(Duration.ofMinutes(1), () -> { Instant start = clock.instant(); int count = 0; for (TenantAndApplicationId id : curator.readApplicationIds()) { lockApplicationIfPresent(id, application -> { for (InstanceName 
instance : application.get().deploymentSpec().instanceNames()) if (!application.get().instances().containsKey(instance)) application = withNewInstance(application, id.instance(instance)); store(application); }); count++; } log.log(Level.INFO, Text.format("Wrote %d applications in %s", count, Duration.between(start, clock.instant()))); }); } /** Returns the application with the given id, or null if it is not present */ public Optional<Application> getApplication(TenantAndApplicationId id) { return curator.readApplication(id); } /** Returns the instance with the given id, or null if it is not present */ public Optional<Instance> getInstance(ApplicationId id) { return getApplication(TenantAndApplicationId.from(id)).flatMap(application -> application.get(id.instance())); } /** * Triggers reindexing for the given document types in the given clusters, for the given application. * * If no clusters are given, reindexing is triggered for the entire application; otherwise * if no documents types are given, reindexing is triggered for all given clusters; otherwise * reindexing is triggered for the cartesian product of the given clusters and document types. */ public void reindex(ApplicationId id, ZoneId zoneId, List<String> clusterNames, List<String> documentTypes, boolean indexedOnly, Double speed) { configServer.reindex(new DeploymentId(id, zoneId), clusterNames, documentTypes, indexedOnly, speed); } /** Returns the reindexing status for the given application in the given zone. */ public ApplicationReindexing applicationReindexing(ApplicationId id, ZoneId zoneId) { return configServer.getReindexing(new DeploymentId(id, zoneId)); } /** Enables reindexing for the given application in the given zone. */ public void enableReindexing(ApplicationId id, ZoneId zoneId) { configServer.enableReindexing(new DeploymentId(id, zoneId)); } /** Disables reindexing for the given application in the given zone. 
*/ public void disableReindexing(ApplicationId id, ZoneId zoneId) { configServer.disableReindexing(new DeploymentId(id, zoneId)); } /** * Returns the application with the given id * * @throws IllegalArgumentException if it does not exist */ public Application requireApplication(TenantAndApplicationId id) { return getApplication(id).orElseThrow(() -> new IllegalArgumentException(id + " not found")); } /** * Returns the instance with the given id * * @throws IllegalArgumentException if it does not exist */ public Instance requireInstance(ApplicationId id) { return getInstance(id).orElseThrow(() -> new IllegalArgumentException(id + " not found")); } /** Returns a snapshot of all applications */ public List<Application> asList() { return curator.readApplications(false); } /** * Returns a snapshot of all readable applications. Unlike {@link ApplicationController * applications that cannot currently be read (e.g. due to serialization issues) and may return an incomplete * snapshot. * * This should only be used in cases where acting on a subset of applications is better than none. */ public List<Application> readable() { return curator.readApplications(true); } /** Returns the ID of all known applications. */ public List<TenantAndApplicationId> idList() { return curator.readApplicationIds(); } /** Returns a snapshot of all applications of a tenant */ public List<Application> asList(TenantName tenant) { return curator.readApplications(tenant); } public ArtifactRepository artifacts() { return artifactRepository; } public ApplicationStore applicationStore() { return applicationStore; } /** Returns all currently reachable content clusters among the given deployments. 
*/ public Map<ZoneId, List<String>> reachableContentClustersByZone(Collection<DeploymentId> ids) { Map<ZoneId, List<String>> clusters = new TreeMap<>(Comparator.comparing(ZoneId::value)); for (DeploymentId id : ids) if (isHealthy(id)) clusters.put(id.zoneId(), List.copyOf(configServer.getContentClusters(id))); return Collections.unmodifiableMap(clusters); } /** Reads the oldest installed platform for the given application and zone from job history, or a node repo. */ private Optional<Version> oldestInstalledPlatform(JobStatus job) { Version oldest = null; for (Run run : job.runs().descendingMap().values()) { Version version = run.versions().targetPlatform(); if (oldest == null || version.isBefore(oldest)) oldest = version; if (run.status() == RunStatus.success) return Optional.of(oldest); } return oldestInstalledPlatform(job.id()); } /** Reads the oldest installed platform for the given application and zone from the node repo of that zone. */ private Optional<Version> oldestInstalledPlatform(JobId job) { return configServer.nodeRepository().list(job.type().zone(controller.system()), NodeFilter.all() .applications(job.application()) .states(active, reserved)) .stream() .map(Node::currentVersion) .filter(version -> ! version.isEmpty()) .min(naturalOrder()); } /** Returns the oldest Vespa version installed on any active or reserved production node for the given application. */ public Version oldestInstalledPlatform(TenantAndApplicationId id) { return controller.jobController().deploymentStatus(requireApplication(id)).jobs() .production().asList().stream() .map(this::oldestInstalledPlatform) .flatMap(Optional::stream) .min(naturalOrder()) .orElse(controller.readSystemVersion()); } /** * Creates a new application for an existing tenant. 
* * @throws IllegalArgumentException if the application already exists */ public Application createApplication(TenantAndApplicationId id, Credentials credentials) { try (Lock lock = lock(id)) { if (getApplication(id).isPresent()) throw new IllegalArgumentException("Could not create '" + id + "': Application already exists"); if (getApplication(dashToUnderscore(id)).isPresent()) throw new IllegalArgumentException("Could not create '" + id + "': Application " + dashToUnderscore(id) + " already exists"); com.yahoo.vespa.hosted.controller.api.identifiers.ApplicationId.validate(id.application().value()); if (controller.tenants().get(id.tenant()).isEmpty()) throw new IllegalArgumentException("Could not create '" + id + "': This tenant does not exist"); accessControl.createApplication(id, credentials); LockedApplication locked = new LockedApplication(new Application(id, clock.instant()), lock); store(locked); log.info("Created " + locked); return locked.get(); } } /** * Creates a new instance for an existing application. * * @throws IllegalArgumentException if the instance already exists, or has an invalid instance name. 
*/ public void createInstance(ApplicationId id) { lockApplicationOrThrow(TenantAndApplicationId.from(id), application -> { store(withNewInstance(application, id)); }); } /** Returns given application with a new instance */ public LockedApplication withNewInstance(LockedApplication application, ApplicationId instance) { if (instance.instance().isTester()) throw new IllegalArgumentException("'" + instance + "' is a tester application!"); InstanceId.validate(instance.instance().value()); if (getInstance(instance).isPresent()) throw new IllegalArgumentException("Could not create '" + instance + "': Instance already exists"); if (getInstance(dashToUnderscore(instance)).isPresent()) throw new IllegalArgumentException("Could not create '" + instance + "': Instance " + dashToUnderscore(instance) + " already exists"); log.info("Created " + instance); return application.withNewInstance(instance.instance()); } /** Deploys an application package for an existing application instance. */ public ActivateResult deploy(JobId job, boolean deploySourceVersions) { if (job.application().instance().isTester()) throw new IllegalArgumentException("'" + job.application() + "' is a tester application!"); TenantAndApplicationId applicationId = TenantAndApplicationId.from(job.application()); ZoneId zone = job.type().zone(controller.system()); DeploymentId deployment = new DeploymentId(job.application(), zone); try (Lock deploymentLock = lockForDeployment(job.application(), zone)) { Set<ContainerEndpoint> containerEndpoints; Optional<EndpointCertificateMetadata> endpointCertificateMetadata; Run run = controller.jobController().last(job) .orElseThrow(() -> new IllegalStateException("No known run of '" + job + "'")); if (run.hasEnded()) throw new IllegalStateException("No deployment expected for " + job + " now, as no job is running"); Version platform = run.versions().sourcePlatform().filter(__ -> deploySourceVersions).orElse(run.versions().targetPlatform()); ApplicationVersion revision = 
run.versions().sourceApplication().filter(__ -> deploySourceVersions).orElse(run.versions().targetApplication()); ApplicationPackage applicationPackage = new ApplicationPackage(applicationStore.get(deployment, revision)); try (Lock lock = lock(applicationId)) { LockedApplication application = new LockedApplication(requireApplication(applicationId), lock); Instance instance = application.get().require(job.application().instance()); if ( ! applicationPackage.trustedCertificates().isEmpty() && run.testerCertificate().isPresent()) applicationPackage = applicationPackage.withTrustedCertificate(run.testerCertificate().get()); endpointCertificateMetadata = endpointCertificates.getMetadata(instance, zone, applicationPackage.deploymentSpec()); containerEndpoints = controller.routing().of(deployment).prepare(application); } ActivateResult result = deploy(job.application(), applicationPackage, zone, platform, containerEndpoints, endpointCertificateMetadata, run.isDryRun()); var quotaUsage = deploymentQuotaUsage(zone, job.application()); NotificationSource source = zone.environment().isManuallyDeployed() ? 
NotificationSource.from(deployment) : NotificationSource.from(applicationId); @SuppressWarnings("deprecation") List<String> warnings = Optional.ofNullable(result.prepareResponse().log) .map(logs -> logs.stream() .filter(log -> log.applicationPackage) .filter(log -> LogLevel.parse(log.level).intValue() >= Level.WARNING.intValue()) .map(log -> log.message) .sorted() .distinct() .collect(Collectors.toList())) .orElseGet(List::of); if (warnings.isEmpty()) controller.notificationsDb().removeNotification(source, Notification.Type.applicationPackage); else controller.notificationsDb().setNotification(source, Notification.Type.applicationPackage, Notification.Level.warning, warnings); lockApplicationOrThrow(applicationId, application -> store(application.with(job.application().instance(), instance -> instance.withNewDeployment(zone, revision, platform, clock.instant(), warningsFrom(result), quotaUsage)))); return result; } } /** Stores the deployment spec and validation overrides from the application package, and runs cleanup. 
*/ /** Deploy a system application to given zone */ public void deploy(SystemApplication application, ZoneId zone, Version version, boolean allowDowngrade) { if (application.hasApplicationPackage()) { deploySystemApplicationPackage(application, zone, version); } else { configServer.nodeRepository().upgrade(zone, application.nodeType(), version, allowDowngrade); } } /** Deploy a system application to given zone */ public ActivateResult deploySystemApplicationPackage(SystemApplication application, ZoneId zone, Version version) { if (application.hasApplicationPackage()) { ApplicationPackage applicationPackage = new ApplicationPackage( artifactRepository.getSystemApplicationPackage(application.id(), zone, version) ); return deploy(application.id(), applicationPackage, zone, version, Set.of(), /* No application cert */ Optional.empty(), false); } else { throw new RuntimeException("This system application does not have an application package: " + application.id().toShortString()); } } /** Deploys the given tester application to the given zone. 
*/ public ActivateResult deployTester(TesterId tester, ApplicationPackage applicationPackage, ZoneId zone, Version platform) { return deploy(tester.id(), applicationPackage, zone, platform, Set.of(), /* No application cert for tester*/ Optional.empty(), false); } private ActivateResult deploy(ApplicationId application, ApplicationPackage applicationPackage, ZoneId zone, Version platform, Set<ContainerEndpoint> endpoints, Optional<EndpointCertificateMetadata> endpointCertificateMetadata, boolean dryRun) { DeploymentId deployment = new DeploymentId(application, zone); try { Optional<DockerImage> dockerImageRepo = Optional.ofNullable( dockerImageRepoFlag .with(FetchVector.Dimension.ZONE_ID, zone.value()) .with(FetchVector.Dimension.APPLICATION_ID, application.serializedForm()) .value()) .filter(s -> !s.isBlank()) .map(DockerImage::fromString); Optional<AthenzDomain> domain = controller.tenants().get(application.tenant()) .filter(tenant-> tenant instanceof AthenzTenant) .map(tenant -> ((AthenzTenant)tenant).domain()); if (zone.environment().isManuallyDeployed()) controller.applications().applicationStore().putMeta(deployment, clock.instant(), applicationPackage.metaDataZip()); Quota deploymentQuota = DeploymentQuotaCalculator.calculate(billingController.getQuota(application.tenant()), asList(application.tenant()), application, zone, applicationPackage.deploymentSpec()); List<TenantSecretStore> tenantSecretStores = controller.tenants() .get(application.tenant()) .filter(tenant-> tenant instanceof CloudTenant) .map(tenant -> ((CloudTenant) tenant).tenantSecretStores()) .orElse(List.of()); List<X509Certificate> operatorCertificates = controller.supportAccess().activeGrantsFor(deployment).stream() .map(SupportAccessGrant::certificate) .collect(toList()); ConfigServer.PreparedApplication preparedApplication = configServer.deploy(new DeploymentData(application, zone, applicationPackage.zippedContent(), platform, endpoints, endpointCertificateMetadata, dockerImageRepo, 
domain, deploymentQuota, tenantSecretStores, operatorCertificates, dryRun)); return new ActivateResult(new RevisionId(applicationPackage.hash()), preparedApplication.prepareResponse(), applicationPackage.zippedContent().length); } finally { if ( ! application.instance().isTester()) { controller.routing().of(deployment).configure(applicationPackage.deploymentSpec()); } } } private LockedApplication withoutDeletedDeployments(LockedApplication application, InstanceName instance) { DeploymentSpec deploymentSpec = application.get().deploymentSpec(); List<ZoneId> deploymentsToRemove = application.get().require(instance).productionDeployments().values().stream() .map(Deployment::zone) .filter(zone -> deploymentSpec.instance(instance).isEmpty() || ! deploymentSpec.requireInstance(instance).deploysTo(zone.environment(), zone.region())) .collect(toList()); if (deploymentsToRemove.isEmpty()) return application; if ( ! application.get().validationOverrides().allows(ValidationId.deploymentRemoval, clock.instant())) throw new IllegalArgumentException(ValidationId.deploymentRemoval.value() + ": " + application.get().require(instance) + " is deployed in " + deploymentsToRemove.stream() .map(zone -> zone.region().value()) .collect(joining(", ")) + ", but does not include " + (deploymentsToRemove.size() > 1 ? "these zones" : "this zone") + " in deployment.xml. " + ValidationOverrides.toAllowMessage(ValidationId.deploymentRemoval)); boolean removeInstance = ! deploymentSpec.instanceNames().contains(instance) && application.get().require(instance).deployments().size() == deploymentsToRemove.size(); for (ZoneId zone : deploymentsToRemove) application = deactivate(application, instance, zone); if (removeInstance) application = application.without(instance); return application; } /** * Deletes the the given application. All known instances of the applications will be deleted. 
* * @throws IllegalArgumentException if the application has deployments or the caller is not authorized */ public void deleteApplication(TenantAndApplicationId id, Credentials credentials) { deleteApplication(id, Optional.of(credentials)); } public void deleteApplication(TenantAndApplicationId id, Optional<Credentials> credentials) { lockApplicationOrThrow(id, application -> { var deployments = application.get().instances().values().stream() .filter(instance -> ! instance.deployments().isEmpty()) .collect(toMap(instance -> instance.name(), instance -> instance.deployments().keySet().stream() .map(ZoneId::toString) .collect(joining(", ")))); if ( ! deployments.isEmpty()) throw new IllegalArgumentException("Could not delete '" + application + "': It has active deployments: " + deployments); for (Instance instance : application.get().instances().values()) { controller.routing().removeEndpointsInDns(application.get(), instance.name()); application = application.without(instance.name()); } applicationStore.removeAll(id.tenant(), id.application()); applicationStore.removeAllTesters(id.tenant(), id.application()); applicationStore.putMetaTombstone(id.tenant(), id.application(), clock.instant()); credentials.ifPresent(creds -> accessControl.deleteApplication(id, creds)); curator.removeApplication(id); controller.jobController().collectGarbage(); controller.notificationsDb().removeNotifications(NotificationSource.from(id)); log.info("Deleted " + id); }); } /** * Deletes the the given application instance. * * @throws IllegalArgumentException if the application has deployments or the caller is not authorized * @throws NotExistsException if the instance does not exist */ public void deleteInstance(ApplicationId instanceId) { if (getInstance(instanceId).isEmpty()) throw new NotExistsException("Could not delete instance '" + instanceId + "': Instance not found"); lockApplicationOrThrow(TenantAndApplicationId.from(instanceId), application -> { if ( ! 
application.get().require(instanceId.instance()).deployments().isEmpty()) throw new IllegalArgumentException("Could not delete '" + application + "': It has active deployments in: " + application.get().require(instanceId.instance()).deployments().keySet().stream().map(ZoneId::toString) .sorted().collect(joining(", "))); if ( ! application.get().deploymentSpec().equals(DeploymentSpec.empty) && application.get().deploymentSpec().instanceNames().contains(instanceId.instance())) throw new IllegalArgumentException("Can not delete '" + instanceId + "', which is specified in 'deployment.xml'; remove it there first"); controller.routing().removeEndpointsInDns(application.get(), instanceId.instance()); curator.writeApplication(application.without(instanceId.instance()).get()); controller.jobController().collectGarbage(); controller.notificationsDb().removeNotifications(NotificationSource.from(instanceId)); log.info("Deleted " + instanceId); }); } /** * Replace any previous version of this application by this instance * * @param application a locked application to store */ public void store(LockedApplication application) { curator.writeApplication(application.get()); } /** * Acquire a locked application to modify and store, if there is an application with the given id. * * @param applicationId ID of the application to lock and get. * @param action Function which acts on the locked application. */ public void lockApplicationIfPresent(TenantAndApplicationId applicationId, Consumer<LockedApplication> action) { try (Lock lock = lock(applicationId)) { getApplication(applicationId).map(application -> new LockedApplication(application, lock)).ifPresent(action); } } /** * Acquire a locked application to modify and store, or throw an exception if no application has the given id. * * @param applicationId ID of the application to lock and require. * @param action Function which acts on the locked application. * @throws IllegalArgumentException when application does not exist. 
*/ public void lockApplicationOrThrow(TenantAndApplicationId applicationId, Consumer<LockedApplication> action) { try (Lock lock = lock(applicationId)) { action.accept(new LockedApplication(requireApplication(applicationId), lock)); } } /** * Tells config server to schedule a restart of all nodes in this deployment * * @param restartFilter Variables to filter which nodes to restart. */ public void restart(DeploymentId deploymentId, RestartFilter restartFilter) { configServer.restart(deploymentId, restartFilter); } /** * Asks the config server whether this deployment is currently healthy, i.e., serving traffic as usual. * If this cannot be ascertained, we must assumed it is not. */ public boolean isHealthy(DeploymentId deploymentId) { try { return ! isSuspended(deploymentId); } catch (RuntimeException e) { log.log(Level.WARNING, "Failed getting suspension status of " + deploymentId + ": " + Exceptions.toMessageString(e)); return false; } } /** * Asks the config server whether this deployment is currently <i>suspended</i>: * Not in a state where it should receive traffic. */ public boolean isSuspended(DeploymentId deploymentId) { return configServer.isSuspended(deploymentId); } /** Sets suspension status of the given deployment in its zone. 
*/ public void setSuspension(DeploymentId deploymentId, boolean suspend) { configServer.setSuspension(deploymentId, suspend); } /** Deactivate application in the given zone */ public void deactivate(ApplicationId id, ZoneId zone) { lockApplicationOrThrow(TenantAndApplicationId.from(id), application -> store(deactivate(application, id.instance(), zone))); } /** * Deactivates a locked application without storing it * * @return the application with the deployment in the given zone removed */ private LockedApplication deactivate(LockedApplication application, InstanceName instanceName, ZoneId zone) { DeploymentId id = new DeploymentId(application.get().id().instance(instanceName), zone); try { configServer.deactivate(id); } finally { controller.routing().of(id).configure(application.get().deploymentSpec()); if (zone.environment().isManuallyDeployed()) applicationStore.putMetaTombstone(id, clock.instant()); if (!zone.environment().isTest()) controller.notificationsDb().removeNotifications(NotificationSource.from(id)); } return application.with(instanceName, instance -> instance.withoutDeploymentIn(zone)); } public DeploymentTrigger deploymentTrigger() { return deploymentTrigger; } /** * Returns a lock which provides exclusive rights to changing this application. * Any operation which stores an application need to first acquire this lock, then read, modify * and store the application, and finally release (close) the lock. */ Lock lock(TenantAndApplicationId application) { return curator.lock(application); } /** * Returns a lock which provides exclusive rights to deploying this application to the given zone. */ private Lock lockForDeployment(ApplicationId application, ZoneId zone) { return curator.lockForDeployment(application, zone); } /** * Verifies that the application can be deployed to the tenant, following these rules: * * 1. Verify that the Athenz service can be launched by the config server * 2. 
If the principal is given, verify that the principal is tenant admin or admin of the tenant domain * 3. If the principal is not given, verify that the Athenz domain of the tenant equals Athenz domain given in deployment.xml * * @param tenantName tenant where application should be deployed * @param applicationPackage application package * @param deployer principal initiating the deployment, possibly empty */ public void verifyApplicationIdentityConfiguration(TenantName tenantName, Optional<InstanceName> instanceName, Optional<ZoneId> zoneId, ApplicationPackage applicationPackage, Optional<Principal> deployer) { Optional<AthenzDomain> identityDomain = applicationPackage.deploymentSpec().athenzDomain() .map(domain -> new AthenzDomain(domain.value())); if(identityDomain.isEmpty()) { return; } if(! (accessControl instanceof AthenzFacade)) { throw new IllegalArgumentException("Athenz domain and service specified in deployment.xml, but not supported by system."); } verifyAllowedLaunchAthenzService(applicationPackage.deploymentSpec()); Optional<AthenzUser> athenzUser = getUser(deployer); if (athenzUser.isPresent()) { var zone = zoneId.orElseThrow(() -> new IllegalArgumentException("Unable to evaluate access, no zone provided in deployment")); var serviceToLaunch = instanceName .flatMap(instance -> applicationPackage.deploymentSpec().instance(instance)) .flatMap(instanceSpec -> instanceSpec.athenzService(zone.environment(), zone.region())) .or(() -> applicationPackage.deploymentSpec().athenzService()) .map(service -> new AthenzService(identityDomain.get(), service.value())); if(serviceToLaunch.isPresent()) { if ( ! ((AthenzFacade) accessControl).canLaunch(athenzUser.get(), serviceToLaunch.get()) && ! ((AthenzFacade) accessControl).hasTenantAdminAccess(athenzUser.get(), identityDomain.get()) ) { throw new IllegalArgumentException("User " + athenzUser.get().getFullName() + " is not allowed to launch " + "service " + serviceToLaunch.get().getFullName() + ". 
" + "Please reach out to the domain admin."); } } else { throw new IllegalArgumentException("Athenz domain configured, but no service defined for deployment to " + zone.value()); } } else { Tenant tenant = controller.tenants().require(tenantName); AthenzDomain tenantDomain = ((AthenzTenant) tenant).domain(); if ( ! Objects.equals(tenantDomain, identityDomain.get())) throw new IllegalArgumentException("Athenz domain in deployment.xml: [" + identityDomain.get().getName() + "] " + "must match tenant domain: [" + tenantDomain.getName() + "]"); } } private TenantAndApplicationId dashToUnderscore(TenantAndApplicationId id) { return TenantAndApplicationId.from(id.tenant().value(), id.application().value().replaceAll("-", "_")); } private ApplicationId dashToUnderscore(ApplicationId id) { return dashToUnderscore(TenantAndApplicationId.from(id)).instance(id.instance()); } private QuotaUsage deploymentQuotaUsage(ZoneId zoneId, ApplicationId applicationId) { var application = configServer.nodeRepository().getApplication(zoneId, applicationId); return DeploymentQuotaCalculator.calculateQuotaUsage(application); } /* * Get the AthenzUser from this principal or Optional.empty if this does not represent a user. */ private Optional<AthenzUser> getUser(Optional<Principal> deployer) { return deployer .filter(AthenzPrincipal.class::isInstance) .map(AthenzPrincipal.class::cast) .map(AthenzPrincipal::getIdentity) .filter(AthenzUser.class::isInstance) .map(AthenzUser.class::cast); } /* * Verifies that the configured athenz service (if any) can be launched. 
*/ private void verifyAllowedLaunchAthenzService(DeploymentSpec deploymentSpec) { deploymentSpec.athenzDomain().ifPresent(domain -> { controller.zoneRegistry().zones().reachable().ids().forEach(zone -> { AthenzIdentity configServerAthenzIdentity = controller.zoneRegistry().getConfigServerHttpsIdentity(zone); deploymentSpec.athenzService().ifPresent(service -> { verifyAthenzServiceCanBeLaunchedBy(configServerAthenzIdentity, new AthenzService(domain.value(), service.value())); }); deploymentSpec.instances().forEach(spec -> { spec.athenzService(zone.environment(), zone.region()).ifPresent(service -> { verifyAthenzServiceCanBeLaunchedBy(configServerAthenzIdentity, new AthenzService(domain.value(), service.value())); }); }); }); }); } private void verifyAthenzServiceCanBeLaunchedBy(AthenzIdentity configServerAthenzIdentity, AthenzService athenzService) { if ( ! ((AthenzFacade) accessControl).canLaunch(configServerAthenzIdentity, athenzService)) throw new IllegalArgumentException("Not allowed to launch Athenz service " + athenzService.getFullName()); } /** Returns the latest known version within the given major, which is not newer than the system version. */ public Optional<Version> lastCompatibleVersion(int targetMajorVersion) { VersionStatus versions = controller.readVersionStatus(); Version systemVersion = controller.systemVersion(versions); return versions.versions().stream() .map(VespaVersion::versionNumber) .filter(version -> version.getMajor() == targetMajorVersion) .filter(version -> ! 
version.isAfter(systemVersion)) .max(naturalOrder()); } /** Extract deployment warnings metric from deployment result */ private static Map<DeploymentMetrics.Warning, Integer> warningsFrom(ActivateResult result) { if (result.prepareResponse().log == null) return Map.of(); Map<DeploymentMetrics.Warning, Integer> warnings = new HashMap<>(); for (Log log : result.prepareResponse().log) { if (!"warn".equalsIgnoreCase(log.level) && !"warning".equalsIgnoreCase(log.level)) continue; warnings.merge(DeploymentMetrics.Warning.all, 1, Integer::sum); } return Map.copyOf(warnings); } }
That is, only the application versions which were deployed directly.
public LockedApplication storeWithUpdatedConfig(LockedApplication application, ApplicationPackage applicationPackage) { applicationPackageValidator.validate(application.get(), applicationPackage, clock.instant()); application = application.with(applicationPackage.deploymentSpec()); application = application.with(applicationPackage.validationOverrides()); var existingInstances = application.get().instances().keySet(); var declaredInstances = applicationPackage.deploymentSpec().instanceNames(); for (var name : declaredInstances) if ( ! existingInstances.contains(name)) application = withNewInstance(application, application.get().id().instance(name)); for (InstanceName name : existingInstances) { application = withoutDeletedDeployments(application, name); } controller.jobController().deploymentStatus(application.get()); for (Notification notification : controller.notificationsDb().listNotifications(NotificationSource.from(application.get().id()), true)) { if ( ! notification.source().instance().map(declaredInstances::contains).orElse(false)) controller.notificationsDb().removeNotifications(notification.source()); if (notification.source().instance().isPresent() && ! notification.source().zoneId().map(application.get().require(notification.source().instance().get()).deployments()::containsKey).orElse(false)) controller.notificationsDb().removeNotifications(notification.source()); } var oldestVersionByDirectlyDeployed = application.get() .instances() .values() .stream() .flatMap(instance -> instance.deployments().values().stream()) .map(Deployment::applicationVersion) .collect(toMap( ApplicationVersion::isDeployedDirectly, Function.identity(), (v1, v2) -> v1.compareTo(v2) < 0 ? 
v1 : v2 )); var olderVersions = application.get().versions() .stream() .filter(version -> version.compareTo(oldestVersionByDirectlyDeployed.getOrDefault(version.isDeployedDirectly(), ApplicationVersion.unknown)) < 0) .sorted() .collect(Collectors.toList()); for (int i = 0; i < olderVersions.size() - 1; i++) { application = application.withoutVersion(olderVersions.get(i)); } store(application); return application; }
(v1, v2) -> v1.compareTo(v2) < 0 ? v1 : v2
public LockedApplication storeWithUpdatedConfig(LockedApplication application, ApplicationPackage applicationPackage) { applicationPackageValidator.validate(application.get(), applicationPackage, clock.instant()); application = application.with(applicationPackage.deploymentSpec()); application = application.with(applicationPackage.validationOverrides()); var existingInstances = application.get().instances().keySet(); var declaredInstances = applicationPackage.deploymentSpec().instanceNames(); for (var name : declaredInstances) if ( ! existingInstances.contains(name)) application = withNewInstance(application, application.get().id().instance(name)); for (InstanceName name : existingInstances) { application = withoutDeletedDeployments(application, name); } controller.jobController().deploymentStatus(application.get()); for (Notification notification : controller.notificationsDb().listNotifications(NotificationSource.from(application.get().id()), true)) { if ( ! notification.source().instance().map(declaredInstances::contains).orElse(false)) controller.notificationsDb().removeNotifications(notification.source()); if (notification.source().instance().isPresent() && ! 
notification.source().zoneId().map(application.get().require(notification.source().instance().get()).deployments()::containsKey).orElse(false)) controller.notificationsDb().removeNotifications(notification.source()); } var oldestDeployedVersion = application.get() .instances() .values() .stream() .flatMap(instance -> instance.deployments().values().stream()) .map(Deployment::applicationVersion) .filter(version -> !version.isDeployedDirectly()) .sorted() .findFirst() .orElse(ApplicationVersion.unknown); var olderVersions = application.get().versions() .stream() .filter(version -> version.compareTo(oldestDeployedVersion) < 0) .sorted() .collect(Collectors.toList()); for (int i = 0; i < olderVersions.size() - 1; i++) { application = application.withoutVersion(olderVersions.get(i)); } store(application); return application; }
class ApplicationController { private static final Logger log = Logger.getLogger(ApplicationController.class.getName()); /** The controller owning this */ private final Controller controller; /** For persistence */ private final CuratorDb curator; private final ArtifactRepository artifactRepository; private final ApplicationStore applicationStore; private final AccessControl accessControl; private final ConfigServer configServer; private final Clock clock; private final DeploymentTrigger deploymentTrigger; private final ApplicationPackageValidator applicationPackageValidator; private final EndpointCertificates endpointCertificates; private final StringFlag dockerImageRepoFlag; private final BillingController billingController; ApplicationController(Controller controller, CuratorDb curator, AccessControl accessControl, Clock clock, FlagSource flagSource, BillingController billingController) { this.controller = Objects.requireNonNull(controller); this.curator = Objects.requireNonNull(curator); this.accessControl = Objects.requireNonNull(accessControl); this.configServer = controller.serviceRegistry().configServer(); this.clock = Objects.requireNonNull(clock); this.billingController = Objects.requireNonNull(billingController); artifactRepository = controller.serviceRegistry().artifactRepository(); applicationStore = controller.serviceRegistry().applicationStore(); dockerImageRepoFlag = PermanentFlags.DOCKER_IMAGE_REPO.bindTo(flagSource); deploymentTrigger = new DeploymentTrigger(controller, clock); applicationPackageValidator = new ApplicationPackageValidator(controller); endpointCertificates = new EndpointCertificates(controller, controller.serviceRegistry().endpointCertificateProvider(), controller.serviceRegistry().endpointCertificateValidator()); Once.after(Duration.ofMinutes(1), () -> { Instant start = clock.instant(); int count = 0; for (TenantAndApplicationId id : curator.readApplicationIds()) { lockApplicationIfPresent(id, application -> { for (InstanceName 
instance : application.get().deploymentSpec().instanceNames()) if (!application.get().instances().containsKey(instance)) application = withNewInstance(application, id.instance(instance)); store(application); }); count++; } log.log(Level.INFO, Text.format("Wrote %d applications in %s", count, Duration.between(start, clock.instant()))); }); } /** Returns the application with the given id, or null if it is not present */ public Optional<Application> getApplication(TenantAndApplicationId id) { return curator.readApplication(id); } /** Returns the instance with the given id, or null if it is not present */ public Optional<Instance> getInstance(ApplicationId id) { return getApplication(TenantAndApplicationId.from(id)).flatMap(application -> application.get(id.instance())); } /** * Triggers reindexing for the given document types in the given clusters, for the given application. * * If no clusters are given, reindexing is triggered for the entire application; otherwise * if no documents types are given, reindexing is triggered for all given clusters; otherwise * reindexing is triggered for the cartesian product of the given clusters and document types. */ public void reindex(ApplicationId id, ZoneId zoneId, List<String> clusterNames, List<String> documentTypes, boolean indexedOnly, Double speed) { configServer.reindex(new DeploymentId(id, zoneId), clusterNames, documentTypes, indexedOnly, speed); } /** Returns the reindexing status for the given application in the given zone. */ public ApplicationReindexing applicationReindexing(ApplicationId id, ZoneId zoneId) { return configServer.getReindexing(new DeploymentId(id, zoneId)); } /** Enables reindexing for the given application in the given zone. */ public void enableReindexing(ApplicationId id, ZoneId zoneId) { configServer.enableReindexing(new DeploymentId(id, zoneId)); } /** Disables reindexing for the given application in the given zone. 
*/ public void disableReindexing(ApplicationId id, ZoneId zoneId) { configServer.disableReindexing(new DeploymentId(id, zoneId)); } /** * Returns the application with the given id * * @throws IllegalArgumentException if it does not exist */ public Application requireApplication(TenantAndApplicationId id) { return getApplication(id).orElseThrow(() -> new IllegalArgumentException(id + " not found")); } /** * Returns the instance with the given id * * @throws IllegalArgumentException if it does not exist */ public Instance requireInstance(ApplicationId id) { return getInstance(id).orElseThrow(() -> new IllegalArgumentException(id + " not found")); } /** Returns a snapshot of all applications */ public List<Application> asList() { return curator.readApplications(false); } /** * Returns a snapshot of all readable applications. Unlike {@link ApplicationController * applications that cannot currently be read (e.g. due to serialization issues) and may return an incomplete * snapshot. * * This should only be used in cases where acting on a subset of applications is better than none. */ public List<Application> readable() { return curator.readApplications(true); } /** Returns the ID of all known applications. */ public List<TenantAndApplicationId> idList() { return curator.readApplicationIds(); } /** Returns a snapshot of all applications of a tenant */ public List<Application> asList(TenantName tenant) { return curator.readApplications(tenant); } public ArtifactRepository artifacts() { return artifactRepository; } public ApplicationStore applicationStore() { return applicationStore; } /** Returns all currently reachable content clusters among the given deployments. 
*/ public Map<ZoneId, List<String>> reachableContentClustersByZone(Collection<DeploymentId> ids) { Map<ZoneId, List<String>> clusters = new TreeMap<>(Comparator.comparing(ZoneId::value)); for (DeploymentId id : ids) if (isHealthy(id)) clusters.put(id.zoneId(), List.copyOf(configServer.getContentClusters(id))); return Collections.unmodifiableMap(clusters); } /** Reads the oldest installed platform for the given application and zone from job history, or a node repo. */ private Optional<Version> oldestInstalledPlatform(JobStatus job) { Version oldest = null; for (Run run : job.runs().descendingMap().values()) { Version version = run.versions().targetPlatform(); if (oldest == null || version.isBefore(oldest)) oldest = version; if (run.status() == RunStatus.success) return Optional.of(oldest); } return oldestInstalledPlatform(job.id()); } /** Reads the oldest installed platform for the given application and zone from the node repo of that zone. */ private Optional<Version> oldestInstalledPlatform(JobId job) { return configServer.nodeRepository().list(job.type().zone(controller.system()), NodeFilter.all() .applications(job.application()) .states(active, reserved)) .stream() .map(Node::currentVersion) .filter(version -> ! version.isEmpty()) .min(naturalOrder()); } /** Returns the oldest Vespa version installed on any active or reserved production node for the given application. */ public Version oldestInstalledPlatform(TenantAndApplicationId id) { return controller.jobController().deploymentStatus(requireApplication(id)).jobs() .production().asList().stream() .map(this::oldestInstalledPlatform) .flatMap(Optional::stream) .min(naturalOrder()) .orElse(controller.readSystemVersion()); } /** * Creates a new application for an existing tenant. 
* * @throws IllegalArgumentException if the application already exists */ public Application createApplication(TenantAndApplicationId id, Credentials credentials) { try (Lock lock = lock(id)) { if (getApplication(id).isPresent()) throw new IllegalArgumentException("Could not create '" + id + "': Application already exists"); if (getApplication(dashToUnderscore(id)).isPresent()) throw new IllegalArgumentException("Could not create '" + id + "': Application " + dashToUnderscore(id) + " already exists"); com.yahoo.vespa.hosted.controller.api.identifiers.ApplicationId.validate(id.application().value()); if (controller.tenants().get(id.tenant()).isEmpty()) throw new IllegalArgumentException("Could not create '" + id + "': This tenant does not exist"); accessControl.createApplication(id, credentials); LockedApplication locked = new LockedApplication(new Application(id, clock.instant()), lock); store(locked); log.info("Created " + locked); return locked.get(); } } /** * Creates a new instance for an existing application. * * @throws IllegalArgumentException if the instance already exists, or has an invalid instance name. 
*/ public void createInstance(ApplicationId id) { lockApplicationOrThrow(TenantAndApplicationId.from(id), application -> { store(withNewInstance(application, id)); }); } /** Returns given application with a new instance */ public LockedApplication withNewInstance(LockedApplication application, ApplicationId instance) { if (instance.instance().isTester()) throw new IllegalArgumentException("'" + instance + "' is a tester application!"); InstanceId.validate(instance.instance().value()); if (getInstance(instance).isPresent()) throw new IllegalArgumentException("Could not create '" + instance + "': Instance already exists"); if (getInstance(dashToUnderscore(instance)).isPresent()) throw new IllegalArgumentException("Could not create '" + instance + "': Instance " + dashToUnderscore(instance) + " already exists"); log.info("Created " + instance); return application.withNewInstance(instance.instance()); } /** Deploys an application package for an existing application instance. */ public ActivateResult deploy(JobId job, boolean deploySourceVersions) { if (job.application().instance().isTester()) throw new IllegalArgumentException("'" + job.application() + "' is a tester application!"); TenantAndApplicationId applicationId = TenantAndApplicationId.from(job.application()); ZoneId zone = job.type().zone(controller.system()); DeploymentId deployment = new DeploymentId(job.application(), zone); try (Lock deploymentLock = lockForDeployment(job.application(), zone)) { Set<ContainerEndpoint> containerEndpoints; Optional<EndpointCertificateMetadata> endpointCertificateMetadata; Run run = controller.jobController().last(job) .orElseThrow(() -> new IllegalStateException("No known run of '" + job + "'")); if (run.hasEnded()) throw new IllegalStateException("No deployment expected for " + job + " now, as no job is running"); Version platform = run.versions().sourcePlatform().filter(__ -> deploySourceVersions).orElse(run.versions().targetPlatform()); ApplicationVersion revision = 
run.versions().sourceApplication().filter(__ -> deploySourceVersions).orElse(run.versions().targetApplication()); ApplicationPackage applicationPackage = new ApplicationPackage(applicationStore.get(deployment, revision)); try (Lock lock = lock(applicationId)) { LockedApplication application = new LockedApplication(requireApplication(applicationId), lock); Instance instance = application.get().require(job.application().instance()); if ( ! applicationPackage.trustedCertificates().isEmpty() && run.testerCertificate().isPresent()) applicationPackage = applicationPackage.withTrustedCertificate(run.testerCertificate().get()); endpointCertificateMetadata = endpointCertificates.getMetadata(instance, zone, applicationPackage.deploymentSpec()); containerEndpoints = controller.routing().of(deployment).prepare(application); } ActivateResult result = deploy(job.application(), applicationPackage, zone, platform, containerEndpoints, endpointCertificateMetadata, run.isDryRun()); var quotaUsage = deploymentQuotaUsage(zone, job.application()); NotificationSource source = zone.environment().isManuallyDeployed() ? 
NotificationSource.from(deployment) : NotificationSource.from(applicationId); @SuppressWarnings("deprecation") List<String> warnings = Optional.ofNullable(result.prepareResponse().log) .map(logs -> logs.stream() .filter(log -> log.applicationPackage) .filter(log -> LogLevel.parse(log.level).intValue() >= Level.WARNING.intValue()) .map(log -> log.message) .sorted() .distinct() .collect(Collectors.toList())) .orElseGet(List::of); if (warnings.isEmpty()) controller.notificationsDb().removeNotification(source, Notification.Type.applicationPackage); else controller.notificationsDb().setNotification(source, Notification.Type.applicationPackage, Notification.Level.warning, warnings); lockApplicationOrThrow(applicationId, application -> store(application.with(job.application().instance(), instance -> instance.withNewDeployment(zone, revision, platform, clock.instant(), warningsFrom(result), quotaUsage)))); return result; } } /** Stores the deployment spec and validation overrides from the application package, and runs cleanup. 
*/ /** Deploy a system application to given zone */ public void deploy(SystemApplication application, ZoneId zone, Version version, boolean allowDowngrade) { if (application.hasApplicationPackage()) { deploySystemApplicationPackage(application, zone, version); } else { configServer.nodeRepository().upgrade(zone, application.nodeType(), version, allowDowngrade); } } /** Deploy a system application to given zone */ public ActivateResult deploySystemApplicationPackage(SystemApplication application, ZoneId zone, Version version) { if (application.hasApplicationPackage()) { ApplicationPackage applicationPackage = new ApplicationPackage( artifactRepository.getSystemApplicationPackage(application.id(), zone, version) ); return deploy(application.id(), applicationPackage, zone, version, Set.of(), /* No application cert */ Optional.empty(), false); } else { throw new RuntimeException("This system application does not have an application package: " + application.id().toShortString()); } } /** Deploys the given tester application to the given zone. 
*/ public ActivateResult deployTester(TesterId tester, ApplicationPackage applicationPackage, ZoneId zone, Version platform) { return deploy(tester.id(), applicationPackage, zone, platform, Set.of(), /* No application cert for tester*/ Optional.empty(), false); } private ActivateResult deploy(ApplicationId application, ApplicationPackage applicationPackage, ZoneId zone, Version platform, Set<ContainerEndpoint> endpoints, Optional<EndpointCertificateMetadata> endpointCertificateMetadata, boolean dryRun) { DeploymentId deployment = new DeploymentId(application, zone); try { Optional<DockerImage> dockerImageRepo = Optional.ofNullable( dockerImageRepoFlag .with(FetchVector.Dimension.ZONE_ID, zone.value()) .with(FetchVector.Dimension.APPLICATION_ID, application.serializedForm()) .value()) .filter(s -> !s.isBlank()) .map(DockerImage::fromString); Optional<AthenzDomain> domain = controller.tenants().get(application.tenant()) .filter(tenant-> tenant instanceof AthenzTenant) .map(tenant -> ((AthenzTenant)tenant).domain()); if (zone.environment().isManuallyDeployed()) controller.applications().applicationStore().putMeta(deployment, clock.instant(), applicationPackage.metaDataZip()); Quota deploymentQuota = DeploymentQuotaCalculator.calculate(billingController.getQuota(application.tenant()), asList(application.tenant()), application, zone, applicationPackage.deploymentSpec()); List<TenantSecretStore> tenantSecretStores = controller.tenants() .get(application.tenant()) .filter(tenant-> tenant instanceof CloudTenant) .map(tenant -> ((CloudTenant) tenant).tenantSecretStores()) .orElse(List.of()); List<X509Certificate> operatorCertificates = controller.supportAccess().activeGrantsFor(deployment).stream() .map(SupportAccessGrant::certificate) .collect(toList()); ConfigServer.PreparedApplication preparedApplication = configServer.deploy(new DeploymentData(application, zone, applicationPackage.zippedContent(), platform, endpoints, endpointCertificateMetadata, dockerImageRepo, 
domain, deploymentQuota, tenantSecretStores, operatorCertificates, dryRun)); return new ActivateResult(new RevisionId(applicationPackage.hash()), preparedApplication.prepareResponse(), applicationPackage.zippedContent().length); } finally { if ( ! application.instance().isTester()) { controller.routing().of(deployment).configure(applicationPackage.deploymentSpec()); } } } private LockedApplication withoutDeletedDeployments(LockedApplication application, InstanceName instance) { DeploymentSpec deploymentSpec = application.get().deploymentSpec(); List<ZoneId> deploymentsToRemove = application.get().require(instance).productionDeployments().values().stream() .map(Deployment::zone) .filter(zone -> deploymentSpec.instance(instance).isEmpty() || ! deploymentSpec.requireInstance(instance).deploysTo(zone.environment(), zone.region())) .collect(toList()); if (deploymentsToRemove.isEmpty()) return application; if ( ! application.get().validationOverrides().allows(ValidationId.deploymentRemoval, clock.instant())) throw new IllegalArgumentException(ValidationId.deploymentRemoval.value() + ": " + application.get().require(instance) + " is deployed in " + deploymentsToRemove.stream() .map(zone -> zone.region().value()) .collect(joining(", ")) + ", but does not include " + (deploymentsToRemove.size() > 1 ? "these zones" : "this zone") + " in deployment.xml. " + ValidationOverrides.toAllowMessage(ValidationId.deploymentRemoval)); boolean removeInstance = ! deploymentSpec.instanceNames().contains(instance) && application.get().require(instance).deployments().size() == deploymentsToRemove.size(); for (ZoneId zone : deploymentsToRemove) application = deactivate(application, instance, zone); if (removeInstance) application = application.without(instance); return application; } /** * Deletes the the given application. All known instances of the applications will be deleted. 
* * @throws IllegalArgumentException if the application has deployments or the caller is not authorized */ public void deleteApplication(TenantAndApplicationId id, Credentials credentials) { deleteApplication(id, Optional.of(credentials)); } public void deleteApplication(TenantAndApplicationId id, Optional<Credentials> credentials) { lockApplicationOrThrow(id, application -> { var deployments = application.get().instances().values().stream() .filter(instance -> ! instance.deployments().isEmpty()) .collect(toMap(instance -> instance.name(), instance -> instance.deployments().keySet().stream() .map(ZoneId::toString) .collect(joining(", ")))); if ( ! deployments.isEmpty()) throw new IllegalArgumentException("Could not delete '" + application + "': It has active deployments: " + deployments); for (Instance instance : application.get().instances().values()) { controller.routing().removeEndpointsInDns(application.get(), instance.name()); application = application.without(instance.name()); } applicationStore.removeAll(id.tenant(), id.application()); applicationStore.removeAllTesters(id.tenant(), id.application()); applicationStore.putMetaTombstone(id.tenant(), id.application(), clock.instant()); credentials.ifPresent(creds -> accessControl.deleteApplication(id, creds)); curator.removeApplication(id); controller.jobController().collectGarbage(); controller.notificationsDb().removeNotifications(NotificationSource.from(id)); log.info("Deleted " + id); }); } /** * Deletes the the given application instance. * * @throws IllegalArgumentException if the application has deployments or the caller is not authorized * @throws NotExistsException if the instance does not exist */ public void deleteInstance(ApplicationId instanceId) { if (getInstance(instanceId).isEmpty()) throw new NotExistsException("Could not delete instance '" + instanceId + "': Instance not found"); lockApplicationOrThrow(TenantAndApplicationId.from(instanceId), application -> { if ( ! 
application.get().require(instanceId.instance()).deployments().isEmpty()) throw new IllegalArgumentException("Could not delete '" + application + "': It has active deployments in: " + application.get().require(instanceId.instance()).deployments().keySet().stream().map(ZoneId::toString) .sorted().collect(joining(", "))); if ( ! application.get().deploymentSpec().equals(DeploymentSpec.empty) && application.get().deploymentSpec().instanceNames().contains(instanceId.instance())) throw new IllegalArgumentException("Can not delete '" + instanceId + "', which is specified in 'deployment.xml'; remove it there first"); controller.routing().removeEndpointsInDns(application.get(), instanceId.instance()); curator.writeApplication(application.without(instanceId.instance()).get()); controller.jobController().collectGarbage(); controller.notificationsDb().removeNotifications(NotificationSource.from(instanceId)); log.info("Deleted " + instanceId); }); } /** * Replace any previous version of this application by this instance * * @param application a locked application to store */ public void store(LockedApplication application) { curator.writeApplication(application.get()); } /** * Acquire a locked application to modify and store, if there is an application with the given id. * * @param applicationId ID of the application to lock and get. * @param action Function which acts on the locked application. */ public void lockApplicationIfPresent(TenantAndApplicationId applicationId, Consumer<LockedApplication> action) { try (Lock lock = lock(applicationId)) { getApplication(applicationId).map(application -> new LockedApplication(application, lock)).ifPresent(action); } } /** * Acquire a locked application to modify and store, or throw an exception if no application has the given id. * * @param applicationId ID of the application to lock and require. * @param action Function which acts on the locked application. * @throws IllegalArgumentException when application does not exist. 
*/ public void lockApplicationOrThrow(TenantAndApplicationId applicationId, Consumer<LockedApplication> action) { try (Lock lock = lock(applicationId)) { action.accept(new LockedApplication(requireApplication(applicationId), lock)); } } /** * Tells config server to schedule a restart of all nodes in this deployment * * @param restartFilter Variables to filter which nodes to restart. */ public void restart(DeploymentId deploymentId, RestartFilter restartFilter) { configServer.restart(deploymentId, restartFilter); } /** * Asks the config server whether this deployment is currently healthy, i.e., serving traffic as usual. * If this cannot be ascertained, we must assumed it is not. */ public boolean isHealthy(DeploymentId deploymentId) { try { return ! isSuspended(deploymentId); } catch (RuntimeException e) { log.log(Level.WARNING, "Failed getting suspension status of " + deploymentId + ": " + Exceptions.toMessageString(e)); return false; } } /** * Asks the config server whether this deployment is currently <i>suspended</i>: * Not in a state where it should receive traffic. */ public boolean isSuspended(DeploymentId deploymentId) { return configServer.isSuspended(deploymentId); } /** Sets suspension status of the given deployment in its zone. 
*/ public void setSuspension(DeploymentId deploymentId, boolean suspend) { configServer.setSuspension(deploymentId, suspend); } /** Deactivate application in the given zone */ public void deactivate(ApplicationId id, ZoneId zone) { lockApplicationOrThrow(TenantAndApplicationId.from(id), application -> store(deactivate(application, id.instance(), zone))); } /** * Deactivates a locked application without storing it * * @return the application with the deployment in the given zone removed */ private LockedApplication deactivate(LockedApplication application, InstanceName instanceName, ZoneId zone) { DeploymentId id = new DeploymentId(application.get().id().instance(instanceName), zone); try { configServer.deactivate(id); } finally { controller.routing().of(id).configure(application.get().deploymentSpec()); if (zone.environment().isManuallyDeployed()) applicationStore.putMetaTombstone(id, clock.instant()); if (!zone.environment().isTest()) controller.notificationsDb().removeNotifications(NotificationSource.from(id)); } return application.with(instanceName, instance -> instance.withoutDeploymentIn(zone)); } public DeploymentTrigger deploymentTrigger() { return deploymentTrigger; } /** * Returns a lock which provides exclusive rights to changing this application. * Any operation which stores an application need to first acquire this lock, then read, modify * and store the application, and finally release (close) the lock. */ Lock lock(TenantAndApplicationId application) { return curator.lock(application); } /** * Returns a lock which provides exclusive rights to deploying this application to the given zone. */ private Lock lockForDeployment(ApplicationId application, ZoneId zone) { return curator.lockForDeployment(application, zone); } /** * Verifies that the application can be deployed to the tenant, following these rules: * * 1. Verify that the Athenz service can be launched by the config server * 2. 
If the principal is given, verify that the principal is tenant admin or admin of the tenant domain * 3. If the principal is not given, verify that the Athenz domain of the tenant equals Athenz domain given in deployment.xml * * @param tenantName tenant where application should be deployed * @param applicationPackage application package * @param deployer principal initiating the deployment, possibly empty */ public void verifyApplicationIdentityConfiguration(TenantName tenantName, Optional<InstanceName> instanceName, Optional<ZoneId> zoneId, ApplicationPackage applicationPackage, Optional<Principal> deployer) { Optional<AthenzDomain> identityDomain = applicationPackage.deploymentSpec().athenzDomain() .map(domain -> new AthenzDomain(domain.value())); if(identityDomain.isEmpty()) { return; } if(! (accessControl instanceof AthenzFacade)) { throw new IllegalArgumentException("Athenz domain and service specified in deployment.xml, but not supported by system."); } verifyAllowedLaunchAthenzService(applicationPackage.deploymentSpec()); Optional<AthenzUser> athenzUser = getUser(deployer); if (athenzUser.isPresent()) { var zone = zoneId.orElseThrow(() -> new IllegalArgumentException("Unable to evaluate access, no zone provided in deployment")); var serviceToLaunch = instanceName .flatMap(instance -> applicationPackage.deploymentSpec().instance(instance)) .flatMap(instanceSpec -> instanceSpec.athenzService(zone.environment(), zone.region())) .or(() -> applicationPackage.deploymentSpec().athenzService()) .map(service -> new AthenzService(identityDomain.get(), service.value())); if(serviceToLaunch.isPresent()) { if ( ! ((AthenzFacade) accessControl).canLaunch(athenzUser.get(), serviceToLaunch.get()) && ! ((AthenzFacade) accessControl).hasTenantAdminAccess(athenzUser.get(), identityDomain.get()) ) { throw new IllegalArgumentException("User " + athenzUser.get().getFullName() + " is not allowed to launch " + "service " + serviceToLaunch.get().getFullName() + ". 
" + "Please reach out to the domain admin."); } } else { throw new IllegalArgumentException("Athenz domain configured, but no service defined for deployment to " + zone.value()); } } else { Tenant tenant = controller.tenants().require(tenantName); AthenzDomain tenantDomain = ((AthenzTenant) tenant).domain(); if ( ! Objects.equals(tenantDomain, identityDomain.get())) throw new IllegalArgumentException("Athenz domain in deployment.xml: [" + identityDomain.get().getName() + "] " + "must match tenant domain: [" + tenantDomain.getName() + "]"); } } private TenantAndApplicationId dashToUnderscore(TenantAndApplicationId id) { return TenantAndApplicationId.from(id.tenant().value(), id.application().value().replaceAll("-", "_")); } private ApplicationId dashToUnderscore(ApplicationId id) { return dashToUnderscore(TenantAndApplicationId.from(id)).instance(id.instance()); } private QuotaUsage deploymentQuotaUsage(ZoneId zoneId, ApplicationId applicationId) { var application = configServer.nodeRepository().getApplication(zoneId, applicationId); return DeploymentQuotaCalculator.calculateQuotaUsage(application); } /* * Get the AthenzUser from this principal or Optional.empty if this does not represent a user. */ private Optional<AthenzUser> getUser(Optional<Principal> deployer) { return deployer .filter(AthenzPrincipal.class::isInstance) .map(AthenzPrincipal.class::cast) .map(AthenzPrincipal::getIdentity) .filter(AthenzUser.class::isInstance) .map(AthenzUser.class::cast); } /* * Verifies that the configured athenz service (if any) can be launched. 
*/ private void verifyAllowedLaunchAthenzService(DeploymentSpec deploymentSpec) { deploymentSpec.athenzDomain().ifPresent(domain -> { controller.zoneRegistry().zones().reachable().ids().forEach(zone -> { AthenzIdentity configServerAthenzIdentity = controller.zoneRegistry().getConfigServerHttpsIdentity(zone); deploymentSpec.athenzService().ifPresent(service -> { verifyAthenzServiceCanBeLaunchedBy(configServerAthenzIdentity, new AthenzService(domain.value(), service.value())); }); deploymentSpec.instances().forEach(spec -> { spec.athenzService(zone.environment(), zone.region()).ifPresent(service -> { verifyAthenzServiceCanBeLaunchedBy(configServerAthenzIdentity, new AthenzService(domain.value(), service.value())); }); }); }); }); } private void verifyAthenzServiceCanBeLaunchedBy(AthenzIdentity configServerAthenzIdentity, AthenzService athenzService) { if ( ! ((AthenzFacade) accessControl).canLaunch(configServerAthenzIdentity, athenzService)) throw new IllegalArgumentException("Not allowed to launch Athenz service " + athenzService.getFullName()); } /** Returns the latest known version within the given major, which is not newer than the system version. */ public Optional<Version> lastCompatibleVersion(int targetMajorVersion) { VersionStatus versions = controller.readVersionStatus(); Version systemVersion = controller.systemVersion(versions); return versions.versions().stream() .map(VespaVersion::versionNumber) .filter(version -> version.getMajor() == targetMajorVersion) .filter(version -> ! 
version.isAfter(systemVersion)) .max(naturalOrder()); } /** Extract deployment warnings metric from deployment result */ private static Map<DeploymentMetrics.Warning, Integer> warningsFrom(ActivateResult result) { if (result.prepareResponse().log == null) return Map.of(); Map<DeploymentMetrics.Warning, Integer> warnings = new HashMap<>(); for (Log log : result.prepareResponse().log) { if (!"warn".equalsIgnoreCase(log.level) && !"warning".equalsIgnoreCase(log.level)) continue; warnings.merge(DeploymentMetrics.Warning.all, 1, Integer::sum); } return Map.copyOf(warnings); } }
/**
 * Controller-side management of applications: creation/deletion of applications and instances,
 * deployment to zones, deactivation, locking for safe read-modify-write, and Athenz identity
 * verification. All persistent state goes through the {@link CuratorDb}.
 *
 * Thread-safety: mutations of an application must happen under the lock obtained from
 * {@link #lock(TenantAndApplicationId)} — see {@link #lockApplicationOrThrow} and
 * {@link #lockApplicationIfPresent}.
 */
class ApplicationController {

    private static final Logger log = Logger.getLogger(ApplicationController.class.getName());

    /** The controller owning this */
    private final Controller controller;

    /** For persistence */
    private final CuratorDb curator;

    private final ArtifactRepository artifactRepository;
    private final ApplicationStore applicationStore;
    private final AccessControl accessControl;
    private final ConfigServer configServer;
    private final Clock clock;
    private final DeploymentTrigger deploymentTrigger;
    private final ApplicationPackageValidator applicationPackageValidator;
    private final EndpointCertificates endpointCertificates;
    private final StringFlag dockerImageRepoFlag;
    private final BillingController billingController;

    ApplicationController(Controller controller, CuratorDb curator, AccessControl accessControl, Clock clock,
                          FlagSource flagSource, BillingController billingController) {
        this.controller = Objects.requireNonNull(controller);
        this.curator = Objects.requireNonNull(curator);
        this.accessControl = Objects.requireNonNull(accessControl);
        this.configServer = controller.serviceRegistry().configServer();
        this.clock = Objects.requireNonNull(clock);
        this.billingController = Objects.requireNonNull(billingController);

        artifactRepository = controller.serviceRegistry().artifactRepository();
        applicationStore = controller.serviceRegistry().applicationStore();
        dockerImageRepoFlag = PermanentFlags.DOCKER_IMAGE_REPO.bindTo(flagSource);
        deploymentTrigger = new DeploymentTrigger(controller, clock);
        applicationPackageValidator = new ApplicationPackageValidator(controller);
        endpointCertificates = new EndpointCertificates(controller,
                                                        controller.serviceRegistry().endpointCertificateProvider(),
                                                        controller.serviceRegistry().endpointCertificateValidator());

        // One-time background migration: ensure every instance declared in each application's
        // deployment spec exists as a stored instance, then rewrite the application.
        Once.after(Duration.ofMinutes(1), () -> {
            Instant start = clock.instant();
            int count = 0;
            for (TenantAndApplicationId id : curator.readApplicationIds()) {
                lockApplicationIfPresent(id, application -> {
                    for (InstanceName instance : application.get().deploymentSpec().instanceNames())
                        if (!application.get().instances().containsKey(instance))
                            application = withNewInstance(application, id.instance(instance));
                    store(application);
                });
                count++;
            }
            log.log(Level.INFO, Text.format("Wrote %d applications in %s", count, Duration.between(start, clock.instant())));
        });
    }

    /** Returns the application with the given id, or empty if it is not present */
    public Optional<Application> getApplication(TenantAndApplicationId id) {
        return curator.readApplication(id);
    }

    /** Returns the instance with the given id, or empty if it is not present */
    public Optional<Instance> getInstance(ApplicationId id) {
        return getApplication(TenantAndApplicationId.from(id)).flatMap(application -> application.get(id.instance()));
    }

    /**
     * Triggers reindexing for the given document types in the given clusters, for the given application.
     *
     * If no clusters are given, reindexing is triggered for the entire application; otherwise
     * if no documents types are given, reindexing is triggered for all given clusters; otherwise
     * reindexing is triggered for the cartesian product of the given clusters and document types.
     */
    public void reindex(ApplicationId id, ZoneId zoneId, List<String> clusterNames, List<String> documentTypes,
                        boolean indexedOnly, Double speed) {
        configServer.reindex(new DeploymentId(id, zoneId), clusterNames, documentTypes, indexedOnly, speed);
    }

    /** Returns the reindexing status for the given application in the given zone. */
    public ApplicationReindexing applicationReindexing(ApplicationId id, ZoneId zoneId) {
        return configServer.getReindexing(new DeploymentId(id, zoneId));
    }

    /** Enables reindexing for the given application in the given zone. */
    public void enableReindexing(ApplicationId id, ZoneId zoneId) {
        configServer.enableReindexing(new DeploymentId(id, zoneId));
    }

    /** Disables reindexing for the given application in the given zone. */
    public void disableReindexing(ApplicationId id, ZoneId zoneId) {
        configServer.disableReindexing(new DeploymentId(id, zoneId));
    }

    /**
     * Returns the application with the given id
     *
     * @throws IllegalArgumentException if it does not exist
     */
    public Application requireApplication(TenantAndApplicationId id) {
        return getApplication(id).orElseThrow(() -> new IllegalArgumentException(id + " not found"));
    }

    /**
     * Returns the instance with the given id
     *
     * @throws IllegalArgumentException if it does not exist
     */
    public Instance requireInstance(ApplicationId id) {
        return getInstance(id).orElseThrow(() -> new IllegalArgumentException(id + " not found"));
    }

    /** Returns a snapshot of all applications */
    public List<Application> asList() {
        return curator.readApplications(false);
    }

    /**
     * Returns a snapshot of all readable applications. Unlike {@link ApplicationController#asList()} this skips
     * applications that cannot currently be read (e.g. due to serialization issues) and may return an incomplete
     * snapshot.
     *
     * This should only be used in cases where acting on a subset of applications is better than none.
     */
    public List<Application> readable() {
        return curator.readApplications(true);
    }

    /** Returns the ID of all known applications. */
    public List<TenantAndApplicationId> idList() {
        return curator.readApplicationIds();
    }

    /** Returns a snapshot of all applications of a tenant */
    public List<Application> asList(TenantName tenant) {
        return curator.readApplications(tenant);
    }

    public ArtifactRepository artifacts() { return artifactRepository; }

    public ApplicationStore applicationStore() { return applicationStore; }

    /** Returns all currently reachable content clusters among the given deployments. */
    public Map<ZoneId, List<String>> reachableContentClustersByZone(Collection<DeploymentId> ids) {
        Map<ZoneId, List<String>> clusters = new TreeMap<>(Comparator.comparing(ZoneId::value));
        for (DeploymentId id : ids)
            if (isHealthy(id))  // Skip deployments that are suspended or whose status cannot be determined.
                clusters.put(id.zoneId(), List.copyOf(configServer.getContentClusters(id)));

        return Collections.unmodifiableMap(clusters);
    }

    /** Reads the oldest installed platform for the given application and zone from job history, or a node repo. */
    private Optional<Version> oldestInstalledPlatform(JobStatus job) {
        // Walk runs from newest to oldest, tracking the minimum target platform seen; stop at the
        // first successful run, since that is the newest run known to have fully installed.
        Version oldest = null;
        for (Run run : job.runs().descendingMap().values()) {
            Version version = run.versions().targetPlatform();
            if (oldest == null || version.isBefore(oldest))
                oldest = version;

            if (run.status() == RunStatus.success)
                return Optional.of(oldest);
        }
        // If no successful run was found, ask the node repository in the relevant zone.
        return oldestInstalledPlatform(job.id());
    }

    /** Reads the oldest installed platform for the given application and zone from the node repo of that zone. */
    private Optional<Version> oldestInstalledPlatform(JobId job) {
        return configServer.nodeRepository().list(job.type().zone(controller.system()),
                                                  NodeFilter.all()
                                                            .applications(job.application())
                                                            .states(active, reserved))
                           .stream()
                           .map(Node::currentVersion)
                           .filter(version -> ! version.isEmpty())
                           .min(naturalOrder());
    }

    /** Returns the oldest Vespa version installed on any active or reserved production node for the given application. */
    public Version oldestInstalledPlatform(TenantAndApplicationId id) {
        return controller.jobController().deploymentStatus(requireApplication(id)).jobs()
                         .production().asList().stream()
                         .map(this::oldestInstalledPlatform)
                         .flatMap(Optional::stream)
                         .min(naturalOrder())
                         .orElse(controller.readSystemVersion());
    }

    /**
     * Creates a new application for an existing tenant.
     *
     * @throws IllegalArgumentException if the application already exists
     */
    public Application createApplication(TenantAndApplicationId id, Credentials credentials) {
        try (Lock lock = lock(id)) {
            if (getApplication(id).isPresent())
                throw new IllegalArgumentException("Could not create '" + id + "': Application already exists");
            // Names differing only by dash vs. underscore would collide in other systems, so reject those too.
            if (getApplication(dashToUnderscore(id)).isPresent())
                throw new IllegalArgumentException("Could not create '" + id + "': Application " + dashToUnderscore(id) + " already exists");

            com.yahoo.vespa.hosted.controller.api.identifiers.ApplicationId.validate(id.application().value());

            if (controller.tenants().get(id.tenant()).isEmpty())
                throw new IllegalArgumentException("Could not create '" + id + "': This tenant does not exist");
            accessControl.createApplication(id, credentials);

            LockedApplication locked = new LockedApplication(new Application(id, clock.instant()), lock);
            store(locked);
            log.info("Created " + locked);
            return locked.get();
        }
    }

    /**
     * Creates a new instance for an existing application.
     *
     * @throws IllegalArgumentException if the instance already exists, or has an invalid instance name.
     */
    public void createInstance(ApplicationId id) {
        lockApplicationOrThrow(TenantAndApplicationId.from(id), application -> {
            store(withNewInstance(application, id));
        });
    }

    /** Returns given application with a new instance */
    public LockedApplication withNewInstance(LockedApplication application, ApplicationId instance) {
        if (instance.instance().isTester())
            throw new IllegalArgumentException("'" + instance + "' is a tester application!");
        InstanceId.validate(instance.instance().value());

        if (getInstance(instance).isPresent())
            throw new IllegalArgumentException("Could not create '" + instance + "': Instance already exists");
        // Names differing only by dash vs. underscore would collide in other systems, so reject those too.
        if (getInstance(dashToUnderscore(instance)).isPresent())
            throw new IllegalArgumentException("Could not create '" + instance + "': Instance " + dashToUnderscore(instance) + " already exists");

        log.info("Created " + instance);
        return application.withNewInstance(instance.instance());
    }

    /** Deploys an application package for an existing application instance. */
    public ActivateResult deploy(JobId job, boolean deploySourceVersions) {
        if (job.application().instance().isTester())
            throw new IllegalArgumentException("'" + job.application() + "' is a tester application!");

        TenantAndApplicationId applicationId = TenantAndApplicationId.from(job.application());
        ZoneId zone = job.type().zone(controller.system());
        DeploymentId deployment = new DeploymentId(job.application(), zone);

        try (Lock deploymentLock = lockForDeployment(job.application(), zone)) {
            Set<ContainerEndpoint> containerEndpoints;
            Optional<EndpointCertificateMetadata> endpointCertificateMetadata;

            Run run = controller.jobController().last(job)
                                .orElseThrow(() -> new IllegalStateException("No known run of '" + job + "'"));

            if (run.hasEnded())
                throw new IllegalStateException("No deployment expected for " + job + " now, as no job is running");

            // Optionally deploy the source versions of an upgrade run, instead of the targets.
            Version platform = run.versions().sourcePlatform().filter(__ -> deploySourceVersions).orElse(run.versions().targetPlatform());
            ApplicationVersion revision = run.versions().sourceApplication().filter(__ -> deploySourceVersions).orElse(run.versions().targetApplication());
            ApplicationPackage applicationPackage = new ApplicationPackage(applicationStore.get(deployment, revision));

            try (Lock lock = lock(applicationId)) {
                LockedApplication application = new LockedApplication(requireApplication(applicationId), lock);
                Instance instance = application.get().require(job.application().instance());

                // Let the tester's certificate be trusted by the deployed application, when needed.
                if (   ! applicationPackage.trustedCertificates().isEmpty()
                    &&   run.testerCertificate().isPresent())
                    applicationPackage = applicationPackage.withTrustedCertificate(run.testerCertificate().get());

                endpointCertificateMetadata = endpointCertificates.getMetadata(instance, zone, applicationPackage.deploymentSpec());

                containerEndpoints = controller.routing().of(deployment).prepare(application);

            } // Release application lock while doing the deployment, which is slow.

            ActivateResult result = deploy(job.application(), applicationPackage, zone, platform, containerEndpoints,
                                           endpointCertificateMetadata, run.isDryRun());

            var quotaUsage = deploymentQuotaUsage(zone, job.application());

            // Manual deployments are notified on the deployment; job-driven ones on the application.
            NotificationSource source = zone.environment().isManuallyDeployed()
                                        ? NotificationSource.from(deployment)
                                        : NotificationSource.from(applicationId);
            @SuppressWarnings("deprecation")
            List<String> warnings = Optional.ofNullable(result.prepareResponse().log)
                                            .map(logs -> logs.stream()
                                                             .filter(log -> log.applicationPackage)
                                                             .filter(log -> LogLevel.parse(log.level).intValue() >= Level.WARNING.intValue())
                                                             .map(log -> log.message)
                                                             .sorted()
                                                             .distinct()
                                                             .collect(Collectors.toList()))
                                            .orElseGet(List::of);
            if (warnings.isEmpty())
                controller.notificationsDb().removeNotification(source, Notification.Type.applicationPackage);
            else
                controller.notificationsDb().setNotification(source, Notification.Type.applicationPackage, Notification.Level.warning, warnings);

            lockApplicationOrThrow(applicationId, application ->
                    store(application.with(job.application().instance(),
                                           instance -> instance.withNewDeployment(zone, revision, platform, clock.instant(),
                                                                                  warningsFrom(result), quotaUsage))));
            return result;
        }
    }

    // NOTE(review): this Javadoc appears orphaned — the method it described ("Stores the deployment spec and
    // validation overrides from the application package, and runs cleanup.") is not present in this chunk.

    /** Deploy a system application to given zone */
    public void deploy(SystemApplication application, ZoneId zone, Version version, boolean allowDowngrade) {
        if (application.hasApplicationPackage()) {
            deploySystemApplicationPackage(application, zone, version);
        } else {
            // Upgrade by telling the node repository in that zone directly.
            configServer.nodeRepository().upgrade(zone, application.nodeType(), version, allowDowngrade);
        }
    }

    /** Deploy a system application to given zone */
    public ActivateResult deploySystemApplicationPackage(SystemApplication application, ZoneId zone, Version version) {
        if (application.hasApplicationPackage()) {
            ApplicationPackage applicationPackage = new ApplicationPackage(
                    artifactRepository.getSystemApplicationPackage(application.id(), zone, version)
            );
            return deploy(application.id(), applicationPackage, zone, version, Set.of(), /* No application cert */ Optional.empty(), false);
        } else {
           throw new RuntimeException("This system application does not have an application package: " + application.id().toShortString());
        }
    }

    /** Deploys the given tester application to the given zone. */
    public ActivateResult deployTester(TesterId tester, ApplicationPackage applicationPackage, ZoneId zone, Version platform) {
        return deploy(tester.id(), applicationPackage, zone, platform, Set.of(), /* No application cert for tester*/ Optional.empty(), false);
    }

    /** Common deployment path: prepares and activates the given package on the config server of the given zone. */
    private ActivateResult deploy(ApplicationId application, ApplicationPackage applicationPackage,
                                  ZoneId zone, Version platform, Set<ContainerEndpoint> endpoints,
                                  Optional<EndpointCertificateMetadata> endpointCertificateMetadata,
                                  boolean dryRun) {
        DeploymentId deployment = new DeploymentId(application, zone);
        try {
            // Docker image repository may be overridden per zone/application by feature flag.
            Optional<DockerImage> dockerImageRepo = Optional.ofNullable(
                    dockerImageRepoFlag
                            .with(FetchVector.Dimension.ZONE_ID, zone.value())
                            .with(FetchVector.Dimension.APPLICATION_ID, application.serializedForm())
                            .value())
                    .filter(s -> !s.isBlank())
                    .map(DockerImage::fromString);

            Optional<AthenzDomain> domain = controller.tenants().get(application.tenant())
                    .filter(tenant-> tenant instanceof AthenzTenant)
                    .map(tenant -> ((AthenzTenant)tenant).domain());

            if (zone.environment().isManuallyDeployed())
                controller.applications().applicationStore().putMeta(deployment,
                                                                     clock.instant(),
                                                                     applicationPackage.metaDataZip());

            Quota deploymentQuota = DeploymentQuotaCalculator.calculate(billingController.getQuota(application.tenant()),
                                                                        asList(application.tenant()), application,
                                                                        zone, applicationPackage.deploymentSpec());

            List<TenantSecretStore> tenantSecretStores = controller.tenants()
                    .get(application.tenant())
                    .filter(tenant-> tenant instanceof CloudTenant)
                    .map(tenant -> ((CloudTenant) tenant).tenantSecretStores())
                    .orElse(List.of());
            List<X509Certificate> operatorCertificates = controller.supportAccess().activeGrantsFor(deployment).stream()
                                                                   .map(SupportAccessGrant::certificate)
                                                                   .collect(toList());

            ConfigServer.PreparedApplication preparedApplication =
                    configServer.deploy(new DeploymentData(application, zone, applicationPackage.zippedContent(), platform,
                                                           endpoints, endpointCertificateMetadata, dockerImageRepo, domain,
                                                           deploymentQuota, tenantSecretStores, operatorCertificates, dryRun));
            return new ActivateResult(new RevisionId(applicationPackage.hash()), preparedApplication.prepareResponse(),
                                      applicationPackage.zippedContent().length);
        } finally {
            // Routing configuration is (re)applied even when deployment fails; testers have no routing.
            if ( ! application.instance().isTester()) {
                controller.routing().of(deployment).configure(applicationPackage.deploymentSpec());
            }
        }
    }

    /** Removes deployments of the given instance which are no longer declared in the deployment spec. */
    private LockedApplication withoutDeletedDeployments(LockedApplication application, InstanceName instance) {
        DeploymentSpec deploymentSpec = application.get().deploymentSpec();
        List<ZoneId> deploymentsToRemove = application.get().require(instance).productionDeployments().values().stream()
                                                      .map(Deployment::zone)
                                                      .filter(zone ->      deploymentSpec.instance(instance).isEmpty()
                                                                      || ! deploymentSpec.requireInstance(instance).deploysTo(zone.environment(), zone.region()))
                                                      .collect(toList());

        if (deploymentsToRemove.isEmpty())
            return application;

        // Removing a production deployment is destructive, so it must be explicitly allowed by a validation override.
        if ( ! application.get().validationOverrides().allows(ValidationId.deploymentRemoval, clock.instant()))
            throw new IllegalArgumentException(ValidationId.deploymentRemoval.value() + ": " + application.get().require(instance) +
                                               " is deployed in " +
                                               deploymentsToRemove.stream()
                                                                  .map(zone -> zone.region().value())
                                                                  .collect(joining(", ")) +
                                               ", but does not include " +
                                               (deploymentsToRemove.size() > 1 ? "these zones" : "this zone") +
                                               " in deployment.xml. " +
                                               ValidationOverrides.toAllowMessage(ValidationId.deploymentRemoval));

        // Also remove the instance itself when it is gone from the spec and all its deployments are being removed.
        boolean removeInstance =    ! deploymentSpec.instanceNames().contains(instance)
                                 &&   application.get().require(instance).deployments().size() == deploymentsToRemove.size();
        for (ZoneId zone : deploymentsToRemove)
            application = deactivate(application, instance, zone);
        if (removeInstance)
            application = application.without(instance);
        return application;
    }

    /**
     * Deletes the the given application. All known instances of the applications will be deleted.
     *
     * @throws IllegalArgumentException if the application has deployments or the caller is not authorized
     */
    public void deleteApplication(TenantAndApplicationId id, Credentials credentials) {
        deleteApplication(id, Optional.of(credentials));
    }

    public void deleteApplication(TenantAndApplicationId id, Optional<Credentials> credentials) {
        lockApplicationOrThrow(id, application -> {
            var deployments = application.get().instances().values().stream()
                                         .filter(instance -> ! instance.deployments().isEmpty())
                                         .collect(toMap(instance -> instance.name(),
                                                        instance -> instance.deployments().keySet().stream()
                                                                            .map(ZoneId::toString)
                                                                            .collect(joining(", "))));
            if ( ! deployments.isEmpty())
                throw new IllegalArgumentException("Could not delete '" + application + "': It has active deployments: " + deployments);

            for (Instance instance : application.get().instances().values()) {
                controller.routing().removeEndpointsInDns(application.get(), instance.name());
                application = application.without(instance.name());
            }

            applicationStore.removeAll(id.tenant(), id.application());
            applicationStore.removeAllTesters(id.tenant(), id.application());
            applicationStore.putMetaTombstone(id.tenant(), id.application(), clock.instant());

            credentials.ifPresent(creds -> accessControl.deleteApplication(id, creds));
            curator.removeApplication(id);

            controller.jobController().collectGarbage();
            controller.notificationsDb().removeNotifications(NotificationSource.from(id));
            log.info("Deleted " + id);
        });
    }

    /**
     * Deletes the the given application instance.
     *
     * @throws IllegalArgumentException if the application has deployments or the caller is not authorized
     * @throws NotExistsException if the instance does not exist
     */
    public void deleteInstance(ApplicationId instanceId) {
        if (getInstance(instanceId).isEmpty())
            throw new NotExistsException("Could not delete instance '" + instanceId + "': Instance not found");

        lockApplicationOrThrow(TenantAndApplicationId.from(instanceId), application -> {
            if ( ! application.get().require(instanceId.instance()).deployments().isEmpty())
                throw new IllegalArgumentException("Could not delete '" + application + "': It has active deployments in: " +
                                                   application.get().require(instanceId.instance()).deployments().keySet().stream().map(ZoneId::toString)
                                                              .sorted().collect(joining(", ")));

            // An instance declared in deployment.xml would be recreated on the next deployment, so forbid deleting it.
            if (   ! application.get().deploymentSpec().equals(DeploymentSpec.empty)
                &&   application.get().deploymentSpec().instanceNames().contains(instanceId.instance()))
                throw new IllegalArgumentException("Can not delete '" + instanceId + "', which is specified in 'deployment.xml'; remove it there first");

            controller.routing().removeEndpointsInDns(application.get(), instanceId.instance());
            curator.writeApplication(application.without(instanceId.instance()).get());
            controller.jobController().collectGarbage();
            controller.notificationsDb().removeNotifications(NotificationSource.from(instanceId));
            log.info("Deleted " + instanceId);
        });
    }

    /**
     * Replace any previous version of this application by this instance
     *
     * @param application a locked application to store
     */
    public void store(LockedApplication application) {
        curator.writeApplication(application.get());
    }

    /**
     * Acquire a locked application to modify and store, if there is an application with the given id.
     *
     * @param applicationId ID of the application to lock and get.
     * @param action Function which acts on the locked application.
     */
    public void lockApplicationIfPresent(TenantAndApplicationId applicationId, Consumer<LockedApplication> action) {
        try (Lock lock = lock(applicationId)) {
            getApplication(applicationId).map(application -> new LockedApplication(application, lock)).ifPresent(action);
        }
    }

    /**
     * Acquire a locked application to modify and store, or throw an exception if no application has the given id.
     *
     * @param applicationId ID of the application to lock and require.
     * @param action Function which acts on the locked application.
     * @throws IllegalArgumentException when application does not exist.
     */
    public void lockApplicationOrThrow(TenantAndApplicationId applicationId, Consumer<LockedApplication> action) {
        try (Lock lock = lock(applicationId)) {
            action.accept(new LockedApplication(requireApplication(applicationId), lock));
        }
    }

    /**
     * Tells config server to schedule a restart of all nodes in this deployment
     *
     * @param restartFilter Variables to filter which nodes to restart.
     */
    public void restart(DeploymentId deploymentId, RestartFilter restartFilter) {
        configServer.restart(deploymentId, restartFilter);
    }

    /**
     * Asks the config server whether this deployment is currently healthy, i.e., serving traffic as usual.
     * If this cannot be ascertained, we must assumed it is not.
     */
    public boolean isHealthy(DeploymentId deploymentId) {
        try {
            return ! isSuspended(deploymentId);
        } catch (RuntimeException e) {
            log.log(Level.WARNING, "Failed getting suspension status of " + deploymentId + ": " + Exceptions.toMessageString(e));
            return false; // Assume not healthy when status cannot be determined.
        }
    }

    /**
     * Asks the config server whether this deployment is currently <i>suspended</i>:
     * Not in a state where it should receive traffic.
     */
    public boolean isSuspended(DeploymentId deploymentId) {
        return configServer.isSuspended(deploymentId);
    }

    /** Sets suspension status of the given deployment in its zone. */
    public void setSuspension(DeploymentId deploymentId, boolean suspend) {
        configServer.setSuspension(deploymentId, suspend);
    }

    /** Deactivate application in the given zone */
    public void deactivate(ApplicationId id, ZoneId zone) {
        lockApplicationOrThrow(TenantAndApplicationId.from(id),
                               application -> store(deactivate(application, id.instance(), zone)));
    }

    /**
     * Deactivates a locked application without storing it
     *
     * @return the application with the deployment in the given zone removed
     */
    private LockedApplication deactivate(LockedApplication application, InstanceName instanceName, ZoneId zone) {
        DeploymentId id = new DeploymentId(application.get().id().instance(instanceName), zone);
        try {
            configServer.deactivate(id);
        } finally {
            // Routing, metadata and notifications are cleaned up regardless of whether deactivation succeeded.
            controller.routing().of(id).configure(application.get().deploymentSpec());
            if (zone.environment().isManuallyDeployed())
                applicationStore.putMetaTombstone(id, clock.instant());
            if (!zone.environment().isTest())
                controller.notificationsDb().removeNotifications(NotificationSource.from(id));
        }
        return application.with(instanceName, instance -> instance.withoutDeploymentIn(zone));
    }

    public DeploymentTrigger deploymentTrigger() { return deploymentTrigger; }

    /**
     * Returns a lock which provides exclusive rights to changing this application.
     * Any operation which stores an application need to first acquire this lock, then read, modify
     * and store the application, and finally release (close) the lock.
     */
    Lock lock(TenantAndApplicationId application) {
        return curator.lock(application);
    }

    /**
     * Returns a lock which provides exclusive rights to deploying this application to the given zone.
     */
    private Lock lockForDeployment(ApplicationId application, ZoneId zone) {
        return curator.lockForDeployment(application, zone);
    }

    /**
     * Verifies that the application can be deployed to the tenant, following these rules:
     *
     * 1. Verify that the Athenz service can be launched by the config server
     * 2. If the principal is given, verify that the principal is tenant admin or admin of the tenant domain
     * 3. If the principal is not given, verify that the Athenz domain of the tenant equals Athenz domain given in deployment.xml
     *
     * @param tenantName tenant where application should be deployed
     * @param applicationPackage application package
     * @param deployer principal initiating the deployment, possibly empty
     */
    public void verifyApplicationIdentityConfiguration(TenantName tenantName, Optional<InstanceName> instanceName,
                                                       Optional<ZoneId> zoneId, ApplicationPackage applicationPackage,
                                                       Optional<Principal> deployer) {
        Optional<AthenzDomain> identityDomain = applicationPackage.deploymentSpec().athenzDomain()
                                                                  .map(domain -> new AthenzDomain(domain.value()));
        if(identityDomain.isEmpty()) {
            // If there is no domain configured in deployment.xml there is nothing to verify.
            return;
        }

        if(! (accessControl instanceof AthenzFacade)) {
            throw new IllegalArgumentException("Athenz domain and service specified in deployment.xml, but not supported by system.");
        }

        verifyAllowedLaunchAthenzService(applicationPackage.deploymentSpec());

        Optional<AthenzUser> athenzUser = getUser(deployer);
        if (athenzUser.isPresent()) {
            // The deployment is initiated by a user: verify the user against the service to launch.
            var zone = zoneId.orElseThrow(() -> new IllegalArgumentException("Unable to evaluate access, no zone provided in deployment"));
            var serviceToLaunch = instanceName
                    .flatMap(instance -> applicationPackage.deploymentSpec().instance(instance))
                    .flatMap(instanceSpec -> instanceSpec.athenzService(zone.environment(), zone.region()))
                    .or(() -> applicationPackage.deploymentSpec().athenzService())
                    .map(service -> new AthenzService(identityDomain.get(), service.value()));

            if(serviceToLaunch.isPresent()) {
                if (
                        ! ((AthenzFacade) accessControl).canLaunch(athenzUser.get(), serviceToLaunch.get()) && // launch privileges
                        ! ((AthenzFacade) accessControl).hasTenantAdminAccess(athenzUser.get(), identityDomain.get()) // tenant admin
                ) {
                    throw new IllegalArgumentException("User " + athenzUser.get().getFullName() + " is not allowed to launch " +
                                                       "service " + serviceToLaunch.get().getFullName() + ". " +
                                                       "Please reach out to the domain admin.");
                }
            } else {
                // The domain is configured, but no service; this is not a valid configuration for a user-initiated deployment.
                throw new IllegalArgumentException("Athenz domain configured, but no service defined for deployment to " + zone.value());
            }
        } else {
            // The deployment is not initiated by a user: require the tenant domain to match the configured domain.
            Tenant tenant = controller.tenants().require(tenantName);
            AthenzDomain tenantDomain = ((AthenzTenant) tenant).domain();
            if ( ! Objects.equals(tenantDomain, identityDomain.get()))
                throw new IllegalArgumentException("Athenz domain in deployment.xml: [" + identityDomain.get().getName() + "] " +
                                                   "must match tenant domain: [" + tenantDomain.getName() + "]");
        }
    }

    private TenantAndApplicationId dashToUnderscore(TenantAndApplicationId id) {
        return TenantAndApplicationId.from(id.tenant().value(), id.application().value().replaceAll("-", "_"));
    }

    private ApplicationId dashToUnderscore(ApplicationId id) {
        return dashToUnderscore(TenantAndApplicationId.from(id)).instance(id.instance());
    }

    private QuotaUsage deploymentQuotaUsage(ZoneId zoneId, ApplicationId applicationId) {
        var application = configServer.nodeRepository().getApplication(zoneId, applicationId);
        return DeploymentQuotaCalculator.calculateQuotaUsage(application);
    }

    /*
     * Get the AthenzUser from this principal or Optional.empty if this does not represent a user.
     */
    private Optional<AthenzUser> getUser(Optional<Principal> deployer) {
        return deployer
                .filter(AthenzPrincipal.class::isInstance)
                .map(AthenzPrincipal.class::cast)
                .map(AthenzPrincipal::getIdentity)
                .filter(AthenzUser.class::isInstance)
                .map(AthenzUser.class::cast);
    }

    /*
     * Verifies that the configured athenz service (if any) can be launched.
     */
    private void verifyAllowedLaunchAthenzService(DeploymentSpec deploymentSpec) {
        deploymentSpec.athenzDomain().ifPresent(domain -> {
            controller.zoneRegistry().zones().reachable().ids().forEach(zone -> {
                AthenzIdentity configServerAthenzIdentity = controller.zoneRegistry().getConfigServerHttpsIdentity(zone);
                // Check both the application-level service and each instance-level override.
                deploymentSpec.athenzService().ifPresent(service -> {
                    verifyAthenzServiceCanBeLaunchedBy(configServerAthenzIdentity, new AthenzService(domain.value(), service.value()));
                });
                deploymentSpec.instances().forEach(spec -> {
                    spec.athenzService(zone.environment(), zone.region()).ifPresent(service -> {
                        verifyAthenzServiceCanBeLaunchedBy(configServerAthenzIdentity, new AthenzService(domain.value(), service.value()));
                    });
                });
            });
        });
    }

    private void verifyAthenzServiceCanBeLaunchedBy(AthenzIdentity configServerAthenzIdentity, AthenzService athenzService) {
        if ( ! ((AthenzFacade) accessControl).canLaunch(configServerAthenzIdentity, athenzService))
            throw new IllegalArgumentException("Not allowed to launch Athenz service " + athenzService.getFullName());
    }

    /** Returns the latest known version within the given major, which is not newer than the system version. */
    public Optional<Version> lastCompatibleVersion(int targetMajorVersion) {
        VersionStatus versions = controller.readVersionStatus();
        Version systemVersion = controller.systemVersion(versions);
        return versions.versions().stream()
                       .map(VespaVersion::versionNumber)
                       .filter(version -> version.getMajor() == targetMajorVersion)
                       .filter(version -> ! version.isAfter(systemVersion))
                       .max(naturalOrder());
    }

    /** Extract deployment warnings metric from deployment result */
    private static Map<DeploymentMetrics.Warning, Integer> warningsFrom(ActivateResult result) {
        if (result.prepareResponse().log == null) return Map.of();
        Map<DeploymentMetrics.Warning, Integer> warnings = new HashMap<>();
        for (Log log : result.prepareResponse().log) {
            // Count entries whose level is "warn" or "warning"; other levels are ignored.
            if (!"warn".equalsIgnoreCase(log.level) && !"warning".equalsIgnoreCase(log.level)) continue;
            warnings.merge(DeploymentMetrics.Warning.all, 1, Integer::sum);
        }
        return Map.copyOf(warnings);
    }

}
Can you change the last part to "or old certificate"? It is ambiguous what "one" refers to in "old one has expired".
/**
 * Runs the given deployment and translates known config server and certificate errors
 * into an appropriate run status.
 *
 * @param deployment supplier which performs the deployment and yields its result
 * @param startTime  when this deployment step first started; used for timeout decisions
 * @param logger     dual logger which records entries for both the run log and the controller log
 * @return the new status of the run, or empty if the step should simply be retried later
 */
private Optional<RunStatus> deploy(Supplier<ActivateResult> deployment, Instant startTime, DualLogger logger) {
    try {
        PrepareResponse prepareResponse = deployment.get().prepareResponse();
        if (prepareResponse.log != null)
            logger.logAll(prepareResponse.log.stream()
                                             .map(entry -> new LogEntry(0,
                                                                        Instant.ofEpochMilli(entry.time),
                                                                        LogEntry.typeOf(LogLevel.parse(entry.level)),
                                                                        entry.message))
                                             .collect(toList()));

        logger.log("Deployment successful.");
        if (prepareResponse.message != null)
            logger.log(prepareResponse.message);

        return Optional.of(running);
    }
    catch (ConfigServerException e) {
        // Transient errors become failures only after having been retried for one hour.
        Optional<RunStatus> result = startTime.isBefore(controller.clock().instant().minus(Duration.ofHours(1)))
                                     ? Optional.of(deploymentFailed) : Optional.empty();
        switch (e.code()) {
            case CERTIFICATE_NOT_READY:
                logger.log("Waiting for certificate to become ready on config server: New application, or old one has expired");
                if (startTime.plus(timeouts.endpointCertificate()).isBefore(controller.clock().instant())) {
                    logger.log(WARNING, "Certificate did not become available on config server within (" + timeouts.endpointCertificate() + ")");
                    return Optional.of(RunStatus.endpointCertificateTimeout);
                }
                return result;
            case ACTIVATION_CONFLICT:
            case APPLICATION_LOCK_FAILURE:
            case CONFIG_NOT_CONVERGED:
                logger.log("Deployment failed with possibly transient error " + e.code() + ", will retry: " + e.getMessage());
                return result;
            case LOAD_BALANCER_NOT_READY:
            case PARENT_HOST_NOT_READY:
                logger.log(e.message());
                return result;
            case OUT_OF_CAPACITY:
                // In CD systems, allow some time for capacity to become available.
                logger.log(e.message());
                return controller.system().isCd() && startTime.plus(timeouts.capacity()).isAfter(controller.clock().instant())
                       ? result : Optional.of(outOfCapacity);
            case INVALID_APPLICATION_PACKAGE:
            case BAD_REQUEST:
                logger.log(WARNING, e.getMessage());
                return Optional.of(deploymentFailed);
        }
        throw e; // Propagate unknown config server error codes.
    }
    catch (EndpointCertificateException e) {
        switch (e.type()) {
            case CERT_NOT_AVAILABLE:
                // Reworded from "or old one has expired" — the referent of "one" was ambiguous.
                logger.log("Waiting for certificate to become valid: new application, or old certificate has expired");
                if (startTime.plus(timeouts.endpointCertificate()).isBefore(controller.clock().instant())) {
                    logger.log(WARNING, "Controller could not validate certificate within " +
                                        timeouts.endpointCertificate() + ": " + Exceptions.toMessageString(e));
                    return Optional.of(RunStatus.endpointCertificateTimeout);
                }
                return Optional.empty();
            default:
                throw e; // Any other certificate error is fatal for this attempt.
        }
    }
}
logger.log("Waiting for certificate to become valid: new application, or old one has expired");
/**
 * Runs the given deployment and translates known config server and certificate errors
 * into an appropriate run status.
 *
 * @param deployment supplier which performs the deployment and yields its result
 * @param startTime  when this deployment step first started; used for timeout decisions
 * @param logger     dual logger which records entries for both the run log and the controller log
 * @return the new status of the run, or empty if the step should simply be retried later
 */
private Optional<RunStatus> deploy(Supplier<ActivateResult> deployment, Instant startTime, DualLogger logger) {
    try {
        PrepareResponse prepareResponse = deployment.get().prepareResponse();
        if (prepareResponse.log != null)
            logger.logAll(prepareResponse.log.stream()
                                             .map(entry -> new LogEntry(0,
                                                                        Instant.ofEpochMilli(entry.time),
                                                                        LogEntry.typeOf(LogLevel.parse(entry.level)),
                                                                        entry.message))
                                             .collect(toList()));

        logger.log("Deployment successful.");
        if (prepareResponse.message != null)
            logger.log(prepareResponse.message);

        return Optional.of(running);
    }
    catch (ConfigServerException e) {
        // Transient errors become failures only after having been retried for one hour.
        Optional<RunStatus> result = startTime.isBefore(controller.clock().instant().minus(Duration.ofHours(1)))
                                     ? Optional.of(deploymentFailed) : Optional.empty();
        switch (e.code()) {
            case CERTIFICATE_NOT_READY:
                logger.log("Waiting for certificate to become ready on config server: New application, or old one has expired");
                if (startTime.plus(timeouts.endpointCertificate()).isBefore(controller.clock().instant())) {
                    logger.log(WARNING, "Certificate did not become available on config server within (" + timeouts.endpointCertificate() + ")");
                    return Optional.of(RunStatus.endpointCertificateTimeout);
                }
                return result;
            case ACTIVATION_CONFLICT:
            case APPLICATION_LOCK_FAILURE:
            case CONFIG_NOT_CONVERGED:
                logger.log("Deployment failed with possibly transient error " + e.code() + ", will retry: " + e.getMessage());
                return result;
            case LOAD_BALANCER_NOT_READY:
            case PARENT_HOST_NOT_READY:
                logger.log(e.message());
                return result;
            case OUT_OF_CAPACITY:
                // In CD systems, allow some time for capacity to become available.
                logger.log(e.message());
                return controller.system().isCd() && startTime.plus(timeouts.capacity()).isAfter(controller.clock().instant())
                       ? result : Optional.of(outOfCapacity);
            case INVALID_APPLICATION_PACKAGE:
            case BAD_REQUEST:
                logger.log(WARNING, e.getMessage());
                return Optional.of(deploymentFailed);
        }
        throw e; // Propagate unknown config server error codes.
    }
    catch (EndpointCertificateException e) {
        switch (e.type()) {
            case CERT_NOT_AVAILABLE:
                logger.log("Waiting for certificate to become valid: new application, or old certificate has expired");
                if (startTime.plus(timeouts.endpointCertificate()).isBefore(controller.clock().instant())) {
                    logger.log(WARNING, "Controller could not validate certificate within " +
                                        timeouts.endpointCertificate() + ": " + Exceptions.toMessageString(e));
                    return Optional.of(RunStatus.endpointCertificateTimeout);
                }
                return Optional.empty();
            default:
                throw e; // Any other certificate error is fatal for this attempt.
        }
    }
}
class InternalStepRunner implements StepRunner { private static final Logger logger = Logger.getLogger(InternalStepRunner.class.getName()); static final NodeResources DEFAULT_TESTER_RESOURCES = new NodeResources(1, 4, 50, 0.3, NodeResources.DiskSpeed.any); static final NodeResources DEFAULT_TESTER_RESOURCES_AWS = new NodeResources(2, 8, 50, 0.3, NodeResources.DiskSpeed.any); private final Controller controller; private final TestConfigSerializer testConfigSerializer; private final DeploymentFailureMails mails; private final Timeouts timeouts; public InternalStepRunner(Controller controller) { this.controller = controller; this.testConfigSerializer = new TestConfigSerializer(controller.system()); this.mails = new DeploymentFailureMails(controller.zoneRegistry()); this.timeouts = Timeouts.of(controller.system()); } @Override public Optional<RunStatus> run(LockedStep step, RunId id) { DualLogger logger = new DualLogger(id, step.get()); try { switch (step.get()) { case deployTester: return deployTester(id, logger); case deployInitialReal: return deployInitialReal(id, logger); case installInitialReal: return installInitialReal(id, logger); case deployReal: return deployReal(id, logger); case installTester: return installTester(id, logger); case installReal: return installReal(id, logger); case startStagingSetup: return startTests(id, true, logger); case endStagingSetup: case endTests: return endTests(id, logger); case startTests: return startTests(id, false, logger); case copyVespaLogs: return copyVespaLogs(id, logger); case deactivateReal: return deactivateReal(id, logger); case deactivateTester: return deactivateTester(id, logger); case report: return report(id, logger); default: throw new AssertionError("Unknown step '" + step + "'!"); } } catch (UncheckedIOException e) { logger.logWithInternalException(INFO, "IO exception running " + id + ": " + Exceptions.toMessageString(e), e); return Optional.empty(); } catch (RuntimeException e) { logger.log(WARNING, "Unexpected 
exception running " + id, e); if (step.get().alwaysRun()) { logger.log("Will keep trying, as this is a cleanup step."); return Optional.empty(); } return Optional.of(error); } } private Optional<RunStatus> deployInitialReal(RunId id, DualLogger logger) { Versions versions = controller.jobController().run(id).get().versions(); logger.log("Deploying platform version " + versions.sourcePlatform().orElse(versions.targetPlatform()) + " and application version " + versions.sourceApplication().orElse(versions.targetApplication()).id() + " ..."); return deployReal(id, true, logger); } private Optional<RunStatus> deployReal(RunId id, DualLogger logger) { Versions versions = controller.jobController().run(id).get().versions(); logger.log("Deploying platform version " + versions.targetPlatform() + " and application version " + versions.targetApplication().id() + " ..."); return deployReal(id, false, logger); } private Optional<RunStatus> deployReal(RunId id, boolean setTheStage, DualLogger logger) { Optional<X509Certificate> testerCertificate = controller.jobController().run(id).get().testerCertificate(); return deploy(() -> controller.applications().deploy(id.job(), setTheStage), controller.jobController().run(id).get() .stepInfo(setTheStage ? deployInitialReal : deployReal).get() .startTime().get(), logger) .filter(result -> { if ( ! 
useTesterCertificate(id) || result != running) return true; return controller.jobController().run(id).get().stepStatus(deployTester).get() == succeeded && testerCertificate.equals(controller.jobController().run(id).get().testerCertificate()); }); } private Optional<RunStatus> deployTester(RunId id, DualLogger logger) { Version platform = testerPlatformVersion(id); logger.log("Deploying the tester container on platform " + platform + " ..."); return deploy(() -> controller.applications().deployTester(id.tester(), testerPackage(id), id.type().zone(controller.system()), platform), controller.jobController().run(id).get() .stepInfo(deployTester).get() .startTime().get(), logger); } @SuppressWarnings("deprecation") private Optional<RunStatus> installInitialReal(RunId id, DualLogger logger) { return installReal(id, true, logger); } private Optional<RunStatus> installReal(RunId id, DualLogger logger) { return installReal(id, false, logger); } private Optional<RunStatus> installReal(RunId id, boolean setTheStage, DualLogger logger) { Optional<Deployment> deployment = deployment(id.application(), id.type()); if (deployment.isEmpty()) { logger.log("Deployment expired before installation was successful."); return Optional.of(installationFailed); } Versions versions = controller.jobController().run(id).get().versions(); Version platform = setTheStage ? 
versions.sourcePlatform().orElse(versions.targetPlatform()) : versions.targetPlatform(); Run run = controller.jobController().run(id).get(); Optional<ServiceConvergence> services = controller.serviceRegistry().configServer().serviceConvergence(new DeploymentId(id.application(), id.type().zone(controller.system())), Optional.of(platform)); if (services.isEmpty()) { logger.log("Config status not currently available -- will retry."); return Optional.empty(); } List<Node> nodes = controller.serviceRegistry().configServer().nodeRepository().list(id.type().zone(controller.system()), NodeFilter.all() .applications(id.application()) .states(active)); Set<HostName> parentHostnames = nodes.stream().map(node -> node.parentHostname().get()).collect(toSet()); List<Node> parents = controller.serviceRegistry().configServer().nodeRepository().list(id.type().zone(controller.system()), NodeFilter.all() .hostnames(parentHostnames)); boolean firstTick = run.convergenceSummary().isEmpty(); NodeList nodeList = NodeList.of(nodes, parents, services.get()); ConvergenceSummary summary = nodeList.summary(); if (firstTick) { logger.log(" logger.log(nodeList.asList().stream() .flatMap(node -> nodeDetails(node, true)) .collect(toList())); } else if ( ! 
summary.converged()) { logger.log("Waiting for convergence of " + summary.services() + " services across " + summary.nodes() + " nodes"); if (summary.needPlatformUpgrade() > 0) logger.log(summary.upgradingPlatform() + "/" + summary.needPlatformUpgrade() + " nodes upgrading platform"); if (summary.needReboot() > 0) logger.log(summary.rebooting() + "/" + summary.needReboot() + " nodes rebooting"); if (summary.needRestart() > 0) logger.log(summary.restarting() + "/" + summary.needRestart() + " nodes restarting"); if (summary.retiring() > 0) logger.log(summary.retiring() + " nodes retiring"); if (summary.upgradingFirmware() > 0) logger.log(summary.upgradingFirmware() + " nodes upgrading firmware"); if (summary.upgradingOs() > 0) logger.log(summary.upgradingOs() + " nodes upgrading OS"); if (summary.needNewConfig() > 0) logger.log(summary.needNewConfig() + " application services upgrading"); } if (summary.converged()) { controller.jobController().locked(id, lockedRun -> lockedRun.withSummary(null)); if (endpointsAvailable(id.application(), id.type().zone(controller.system()), logger)) { if (containersAreUp(id.application(), id.type().zone(controller.system()), logger)) { logger.log("Installation succeeded!"); return Optional.of(running); } } else if (timedOut(id, deployment.get(), timeouts.endpoint())) { logger.log(WARNING, "Endpoints failed to show up within " + timeouts.endpoint().toMinutes() + " minutes!"); return Optional.of(error); } } String failureReason = null; NodeList suspendedTooLong = nodeList.isStateful() .suspendedSince(controller.clock().instant().minus(timeouts.statefulNodesDown())) .and(nodeList.not().isStateful() .suspendedSince(controller.clock().instant().minus(timeouts.statelessNodesDown())) ); if ( ! 
suspendedTooLong.isEmpty() && deployment.get().at().plus(timeouts.statelessNodesDown()).isBefore(controller.clock().instant())) { failureReason = "Some nodes have been suspended for more than the allowed threshold:\n" + suspendedTooLong.asList().stream().map(node -> node.node().hostname().value()).collect(joining("\n")); } if (run.noNodesDownSince() .map(since -> since.isBefore(controller.clock().instant().minus(timeouts.noNodesDown()))) .orElse(false)) { if (summary.needPlatformUpgrade() > 0 || summary.needReboot() > 0 || summary.needRestart() > 0) failureReason = "Timed out after waiting " + timeouts.noNodesDown().toMinutes() + " minutes for " + "nodes to suspend. This is normal if the cluster is excessively busy. " + "Nodes will continue to attempt suspension to progress installation independently of " + "this run."; else failureReason = "Nodes not able to start with new application package."; } Duration timeout = JobRunner.jobTimeout.minusHours(1); if (timedOut(id, deployment.get(), timeout)) { failureReason = "Installation failed to complete within " + timeout.toHours() + "hours!"; } if (failureReason != null) { logger.log(" logger.log(nodeList.asList().stream() .flatMap(node -> nodeDetails(node, true)) .collect(toList())); logger.log(" logger.log(nodeList.not().in(nodeList.not().needsNewConfig() .not().needsPlatformUpgrade() .not().needsReboot() .not().needsRestart() .not().needsFirmwareUpgrade() .not().needsOsUpgrade()) .asList().stream() .flatMap(node -> nodeDetails(node, true)) .collect(toList())); logger.log(INFO, failureReason); return Optional.of(installationFailed); } if ( ! firstTick) logger.log(FINE, nodeList.expectedDown().and(nodeList.needsNewConfig()).asList().stream() .distinct() .flatMap(node -> nodeDetails(node, false)) .collect(toList())); controller.jobController().locked(id, lockedRun -> { Instant noNodesDownSince = nodeList.allowedDown().size() == 0 ? 
lockedRun.noNodesDownSince().orElse(controller.clock().instant()) : null; return lockedRun.noNodesDownSince(noNodesDownSince).withSummary(summary); }); return Optional.empty(); } private Version testerPlatformVersion(RunId id) { return application(id.application()).change().isPinned() ? controller.jobController().run(id).get().versions().targetPlatform() : controller.readSystemVersion(); } private Optional<RunStatus> installTester(RunId id, DualLogger logger) { Run run = controller.jobController().run(id).get(); Version platform = testerPlatformVersion(id); ZoneId zone = id.type().zone(controller.system()); ApplicationId testerId = id.tester().id(); Optional<ServiceConvergence> services = controller.serviceRegistry().configServer().serviceConvergence(new DeploymentId(testerId, zone), Optional.of(platform)); if (services.isEmpty()) { logger.log("Config status not currently available -- will retry."); return run.stepInfo(installTester).get().startTime().get().isBefore(controller.clock().instant().minus(Duration.ofMinutes(5))) ? 
Optional.of(error) : Optional.empty(); } List<Node> nodes = controller.serviceRegistry().configServer().nodeRepository().list(zone, NodeFilter.all() .applications(testerId) .states(active, reserved)); Set<HostName> parentHostnames = nodes.stream().map(node -> node.parentHostname().get()).collect(toSet()); List<Node> parents = controller.serviceRegistry().configServer().nodeRepository().list(zone, NodeFilter.all() .hostnames(parentHostnames)); NodeList nodeList = NodeList.of(nodes, parents, services.get()); logger.log(nodeList.asList().stream() .flatMap(node -> nodeDetails(node, false)) .collect(toList())); if (nodeList.summary().converged() && testerContainersAreUp(testerId, zone, logger)) { logger.log("Tester container successfully installed!"); return Optional.of(running); } if (run.stepInfo(installTester).get().startTime().get().plus(timeouts.tester()).isBefore(controller.clock().instant())) { logger.log(WARNING, "Installation of tester failed to complete within " + timeouts.tester().toMinutes() + " minutes!"); return Optional.of(error); } return Optional.empty(); } /** Returns true iff all calls to endpoint in the deployment give 100 consecutive 200 OK responses on /status.html. */ private boolean containersAreUp(ApplicationId id, ZoneId zoneId, DualLogger logger) { var endpoints = controller.routing().readTestRunnerEndpointsOf(Set.of(new DeploymentId(id, zoneId))); if ( ! endpoints.containsKey(zoneId)) return false; return endpoints.get(zoneId).parallelStream().allMatch(endpoint -> { boolean ready = controller.jobController().cloud().ready(endpoint.url()); if (!ready) { logger.log("Failed to get 100 consecutive OKs from " + endpoint); } return ready; }); } /** Returns true iff all containers in the tester deployment give 100 consecutive 200 OK responses on /status.html. 
*/ private boolean testerContainersAreUp(ApplicationId id, ZoneId zoneId, DualLogger logger) { DeploymentId deploymentId = new DeploymentId(id, zoneId); if (controller.jobController().cloud().testerReady(deploymentId)) { return true; } else { logger.log("Failed to get 100 consecutive OKs from tester container for " + deploymentId); return false; } } private boolean endpointsAvailable(ApplicationId id, ZoneId zone, DualLogger logger) { DeploymentId deployment = new DeploymentId(id, zone); Map<ZoneId, List<Endpoint>> endpoints = controller.routing().readTestRunnerEndpointsOf(Set.of(deployment)); if ( ! endpoints.containsKey(zone)) { logger.log("Endpoints not yet ready."); return false; } for (var endpoint : endpoints.get(zone)) { HostName endpointName = HostName.from(endpoint.dnsName()); var ipAddress = controller.jobController().cloud().resolveHostName(endpointName); if (ipAddress.isEmpty()) { logger.log(INFO, "DNS lookup yielded no IP address for '" + endpointName + "'."); return false; } DeploymentRoutingContext context = controller.routing().of(deployment); if (context.routingMethod() == RoutingMethod.exclusive) { RoutingPolicy policy = context.routingPolicy(ClusterSpec.Id.from(endpoint.name())) .orElseThrow(() -> new IllegalStateException(endpoint + " has no matching policy")); var cNameValue = controller.jobController().cloud().resolveCname(endpointName); if ( ! cNameValue.map(policy.canonicalName()::equals).orElse(false)) { logger.log(INFO, "CNAME '" + endpointName + "' points at " + cNameValue.map(name -> "'" + name + "'").orElse("nothing") + " but should point at load balancer '" + policy.canonicalName() + "'"); return false; } var loadBalancerAddress = controller.jobController().cloud().resolveHostName(policy.canonicalName()); if ( ! 
loadBalancerAddress.equals(ipAddress)) { logger.log(INFO, "IP address of CNAME '" + endpointName + "' (" + ipAddress.get() + ") and load balancer '" + policy.canonicalName() + "' (" + loadBalancerAddress.orElse("empty") + ") are not equal"); return false; } } } logEndpoints(endpoints, logger); return true; } private void logEndpoints(Map<ZoneId, List<Endpoint>> zoneEndpoints, DualLogger logger) { List<String> messages = new ArrayList<>(); messages.add("Found endpoints:"); zoneEndpoints.forEach((zone, endpoints) -> { messages.add("- " + zone); for (Endpoint endpoint : endpoints) messages.add(" |-- " + endpoint.url() + " (cluster '" + endpoint.name() + "')"); }); logger.log(messages); } private Stream<String> nodeDetails(NodeWithServices node, boolean printAllServices) { return Stream.concat(Stream.of(node.node().hostname() + ": " + humanize(node.node().serviceState()) + (node.node().suspendedSince().map(since -> " since " + since).orElse("")), "--- platform " + wantedPlatform(node.node()) + (node.needsPlatformUpgrade() ? " <-- " + currentPlatform(node.node()) : "") + (node.needsOsUpgrade() && node.isAllowedDown() ? ", upgrading OS (" + node.parent().wantedOsVersion() + " <-- " + node.parent().currentOsVersion() + ")" : "") + (node.needsFirmwareUpgrade() && node.isAllowedDown() ? ", upgrading firmware" : "") + (node.needsRestart() ? ", restart pending (" + node.node().wantedRestartGeneration() + " <-- " + node.node().restartGeneration() + ")" : "") + (node.needsReboot() ? ", reboot pending (" + node.node().wantedRebootGeneration() + " <-- " + node.node().rebootGeneration() + ")" : "")), node.services().stream() .filter(service -> printAllServices || node.needsNewConfig()) .map(service -> "--- " + service.type() + " on port " + service.port() + (service.currentGeneration() == -1 ? 
" has not started " : " has config generation " + service.currentGeneration() + ", wanted is " + node.wantedConfigGeneration()))); } private String wantedPlatform(Node node) { return node.wantedDockerImage().repository() + ":" + node.wantedVersion(); } private String currentPlatform(Node node) { String currentRepo = node.currentDockerImage().repository(); String wantedRepo = node.wantedDockerImage().repository(); return (currentRepo.equals(wantedRepo) ? "" : currentRepo + ":") + node.currentVersion(); } private String humanize(Node.ServiceState state) { switch (state) { case allowedDown: return "allowed to be DOWN"; case expectedUp: return "expected to be UP"; case permanentlyDown: return "permanently DOWN"; case unorchestrated: return "unorchestrated"; default: return state.name(); } } private Optional<RunStatus> startTests(RunId id, boolean isSetup, DualLogger logger) { Optional<Deployment> deployment = deployment(id.application(), id.type()); if (deployment.isEmpty()) { logger.log(INFO, "Deployment expired before tests could start."); return Optional.of(error); } var deployments = controller.applications().requireInstance(id.application()) .productionDeployments().keySet().stream() .map(zone -> new DeploymentId(id.application(), zone)) .collect(Collectors.toSet()); ZoneId zoneId = id.type().zone(controller.system()); deployments.add(new DeploymentId(id.application(), zoneId)); logger.log("Attempting to find endpoints ..."); var endpoints = controller.routing().readTestRunnerEndpointsOf(deployments); if ( ! 
endpoints.containsKey(zoneId)) { logger.log(WARNING, "Endpoints for the deployment to test vanished again, while it was still active!"); return Optional.of(error); } logEndpoints(endpoints, logger); if (!controller.jobController().cloud().testerReady(getTesterDeploymentId(id))) { logger.log(WARNING, "Tester container went bad!"); return Optional.of(error); } logger.log("Starting tests ..."); TesterCloud.Suite suite = TesterCloud.Suite.of(id.type(), isSetup); byte[] config = testConfigSerializer.configJson(id.application(), id.type(), true, endpoints, controller.applications().reachableContentClustersByZone(deployments)); controller.jobController().cloud().startTests(getTesterDeploymentId(id), suite, config); return Optional.of(running); } private Optional<RunStatus> endTests(RunId id, DualLogger logger) { if (deployment(id.application(), id.type()).isEmpty()) { logger.log(INFO, "Deployment expired before tests could complete."); return Optional.of(aborted); } Optional<X509Certificate> testerCertificate = controller.jobController().run(id).get().testerCertificate(); if (testerCertificate.isPresent()) { try { testerCertificate.get().checkValidity(Date.from(controller.clock().instant())); } catch (CertificateExpiredException | CertificateNotYetValidException e) { logger.log(WARNING, "Tester certificate expired before tests could complete."); return Optional.of(aborted); } } controller.jobController().updateTestLog(id); TesterCloud.Status testStatus = controller.jobController().cloud().getStatus(getTesterDeploymentId(id)); switch (testStatus) { case NOT_STARTED: throw new IllegalStateException("Tester reports tests not started, even though they should have!"); case RUNNING: return Optional.empty(); case FAILURE: logger.log("Tests failed."); controller.jobController().updateTestReport(id); return Optional.of(testFailure); case ERROR: logger.log(INFO, "Tester failed running its tests!"); controller.jobController().updateTestReport(id); return Optional.of(error); case 
SUCCESS: logger.log("Tests completed successfully."); controller.jobController().updateTestReport(id); return Optional.of(running); default: throw new IllegalStateException("Unknown status '" + testStatus + "'!"); } } private Optional<RunStatus> copyVespaLogs(RunId id, DualLogger logger) { if (deployment(id.application(), id.type()).isPresent()) try { controller.jobController().updateVespaLog(id); } catch (ConfigServerException e) { Instant doom = controller.jobController().run(id).get().stepInfo(copyVespaLogs).get().startTime().get() .plus(Duration.ofMinutes(3)); if (e.code() == ConfigServerException.ErrorCode.NOT_FOUND && controller.clock().instant().isBefore(doom)) { logger.log(INFO, "Found no logs, but will retry"); return Optional.empty(); } else { logger.log(INFO, "Failure getting vespa logs for " + id, e); return Optional.of(error); } } catch (Exception e) { logger.log(INFO, "Failure getting vespa logs for " + id, e); return Optional.of(error); } return Optional.of(running); } private Optional<RunStatus> deactivateReal(RunId id, DualLogger logger) { try { logger.log("Deactivating deployment of " + id.application() + " in " + id.type().zone(controller.system()) + " ..."); controller.applications().deactivate(id.application(), id.type().zone(controller.system())); return Optional.of(running); } catch (RuntimeException e) { logger.log(WARNING, "Failed deleting application " + id.application(), e); Instant startTime = controller.jobController().run(id).get().stepInfo(deactivateReal).get().startTime().get(); return startTime.isBefore(controller.clock().instant().minus(Duration.ofHours(1))) ? 
Optional.of(error) : Optional.empty(); } } private Optional<RunStatus> deactivateTester(RunId id, DualLogger logger) { try { logger.log("Deactivating tester of " + id.application() + " in " + id.type().zone(controller.system()) + " ..."); controller.jobController().deactivateTester(id.tester(), id.type()); return Optional.of(running); } catch (RuntimeException e) { logger.log(WARNING, "Failed deleting tester of " + id.application(), e); Instant startTime = controller.jobController().run(id).get().stepInfo(deactivateTester).get().startTime().get(); return startTime.isBefore(controller.clock().instant().minus(Duration.ofHours(1))) ? Optional.of(error) : Optional.empty(); } } private Optional<RunStatus> report(RunId id, DualLogger logger) { try { controller.jobController().active(id).ifPresent(run -> { if (run.hasFailed()) sendEmailNotification(run, logger); updateConsoleNotification(run); }); } catch (IllegalStateException e) { logger.log(INFO, "Job '" + id.type() + "' no longer supposed to run?", e); return Optional.of(error); } catch (RuntimeException e) { Instant start = controller.jobController().run(id).get().stepInfo(report).get().startTime().get(); return (controller.clock().instant().isAfter(start.plusSeconds(180))) ? Optional.empty() : Optional.of(error); } return Optional.of(running); } /** Sends a mail with a notification of a failed run, if one should be sent. */ private void sendEmailNotification(Run run, DualLogger logger) { if ( ! isNewFailure(run)) return; Application application = controller.applications().requireApplication(TenantAndApplicationId.from(run.id().application())); Notifications notifications = application.deploymentSpec().requireInstance(run.id().application().instance()).notifications(); boolean newCommit = application.require(run.id().application().instance()).change().application() .map(run.versions().targetApplication()::equals) .orElse(false); When when = newCommit ? 
failingCommit : failing; List<String> recipients = new ArrayList<>(notifications.emailAddressesFor(when)); if (notifications.emailRolesFor(when).contains(author)) run.versions().targetApplication().authorEmail().ifPresent(recipients::add); if (recipients.isEmpty()) return; try { logger.log(INFO, "Sending failure notification to " + String.join(", ", recipients)); mailOf(run, recipients).ifPresent(controller.serviceRegistry().mailer()::send); } catch (RuntimeException e) { logger.log(WARNING, "Exception trying to send mail for " + run.id(), e); } } private boolean isNewFailure(Run run) { return controller.jobController().lastCompleted(run.id().job()) .map(previous -> ! previous.hasFailed() || ! previous.versions().targetsMatch(run.versions())) .orElse(true); } private void updateConsoleNotification(Run run) { NotificationSource source = NotificationSource.from(run.id()); Consumer<String> updater = msg -> controller.notificationsDb().setNotification(source, Notification.Type.deployment, Notification.Level.error, msg); switch (run.status()) { case aborted: return; case running: case success: controller.notificationsDb().removeNotification(source, Notification.Type.deployment); return; case outOfCapacity: if ( ! run.id().type().environment().isTest()) updater.accept("lack of capacity. Please contact the Vespa team to request more!"); return; case deploymentFailed: updater.accept("invalid application configuration, or timeout of other deployments of the same application"); return; case installationFailed: updater.accept("nodes were not able to upgrade to the new configuration"); return; case testFailure: updater.accept("one or more verification tests against the deployment failed"); return; case error: case endpointCertificateTimeout: break; default: logger.log(WARNING, "Don't know what to set console notification to for run status '" + run.status() + "'"); } updater.accept("something in the framework went wrong. Such errors are " + "usually transient. 
Please contact the Vespa team if the problem persists!"); } private Optional<Mail> mailOf(Run run, List<String> recipients) { switch (run.status()) { case running: case aborted: case success: return Optional.empty(); case outOfCapacity: return run.id().type().isProduction() ? Optional.of(mails.outOfCapacity(run.id(), recipients)) : Optional.empty(); case deploymentFailed: return Optional.of(mails.deploymentFailure(run.id(), recipients)); case installationFailed: return Optional.of(mails.installationFailure(run.id(), recipients)); case testFailure: return Optional.of(mails.testFailure(run.id(), recipients)); case error: case endpointCertificateTimeout: return Optional.of(mails.systemError(run.id(), recipients)); default: logger.log(WARNING, "Don't know what mail to send for run status '" + run.status() + "'"); return Optional.of(mails.systemError(run.id(), recipients)); } } /** Returns the deployment of the real application in the zone of the given job, if it exists. */ private Optional<Deployment> deployment(ApplicationId id, JobType type) { return Optional.ofNullable(application(id).deployments().get(type.zone(controller.system()))); } /** Returns the real application with the given id. */ private Instance application(ApplicationId id) { controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(id), __ -> { }); return controller.applications().requireInstance(id); } /** * Returns whether the time since deployment is more than the zone deployment expiry, or the given timeout. * * We time out the job before the deployment expires, for zones where deployments are not persistent, * to be able to collect the Vespa log from the deployment. Thus, the lower of the zone's deployment expiry, * and the given default installation timeout, minus one minute, is used as a timeout threshold. */ private boolean timedOut(RunId id, Deployment deployment, Duration defaultTimeout) { Run run = controller.jobController().run(id).get(); if ( ! 
controller.system().isCd() && run.start().isAfter(deployment.at())) return false; Duration timeout = controller.zoneRegistry().getDeploymentTimeToLive(deployment.zone()) .filter(zoneTimeout -> zoneTimeout.compareTo(defaultTimeout) < 0) .orElse(defaultTimeout); return deployment.at().isBefore(controller.clock().instant().minus(timeout.minus(Duration.ofMinutes(1)))); } private boolean useTesterCertificate(RunId id) { return controller.system().isPublic() && id.type().environment().isTest(); } /** Returns the application package for the tester application, assembled from a generated config, fat-jar and services.xml. */ private ApplicationPackage testerPackage(RunId id) { ApplicationVersion version = controller.jobController().run(id).get().versions().targetApplication(); DeploymentSpec spec = controller.applications().requireApplication(TenantAndApplicationId.from(id.application())).deploymentSpec(); ZoneId zone = id.type().zone(controller.system()); boolean useTesterCertificate = useTesterCertificate(id); byte[] servicesXml = servicesXml( ! 
controller.system().isPublic(), useTesterCertificate, testerResourcesFor(zone, spec.requireInstance(id.application().instance())), controller.controllerConfig().steprunner().testerapp()); byte[] testPackage = controller.applications().applicationStore().getTester(id.application().tenant(), id.application().application(), version); byte[] deploymentXml = deploymentXml(id.tester(), spec.athenzDomain(), spec.requireInstance(id.application().instance()).athenzService(zone.environment(), zone.region())); try (ZipBuilder zipBuilder = new ZipBuilder(testPackage.length + servicesXml.length + deploymentXml.length + 1000)) { zipBuilder.add(testPackage); zipBuilder.add("artifacts/.ignore-" + UUID.randomUUID(), new byte[0]); zipBuilder.add("tests/.ignore-" + UUID.randomUUID(), new byte[0]); zipBuilder.add("services.xml", servicesXml); zipBuilder.add("deployment.xml", deploymentXml); if (useTesterCertificate) appendAndStoreCertificate(zipBuilder, id); zipBuilder.close(); return new ApplicationPackage(zipBuilder.toByteArray()); } } private void appendAndStoreCertificate(ZipBuilder zipBuilder, RunId id) { KeyPair keyPair = KeyUtils.generateKeypair(KeyAlgorithm.RSA, 2048); X500Principal subject = new X500Principal("CN=" + id.tester().id().toFullString() + "." + id.type() + "." 
+ id.number()); X509Certificate certificate = X509CertificateBuilder.fromKeypair(keyPair, subject, controller.clock().instant(), controller.clock().instant().plus(timeouts.testerCertificate()), SignatureAlgorithm.SHA512_WITH_RSA, BigInteger.valueOf(1)) .build(); controller.jobController().storeTesterCertificate(id, certificate); zipBuilder.add("artifacts/key", KeyUtils.toPem(keyPair.getPrivate()).getBytes(UTF_8)); zipBuilder.add("artifacts/cert", X509CertificateUtils.toPem(certificate).getBytes(UTF_8)); } private DeploymentId getTesterDeploymentId(RunId runId) { ZoneId zoneId = runId.type().zone(controller.system()); return new DeploymentId(runId.tester().id(), zoneId); } static NodeResources testerResourcesFor(ZoneId zone, DeploymentInstanceSpec spec) { NodeResources nodeResources = spec.steps().stream() .filter(step -> step.concerns(zone.environment())) .findFirst() .flatMap(step -> step.zones().get(0).testerFlavor()) .map(NodeResources::fromLegacyName) .orElse(zone.region().value().contains("aws-") ? DEFAULT_TESTER_RESOURCES_AWS : DEFAULT_TESTER_RESOURCES); return nodeResources.with(NodeResources.DiskSpeed.any); } /** Returns the generated services.xml content for the tester application. 
*/ static byte[] servicesXml(boolean systemUsesAthenz, boolean useTesterCertificate, NodeResources resources, ControllerConfig.Steprunner.Testerapp config) { int jdiscMemoryGb = 2; int jdiscMemoryPct = (int) Math.ceil(100 * jdiscMemoryGb / resources.memoryGb()); int testMemoryMb = (int) (1024 * (resources.memoryGb() - jdiscMemoryGb) / 2); String resourceString = Text.format( "<resources vcpu=\"%.2f\" memory=\"%.2fGb\" disk=\"%.2fGb\" disk-speed=\"%s\" storage-type=\"%s\"/>", resources.vcpu(), resources.memoryGb(), resources.diskGb(), resources.diskSpeed().name(), resources.storageType().name()); String runtimeProviderClass = config.runtimeProviderClass(); String tenantCdBundle = config.tenantCdBundle(); String servicesXml = "<?xml version='1.0' encoding='UTF-8'?>\n" + "<services xmlns:deploy='vespa' version='1.0'>\n" + " <container version='1.0' id='tester'>\n" + "\n" + " <component id=\"com.yahoo.vespa.hosted.testrunner.TestRunner\" bundle=\"vespa-testrunner-components\">\n" + " <config name=\"com.yahoo.vespa.hosted.testrunner.test-runner\">\n" + " <artifactsPath>artifacts</artifactsPath>\n" + " <surefireMemoryMb>" + testMemoryMb + "</surefireMemoryMb>\n" + " <useAthenzCredentials>" + systemUsesAthenz + "</useAthenzCredentials>\n" + " <useTesterCertificate>" + useTesterCertificate + "</useTesterCertificate>\n" + " </config>\n" + " </component>\n" + "\n" + " <handler id=\"com.yahoo.vespa.testrunner.TestRunnerHandler\" bundle=\"vespa-osgi-testrunner\">\n" + " <binding>http: " </handler>\n" + "\n" + " <component id=\"" + runtimeProviderClass + "\" bundle=\"" + tenantCdBundle + "\" />\n" + "\n" + " <component id=\"com.yahoo.vespa.testrunner.JunitRunner\" bundle=\"vespa-osgi-testrunner\">\n" + " <config name=\"com.yahoo.vespa.testrunner.junit-test-runner\">\n" + " <artifactsPath>artifacts</artifactsPath>\n" + " <useAthenzCredentials>" + systemUsesAthenz + "</useAthenzCredentials>\n" + " </config>\n" + " </component>\n" + "\n" + " <component 
id=\"com.yahoo.vespa.testrunner.VespaCliTestRunner\" bundle=\"vespa-osgi-testrunner\">\n" + " <config name=\"com.yahoo.vespa.testrunner.vespa-cli-test-runner\">\n" + " <artifactsPath>artifacts</artifactsPath>\n" + " <testsPath>tests</testsPath>\n" + " <useAthenzCredentials>" + systemUsesAthenz + "</useAthenzCredentials>\n" + " </config>\n" + " </component>\n" + "\n" + " <nodes count=\"1\" allocated-memory=\"" + jdiscMemoryPct + "%\">\n" + " " + resourceString + "\n" + " </nodes>\n" + " </container>\n" + "</services>\n"; return servicesXml.getBytes(UTF_8); } /** Returns a dummy deployment xml which sets up the service identity for the tester, if present. */ private static byte[] deploymentXml(TesterId id, Optional<AthenzDomain> athenzDomain, Optional<AthenzService> athenzService) { String deploymentSpec = "<?xml version='1.0' encoding='UTF-8'?>\n" + "<deployment version=\"1.0\" " + athenzDomain.map(domain -> "athenz-domain=\"" + domain.value() + "\" ").orElse("") + athenzService.map(service -> "athenz-service=\"" + service.value() + "\" ").orElse("") + ">" + " <instance id=\"" + id.id().instance().value() + "\" />" + "</deployment>"; return deploymentSpec.getBytes(UTF_8); } /** Logger which logs to a {@link JobController}, as well as to the parent class' {@link Logger}. */ private class DualLogger { private final RunId id; private final Step step; private DualLogger(RunId id, Step step) { this.id = id; this.step = step; } private void log(String... messages) { log(INFO, List.of(messages)); } private void log(Level level, String... 
messages) { log(level, List.of(messages)); } private void logAll(List<LogEntry> messages) { controller.jobController().log(id, step, messages); } private void log(List<String> messages) { log(INFO, messages); } private void log(Level level, List<String> messages) { controller.jobController().log(id, step, level, messages); } private void log(Level level, String message) { log(level, message, null); } private void logWithInternalException(Level level, String message, Throwable thrown) { logger.log(level, id + " at " + step + ": " + message, thrown); controller.jobController().log(id, step, level, message); } private void log(Level level, String message, Throwable thrown) { logger.log(level, id + " at " + step + ": " + message, thrown); if (thrown != null) { ByteArrayOutputStream traceBuffer = new ByteArrayOutputStream(); thrown.printStackTrace(new PrintStream(traceBuffer)); message += "\n" + traceBuffer; } controller.jobController().log(id, step, level, message); } } static class Timeouts { private final SystemName system; private Timeouts(SystemName system) { this.system = requireNonNull(system); } public static Timeouts of(SystemName system) { return new Timeouts(system); } Duration capacity() { return Duration.ofMinutes(system.isCd() ? 15 : 0); } Duration endpoint() { return Duration.ofMinutes(15); } Duration endpointCertificate() { return Duration.ofMinutes(20); } Duration tester() { return Duration.ofMinutes(30); } Duration statelessNodesDown() { return Duration.ofMinutes(system.isCd() ? 30 : 60); } Duration statefulNodesDown() { return Duration.ofMinutes(system.isCd() ? 30 : 720); } Duration noNodesDown() { return Duration.ofMinutes(system.isCd() ? 30 : 240); } Duration testerCertificate() { return Duration.ofMinutes(300); } } }
class InternalStepRunner implements StepRunner { private static final Logger logger = Logger.getLogger(InternalStepRunner.class.getName()); static final NodeResources DEFAULT_TESTER_RESOURCES = new NodeResources(1, 4, 50, 0.3, NodeResources.DiskSpeed.any); static final NodeResources DEFAULT_TESTER_RESOURCES_AWS = new NodeResources(2, 8, 50, 0.3, NodeResources.DiskSpeed.any); private final Controller controller; private final TestConfigSerializer testConfigSerializer; private final DeploymentFailureMails mails; private final Timeouts timeouts; public InternalStepRunner(Controller controller) { this.controller = controller; this.testConfigSerializer = new TestConfigSerializer(controller.system()); this.mails = new DeploymentFailureMails(controller.zoneRegistry()); this.timeouts = Timeouts.of(controller.system()); } @Override public Optional<RunStatus> run(LockedStep step, RunId id) { DualLogger logger = new DualLogger(id, step.get()); try { switch (step.get()) { case deployTester: return deployTester(id, logger); case deployInitialReal: return deployInitialReal(id, logger); case installInitialReal: return installInitialReal(id, logger); case deployReal: return deployReal(id, logger); case installTester: return installTester(id, logger); case installReal: return installReal(id, logger); case startStagingSetup: return startTests(id, true, logger); case endStagingSetup: case endTests: return endTests(id, logger); case startTests: return startTests(id, false, logger); case copyVespaLogs: return copyVespaLogs(id, logger); case deactivateReal: return deactivateReal(id, logger); case deactivateTester: return deactivateTester(id, logger); case report: return report(id, logger); default: throw new AssertionError("Unknown step '" + step + "'!"); } } catch (UncheckedIOException e) { logger.logWithInternalException(INFO, "IO exception running " + id + ": " + Exceptions.toMessageString(e), e); return Optional.empty(); } catch (RuntimeException e) { logger.log(WARNING, "Unexpected 
exception running " + id, e); if (step.get().alwaysRun()) { logger.log("Will keep trying, as this is a cleanup step."); return Optional.empty(); } return Optional.of(error); } } private Optional<RunStatus> deployInitialReal(RunId id, DualLogger logger) { Versions versions = controller.jobController().run(id).get().versions(); logger.log("Deploying platform version " + versions.sourcePlatform().orElse(versions.targetPlatform()) + " and application version " + versions.sourceApplication().orElse(versions.targetApplication()).id() + " ..."); return deployReal(id, true, logger); } private Optional<RunStatus> deployReal(RunId id, DualLogger logger) { Versions versions = controller.jobController().run(id).get().versions(); logger.log("Deploying platform version " + versions.targetPlatform() + " and application version " + versions.targetApplication().id() + " ..."); return deployReal(id, false, logger); } private Optional<RunStatus> deployReal(RunId id, boolean setTheStage, DualLogger logger) { Optional<X509Certificate> testerCertificate = controller.jobController().run(id).get().testerCertificate(); return deploy(() -> controller.applications().deploy(id.job(), setTheStage), controller.jobController().run(id).get() .stepInfo(setTheStage ? deployInitialReal : deployReal).get() .startTime().get(), logger) .filter(result -> { if ( ! 
useTesterCertificate(id) || result != running) return true; return controller.jobController().run(id).get().stepStatus(deployTester).get() == succeeded && testerCertificate.equals(controller.jobController().run(id).get().testerCertificate()); }); } private Optional<RunStatus> deployTester(RunId id, DualLogger logger) { Version platform = testerPlatformVersion(id); logger.log("Deploying the tester container on platform " + platform + " ..."); return deploy(() -> controller.applications().deployTester(id.tester(), testerPackage(id), id.type().zone(controller.system()), platform), controller.jobController().run(id).get() .stepInfo(deployTester).get() .startTime().get(), logger); } @SuppressWarnings("deprecation") private Optional<RunStatus> installInitialReal(RunId id, DualLogger logger) { return installReal(id, true, logger); } private Optional<RunStatus> installReal(RunId id, DualLogger logger) { return installReal(id, false, logger); } private Optional<RunStatus> installReal(RunId id, boolean setTheStage, DualLogger logger) { Optional<Deployment> deployment = deployment(id.application(), id.type()); if (deployment.isEmpty()) { logger.log("Deployment expired before installation was successful."); return Optional.of(installationFailed); } Versions versions = controller.jobController().run(id).get().versions(); Version platform = setTheStage ? 
versions.sourcePlatform().orElse(versions.targetPlatform()) : versions.targetPlatform(); Run run = controller.jobController().run(id).get(); Optional<ServiceConvergence> services = controller.serviceRegistry().configServer().serviceConvergence(new DeploymentId(id.application(), id.type().zone(controller.system())), Optional.of(platform)); if (services.isEmpty()) { logger.log("Config status not currently available -- will retry."); return Optional.empty(); } List<Node> nodes = controller.serviceRegistry().configServer().nodeRepository().list(id.type().zone(controller.system()), NodeFilter.all() .applications(id.application()) .states(active)); Set<HostName> parentHostnames = nodes.stream().map(node -> node.parentHostname().get()).collect(toSet()); List<Node> parents = controller.serviceRegistry().configServer().nodeRepository().list(id.type().zone(controller.system()), NodeFilter.all() .hostnames(parentHostnames)); boolean firstTick = run.convergenceSummary().isEmpty(); NodeList nodeList = NodeList.of(nodes, parents, services.get()); ConvergenceSummary summary = nodeList.summary(); if (firstTick) { logger.log(" logger.log(nodeList.asList().stream() .flatMap(node -> nodeDetails(node, true)) .collect(toList())); } else if ( ! 
summary.converged()) { logger.log("Waiting for convergence of " + summary.services() + " services across " + summary.nodes() + " nodes"); if (summary.needPlatformUpgrade() > 0) logger.log(summary.upgradingPlatform() + "/" + summary.needPlatformUpgrade() + " nodes upgrading platform"); if (summary.needReboot() > 0) logger.log(summary.rebooting() + "/" + summary.needReboot() + " nodes rebooting"); if (summary.needRestart() > 0) logger.log(summary.restarting() + "/" + summary.needRestart() + " nodes restarting"); if (summary.retiring() > 0) logger.log(summary.retiring() + " nodes retiring"); if (summary.upgradingFirmware() > 0) logger.log(summary.upgradingFirmware() + " nodes upgrading firmware"); if (summary.upgradingOs() > 0) logger.log(summary.upgradingOs() + " nodes upgrading OS"); if (summary.needNewConfig() > 0) logger.log(summary.needNewConfig() + " application services upgrading"); } if (summary.converged()) { controller.jobController().locked(id, lockedRun -> lockedRun.withSummary(null)); if (endpointsAvailable(id.application(), id.type().zone(controller.system()), logger)) { if (containersAreUp(id.application(), id.type().zone(controller.system()), logger)) { logger.log("Installation succeeded!"); return Optional.of(running); } } else if (timedOut(id, deployment.get(), timeouts.endpoint())) { logger.log(WARNING, "Endpoints failed to show up within " + timeouts.endpoint().toMinutes() + " minutes!"); return Optional.of(error); } } String failureReason = null; NodeList suspendedTooLong = nodeList.isStateful() .suspendedSince(controller.clock().instant().minus(timeouts.statefulNodesDown())) .and(nodeList.not().isStateful() .suspendedSince(controller.clock().instant().minus(timeouts.statelessNodesDown())) ); if ( ! 
suspendedTooLong.isEmpty() && deployment.get().at().plus(timeouts.statelessNodesDown()).isBefore(controller.clock().instant())) { failureReason = "Some nodes have been suspended for more than the allowed threshold:\n" + suspendedTooLong.asList().stream().map(node -> node.node().hostname().value()).collect(joining("\n")); } if (run.noNodesDownSince() .map(since -> since.isBefore(controller.clock().instant().minus(timeouts.noNodesDown()))) .orElse(false)) { if (summary.needPlatformUpgrade() > 0 || summary.needReboot() > 0 || summary.needRestart() > 0) failureReason = "Timed out after waiting " + timeouts.noNodesDown().toMinutes() + " minutes for " + "nodes to suspend. This is normal if the cluster is excessively busy. " + "Nodes will continue to attempt suspension to progress installation independently of " + "this run."; else failureReason = "Nodes not able to start with new application package."; } Duration timeout = JobRunner.jobTimeout.minusHours(1); if (timedOut(id, deployment.get(), timeout)) { failureReason = "Installation failed to complete within " + timeout.toHours() + "hours!"; } if (failureReason != null) { logger.log(" logger.log(nodeList.asList().stream() .flatMap(node -> nodeDetails(node, true)) .collect(toList())); logger.log(" logger.log(nodeList.not().in(nodeList.not().needsNewConfig() .not().needsPlatformUpgrade() .not().needsReboot() .not().needsRestart() .not().needsFirmwareUpgrade() .not().needsOsUpgrade()) .asList().stream() .flatMap(node -> nodeDetails(node, true)) .collect(toList())); logger.log(INFO, failureReason); return Optional.of(installationFailed); } if ( ! firstTick) logger.log(FINE, nodeList.expectedDown().and(nodeList.needsNewConfig()).asList().stream() .distinct() .flatMap(node -> nodeDetails(node, false)) .collect(toList())); controller.jobController().locked(id, lockedRun -> { Instant noNodesDownSince = nodeList.allowedDown().size() == 0 ? 
lockedRun.noNodesDownSince().orElse(controller.clock().instant()) : null; return lockedRun.noNodesDownSince(noNodesDownSince).withSummary(summary); }); return Optional.empty(); } private Version testerPlatformVersion(RunId id) { return application(id.application()).change().isPinned() ? controller.jobController().run(id).get().versions().targetPlatform() : controller.readSystemVersion(); } private Optional<RunStatus> installTester(RunId id, DualLogger logger) { Run run = controller.jobController().run(id).get(); Version platform = testerPlatformVersion(id); ZoneId zone = id.type().zone(controller.system()); ApplicationId testerId = id.tester().id(); Optional<ServiceConvergence> services = controller.serviceRegistry().configServer().serviceConvergence(new DeploymentId(testerId, zone), Optional.of(platform)); if (services.isEmpty()) { logger.log("Config status not currently available -- will retry."); return run.stepInfo(installTester).get().startTime().get().isBefore(controller.clock().instant().minus(Duration.ofMinutes(5))) ? 
Optional.of(error) : Optional.empty(); } List<Node> nodes = controller.serviceRegistry().configServer().nodeRepository().list(zone, NodeFilter.all() .applications(testerId) .states(active, reserved)); Set<HostName> parentHostnames = nodes.stream().map(node -> node.parentHostname().get()).collect(toSet()); List<Node> parents = controller.serviceRegistry().configServer().nodeRepository().list(zone, NodeFilter.all() .hostnames(parentHostnames)); NodeList nodeList = NodeList.of(nodes, parents, services.get()); logger.log(nodeList.asList().stream() .flatMap(node -> nodeDetails(node, false)) .collect(toList())); if (nodeList.summary().converged() && testerContainersAreUp(testerId, zone, logger)) { logger.log("Tester container successfully installed!"); return Optional.of(running); } if (run.stepInfo(installTester).get().startTime().get().plus(timeouts.tester()).isBefore(controller.clock().instant())) { logger.log(WARNING, "Installation of tester failed to complete within " + timeouts.tester().toMinutes() + " minutes!"); return Optional.of(error); } return Optional.empty(); } /** Returns true iff all calls to endpoint in the deployment give 100 consecutive 200 OK responses on /status.html. */ private boolean containersAreUp(ApplicationId id, ZoneId zoneId, DualLogger logger) { var endpoints = controller.routing().readTestRunnerEndpointsOf(Set.of(new DeploymentId(id, zoneId))); if ( ! endpoints.containsKey(zoneId)) return false; return endpoints.get(zoneId).parallelStream().allMatch(endpoint -> { boolean ready = controller.jobController().cloud().ready(endpoint.url()); if (!ready) { logger.log("Failed to get 100 consecutive OKs from " + endpoint); } return ready; }); } /** Returns true iff all containers in the tester deployment give 100 consecutive 200 OK responses on /status.html. 
*/ private boolean testerContainersAreUp(ApplicationId id, ZoneId zoneId, DualLogger logger) { DeploymentId deploymentId = new DeploymentId(id, zoneId); if (controller.jobController().cloud().testerReady(deploymentId)) { return true; } else { logger.log("Failed to get 100 consecutive OKs from tester container for " + deploymentId); return false; } } private boolean endpointsAvailable(ApplicationId id, ZoneId zone, DualLogger logger) { DeploymentId deployment = new DeploymentId(id, zone); Map<ZoneId, List<Endpoint>> endpoints = controller.routing().readTestRunnerEndpointsOf(Set.of(deployment)); if ( ! endpoints.containsKey(zone)) { logger.log("Endpoints not yet ready."); return false; } for (var endpoint : endpoints.get(zone)) { HostName endpointName = HostName.from(endpoint.dnsName()); var ipAddress = controller.jobController().cloud().resolveHostName(endpointName); if (ipAddress.isEmpty()) { logger.log(INFO, "DNS lookup yielded no IP address for '" + endpointName + "'."); return false; } DeploymentRoutingContext context = controller.routing().of(deployment); if (context.routingMethod() == RoutingMethod.exclusive) { RoutingPolicy policy = context.routingPolicy(ClusterSpec.Id.from(endpoint.name())) .orElseThrow(() -> new IllegalStateException(endpoint + " has no matching policy")); var cNameValue = controller.jobController().cloud().resolveCname(endpointName); if ( ! cNameValue.map(policy.canonicalName()::equals).orElse(false)) { logger.log(INFO, "CNAME '" + endpointName + "' points at " + cNameValue.map(name -> "'" + name + "'").orElse("nothing") + " but should point at load balancer '" + policy.canonicalName() + "'"); return false; } var loadBalancerAddress = controller.jobController().cloud().resolveHostName(policy.canonicalName()); if ( ! 
loadBalancerAddress.equals(ipAddress)) { logger.log(INFO, "IP address of CNAME '" + endpointName + "' (" + ipAddress.get() + ") and load balancer '" + policy.canonicalName() + "' (" + loadBalancerAddress.orElse("empty") + ") are not equal"); return false; } } } logEndpoints(endpoints, logger); return true; } private void logEndpoints(Map<ZoneId, List<Endpoint>> zoneEndpoints, DualLogger logger) { List<String> messages = new ArrayList<>(); messages.add("Found endpoints:"); zoneEndpoints.forEach((zone, endpoints) -> { messages.add("- " + zone); for (Endpoint endpoint : endpoints) messages.add(" |-- " + endpoint.url() + " (cluster '" + endpoint.name() + "')"); }); logger.log(messages); } private Stream<String> nodeDetails(NodeWithServices node, boolean printAllServices) { return Stream.concat(Stream.of(node.node().hostname() + ": " + humanize(node.node().serviceState()) + (node.node().suspendedSince().map(since -> " since " + since).orElse("")), "--- platform " + wantedPlatform(node.node()) + (node.needsPlatformUpgrade() ? " <-- " + currentPlatform(node.node()) : "") + (node.needsOsUpgrade() && node.isAllowedDown() ? ", upgrading OS (" + node.parent().wantedOsVersion() + " <-- " + node.parent().currentOsVersion() + ")" : "") + (node.needsFirmwareUpgrade() && node.isAllowedDown() ? ", upgrading firmware" : "") + (node.needsRestart() ? ", restart pending (" + node.node().wantedRestartGeneration() + " <-- " + node.node().restartGeneration() + ")" : "") + (node.needsReboot() ? ", reboot pending (" + node.node().wantedRebootGeneration() + " <-- " + node.node().rebootGeneration() + ")" : "")), node.services().stream() .filter(service -> printAllServices || node.needsNewConfig()) .map(service -> "--- " + service.type() + " on port " + service.port() + (service.currentGeneration() == -1 ? 
" has not started " : " has config generation " + service.currentGeneration() + ", wanted is " + node.wantedConfigGeneration()))); } private String wantedPlatform(Node node) { return node.wantedDockerImage().repository() + ":" + node.wantedVersion(); } private String currentPlatform(Node node) { String currentRepo = node.currentDockerImage().repository(); String wantedRepo = node.wantedDockerImage().repository(); return (currentRepo.equals(wantedRepo) ? "" : currentRepo + ":") + node.currentVersion(); } private String humanize(Node.ServiceState state) { switch (state) { case allowedDown: return "allowed to be DOWN"; case expectedUp: return "expected to be UP"; case permanentlyDown: return "permanently DOWN"; case unorchestrated: return "unorchestrated"; default: return state.name(); } } private Optional<RunStatus> startTests(RunId id, boolean isSetup, DualLogger logger) { Optional<Deployment> deployment = deployment(id.application(), id.type()); if (deployment.isEmpty()) { logger.log(INFO, "Deployment expired before tests could start."); return Optional.of(error); } var deployments = controller.applications().requireInstance(id.application()) .productionDeployments().keySet().stream() .map(zone -> new DeploymentId(id.application(), zone)) .collect(Collectors.toSet()); ZoneId zoneId = id.type().zone(controller.system()); deployments.add(new DeploymentId(id.application(), zoneId)); logger.log("Attempting to find endpoints ..."); var endpoints = controller.routing().readTestRunnerEndpointsOf(deployments); if ( ! 
endpoints.containsKey(zoneId)) { logger.log(WARNING, "Endpoints for the deployment to test vanished again, while it was still active!"); return Optional.of(error); } logEndpoints(endpoints, logger); if (!controller.jobController().cloud().testerReady(getTesterDeploymentId(id))) { logger.log(WARNING, "Tester container went bad!"); return Optional.of(error); } logger.log("Starting tests ..."); TesterCloud.Suite suite = TesterCloud.Suite.of(id.type(), isSetup); byte[] config = testConfigSerializer.configJson(id.application(), id.type(), true, endpoints, controller.applications().reachableContentClustersByZone(deployments)); controller.jobController().cloud().startTests(getTesterDeploymentId(id), suite, config); return Optional.of(running); } private Optional<RunStatus> endTests(RunId id, DualLogger logger) { if (deployment(id.application(), id.type()).isEmpty()) { logger.log(INFO, "Deployment expired before tests could complete."); return Optional.of(aborted); } Optional<X509Certificate> testerCertificate = controller.jobController().run(id).get().testerCertificate(); if (testerCertificate.isPresent()) { try { testerCertificate.get().checkValidity(Date.from(controller.clock().instant())); } catch (CertificateExpiredException | CertificateNotYetValidException e) { logger.log(WARNING, "Tester certificate expired before tests could complete."); return Optional.of(aborted); } } controller.jobController().updateTestLog(id); TesterCloud.Status testStatus = controller.jobController().cloud().getStatus(getTesterDeploymentId(id)); switch (testStatus) { case NOT_STARTED: throw new IllegalStateException("Tester reports tests not started, even though they should have!"); case RUNNING: return Optional.empty(); case FAILURE: logger.log("Tests failed."); controller.jobController().updateTestReport(id); return Optional.of(testFailure); case ERROR: logger.log(INFO, "Tester failed running its tests!"); controller.jobController().updateTestReport(id); return Optional.of(error); case 
SUCCESS:
                logger.log("Tests completed successfully.");
                controller.jobController().updateTestReport(id);
                return Optional.of(running);
            default:
                throw new IllegalStateException("Unknown status '" + testStatus + "'!");
        }
    }

    /**
     * Copies the Vespa logs of the tested deployment into this run's log, if the deployment still exists.
     * Retries (empty result) on NOT_FOUND from the config server for up to three minutes after the step
     * started; any other failure ends the step with error.
     */
    private Optional<RunStatus> copyVespaLogs(RunId id, DualLogger logger) {
        if (deployment(id.application(), id.type()).isPresent())
            try {
                controller.jobController().updateVespaLog(id);
            }
            catch (ConfigServerException e) {
                // Logs may not be available right away; allow retries until three minutes after the step start.
                Instant doom = controller.jobController().run(id).get().stepInfo(copyVespaLogs).get().startTime().get()
                                         .plus(Duration.ofMinutes(3));
                if (e.code() == ConfigServerException.ErrorCode.NOT_FOUND && controller.clock().instant().isBefore(doom)) {
                    logger.log(INFO, "Found no logs, but will retry");
                    return Optional.empty();
                }
                else {
                    logger.log(INFO, "Failure getting vespa logs for " + id, e);
                    return Optional.of(error);
                }
            }
            catch (Exception e) {
                logger.log(INFO, "Failure getting vespa logs for " + id, e);
                return Optional.of(error);
            }
        return Optional.of(running);
    }

    /** Deactivates the real deployment of this run's application, tolerating failures for up to one hour before erroring out. */
    private Optional<RunStatus> deactivateReal(RunId id, DualLogger logger) {
        try {
            logger.log("Deactivating deployment of " + id.application() + " in " + id.type().zone(controller.system()) + " ...");
            controller.applications().deactivate(id.application(), id.type().zone(controller.system()));
            return Optional.of(running);
        }
        catch (RuntimeException e) {
            logger.log(WARNING, "Failed deleting application " + id.application(), e);
            // Keep retrying (empty result) until an hour has passed since this step started.
            Instant startTime = controller.jobController().run(id).get().stepInfo(deactivateReal).get().startTime().get();
            return startTime.isBefore(controller.clock().instant().minus(Duration.ofHours(1))) ? Optional.of(error) : Optional.empty();
        }
    }

    /** Deactivates the tester of this run, tolerating failures for up to one hour before erroring out. */
    private Optional<RunStatus> deactivateTester(RunId id, DualLogger logger) {
        try {
            logger.log("Deactivating tester of " + id.application() + " in " + id.type().zone(controller.system()) + " ...");
            controller.jobController().deactivateTester(id.tester(), id.type());
            return Optional.of(running);
        }
        catch (RuntimeException e) {
            logger.log(WARNING, "Failed deleting tester of " + id.application(), e);
            Instant startTime = controller.jobController().run(id).get().stepInfo(deactivateTester).get().startTime().get();
            return startTime.isBefore(controller.clock().instant().minus(Duration.ofHours(1))) ? Optional.of(error) : Optional.empty();
        }
    }

    /**
     * Reports the outcome of the run, while it is still active: sends an email notification when it has
     * failed, and updates the console notification accordingly.
     */
    private Optional<RunStatus> report(RunId id, DualLogger logger) {
        try {
            controller.jobController().active(id).ifPresent(run -> {
                if (run.hasFailed())
                    sendEmailNotification(run, logger);

                updateConsoleNotification(run);
            });
        }
        catch (IllegalStateException e) {
            logger.log(INFO, "Job '" + id.type() + "' no longer supposed to run?", e);
            return Optional.of(error);
        }
        catch (RuntimeException e) {
            // NOTE(review): this retries (empty) only AFTER 180 s have passed since the step started, and
            // fails with error before that — looks inverted relative to the other retry loops; confirm intent.
            Instant start = controller.jobController().run(id).get().stepInfo(report).get().startTime().get();
            return (controller.clock().instant().isAfter(start.plusSeconds(180))) ? Optional.empty() : Optional.of(error);
        }
        return Optional.of(running);
    }

    /** Sends a mail with a notification of a failed run, if one should be sent. */
    private void sendEmailNotification(Run run, DualLogger logger) {
        // Only notify on the first failure for a given version combination.
        if ( ! isNewFailure(run))
            return;

        Application application = controller.applications().requireApplication(TenantAndApplicationId.from(run.id().application()));
        Notifications notifications = application.deploymentSpec().requireInstance(run.id().application().instance()).notifications();
        // A failure on a newly submitted commit targets the "failingCommit" audience; otherwise "failing".
        boolean newCommit = application.require(run.id().application().instance()).change().application()
                                       .map(run.versions().targetApplication()::equals)
                                       .orElse(false);
        When when = newCommit ?
failingCommit : failing; List<String> recipients = new ArrayList<>(notifications.emailAddressesFor(when)); if (notifications.emailRolesFor(when).contains(author)) run.versions().targetApplication().authorEmail().ifPresent(recipients::add); if (recipients.isEmpty()) return; try { logger.log(INFO, "Sending failure notification to " + String.join(", ", recipients)); mailOf(run, recipients).ifPresent(controller.serviceRegistry().mailer()::send); } catch (RuntimeException e) { logger.log(WARNING, "Exception trying to send mail for " + run.id(), e); } } private boolean isNewFailure(Run run) { return controller.jobController().lastCompleted(run.id().job()) .map(previous -> ! previous.hasFailed() || ! previous.versions().targetsMatch(run.versions())) .orElse(true); } private void updateConsoleNotification(Run run) { NotificationSource source = NotificationSource.from(run.id()); Consumer<String> updater = msg -> controller.notificationsDb().setNotification(source, Notification.Type.deployment, Notification.Level.error, msg); switch (run.status()) { case aborted: return; case running: case success: controller.notificationsDb().removeNotification(source, Notification.Type.deployment); return; case outOfCapacity: if ( ! run.id().type().environment().isTest()) updater.accept("lack of capacity. Please contact the Vespa team to request more!"); return; case deploymentFailed: updater.accept("invalid application configuration, or timeout of other deployments of the same application"); return; case installationFailed: updater.accept("nodes were not able to upgrade to the new configuration"); return; case testFailure: updater.accept("one or more verification tests against the deployment failed"); return; case error: case endpointCertificateTimeout: break; default: logger.log(WARNING, "Don't know what to set console notification to for run status '" + run.status() + "'"); } updater.accept("something in the framework went wrong. Such errors are " + "usually transient. 
Please contact the Vespa team if the problem persists!"); } private Optional<Mail> mailOf(Run run, List<String> recipients) { switch (run.status()) { case running: case aborted: case success: return Optional.empty(); case outOfCapacity: return run.id().type().isProduction() ? Optional.of(mails.outOfCapacity(run.id(), recipients)) : Optional.empty(); case deploymentFailed: return Optional.of(mails.deploymentFailure(run.id(), recipients)); case installationFailed: return Optional.of(mails.installationFailure(run.id(), recipients)); case testFailure: return Optional.of(mails.testFailure(run.id(), recipients)); case error: case endpointCertificateTimeout: return Optional.of(mails.systemError(run.id(), recipients)); default: logger.log(WARNING, "Don't know what mail to send for run status '" + run.status() + "'"); return Optional.of(mails.systemError(run.id(), recipients)); } } /** Returns the deployment of the real application in the zone of the given job, if it exists. */ private Optional<Deployment> deployment(ApplicationId id, JobType type) { return Optional.ofNullable(application(id).deployments().get(type.zone(controller.system()))); } /** Returns the real application with the given id. */ private Instance application(ApplicationId id) { controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(id), __ -> { }); return controller.applications().requireInstance(id); } /** * Returns whether the time since deployment is more than the zone deployment expiry, or the given timeout. * * We time out the job before the deployment expires, for zones where deployments are not persistent, * to be able to collect the Vespa log from the deployment. Thus, the lower of the zone's deployment expiry, * and the given default installation timeout, minus one minute, is used as a timeout threshold. */ private boolean timedOut(RunId id, Deployment deployment, Duration defaultTimeout) { Run run = controller.jobController().run(id).get(); if ( ! 
controller.system().isCd() && run.start().isAfter(deployment.at())) return false; Duration timeout = controller.zoneRegistry().getDeploymentTimeToLive(deployment.zone()) .filter(zoneTimeout -> zoneTimeout.compareTo(defaultTimeout) < 0) .orElse(defaultTimeout); return deployment.at().isBefore(controller.clock().instant().minus(timeout.minus(Duration.ofMinutes(1)))); } private boolean useTesterCertificate(RunId id) { return controller.system().isPublic() && id.type().environment().isTest(); } /** Returns the application package for the tester application, assembled from a generated config, fat-jar and services.xml. */ private ApplicationPackage testerPackage(RunId id) { ApplicationVersion version = controller.jobController().run(id).get().versions().targetApplication(); DeploymentSpec spec = controller.applications().requireApplication(TenantAndApplicationId.from(id.application())).deploymentSpec(); ZoneId zone = id.type().zone(controller.system()); boolean useTesterCertificate = useTesterCertificate(id); byte[] servicesXml = servicesXml( ! 
controller.system().isPublic(), useTesterCertificate, testerResourcesFor(zone, spec.requireInstance(id.application().instance())), controller.controllerConfig().steprunner().testerapp()); byte[] testPackage = controller.applications().applicationStore().getTester(id.application().tenant(), id.application().application(), version); byte[] deploymentXml = deploymentXml(id.tester(), spec.athenzDomain(), spec.requireInstance(id.application().instance()).athenzService(zone.environment(), zone.region())); try (ZipBuilder zipBuilder = new ZipBuilder(testPackage.length + servicesXml.length + deploymentXml.length + 1000)) { zipBuilder.add(testPackage); zipBuilder.add("artifacts/.ignore-" + UUID.randomUUID(), new byte[0]); zipBuilder.add("tests/.ignore-" + UUID.randomUUID(), new byte[0]); zipBuilder.add("services.xml", servicesXml); zipBuilder.add("deployment.xml", deploymentXml); if (useTesterCertificate) appendAndStoreCertificate(zipBuilder, id); zipBuilder.close(); return new ApplicationPackage(zipBuilder.toByteArray()); } } private void appendAndStoreCertificate(ZipBuilder zipBuilder, RunId id) { KeyPair keyPair = KeyUtils.generateKeypair(KeyAlgorithm.RSA, 2048); X500Principal subject = new X500Principal("CN=" + id.tester().id().toFullString() + "." + id.type() + "." 
+ id.number()); X509Certificate certificate = X509CertificateBuilder.fromKeypair(keyPair, subject, controller.clock().instant(), controller.clock().instant().plus(timeouts.testerCertificate()), SignatureAlgorithm.SHA512_WITH_RSA, BigInteger.valueOf(1)) .build(); controller.jobController().storeTesterCertificate(id, certificate); zipBuilder.add("artifacts/key", KeyUtils.toPem(keyPair.getPrivate()).getBytes(UTF_8)); zipBuilder.add("artifacts/cert", X509CertificateUtils.toPem(certificate).getBytes(UTF_8)); } private DeploymentId getTesterDeploymentId(RunId runId) { ZoneId zoneId = runId.type().zone(controller.system()); return new DeploymentId(runId.tester().id(), zoneId); } static NodeResources testerResourcesFor(ZoneId zone, DeploymentInstanceSpec spec) { NodeResources nodeResources = spec.steps().stream() .filter(step -> step.concerns(zone.environment())) .findFirst() .flatMap(step -> step.zones().get(0).testerFlavor()) .map(NodeResources::fromLegacyName) .orElse(zone.region().value().contains("aws-") ? DEFAULT_TESTER_RESOURCES_AWS : DEFAULT_TESTER_RESOURCES); return nodeResources.with(NodeResources.DiskSpeed.any); } /** Returns the generated services.xml content for the tester application. 
*/ static byte[] servicesXml(boolean systemUsesAthenz, boolean useTesterCertificate, NodeResources resources, ControllerConfig.Steprunner.Testerapp config) { int jdiscMemoryGb = 2; int jdiscMemoryPct = (int) Math.ceil(100 * jdiscMemoryGb / resources.memoryGb()); int testMemoryMb = (int) (1024 * (resources.memoryGb() - jdiscMemoryGb) / 2); String resourceString = Text.format( "<resources vcpu=\"%.2f\" memory=\"%.2fGb\" disk=\"%.2fGb\" disk-speed=\"%s\" storage-type=\"%s\"/>", resources.vcpu(), resources.memoryGb(), resources.diskGb(), resources.diskSpeed().name(), resources.storageType().name()); String runtimeProviderClass = config.runtimeProviderClass(); String tenantCdBundle = config.tenantCdBundle(); String servicesXml = "<?xml version='1.0' encoding='UTF-8'?>\n" + "<services xmlns:deploy='vespa' version='1.0'>\n" + " <container version='1.0' id='tester'>\n" + "\n" + " <component id=\"com.yahoo.vespa.hosted.testrunner.TestRunner\" bundle=\"vespa-testrunner-components\">\n" + " <config name=\"com.yahoo.vespa.hosted.testrunner.test-runner\">\n" + " <artifactsPath>artifacts</artifactsPath>\n" + " <surefireMemoryMb>" + testMemoryMb + "</surefireMemoryMb>\n" + " <useAthenzCredentials>" + systemUsesAthenz + "</useAthenzCredentials>\n" + " <useTesterCertificate>" + useTesterCertificate + "</useTesterCertificate>\n" + " </config>\n" + " </component>\n" + "\n" + " <handler id=\"com.yahoo.vespa.testrunner.TestRunnerHandler\" bundle=\"vespa-osgi-testrunner\">\n" + " <binding>http: " </handler>\n" + "\n" + " <component id=\"" + runtimeProviderClass + "\" bundle=\"" + tenantCdBundle + "\" />\n" + "\n" + " <component id=\"com.yahoo.vespa.testrunner.JunitRunner\" bundle=\"vespa-osgi-testrunner\">\n" + " <config name=\"com.yahoo.vespa.testrunner.junit-test-runner\">\n" + " <artifactsPath>artifacts</artifactsPath>\n" + " <useAthenzCredentials>" + systemUsesAthenz + "</useAthenzCredentials>\n" + " </config>\n" + " </component>\n" + "\n" + " <component 
id=\"com.yahoo.vespa.testrunner.VespaCliTestRunner\" bundle=\"vespa-osgi-testrunner\">\n" + " <config name=\"com.yahoo.vespa.testrunner.vespa-cli-test-runner\">\n" + " <artifactsPath>artifacts</artifactsPath>\n" + " <testsPath>tests</testsPath>\n" + " <useAthenzCredentials>" + systemUsesAthenz + "</useAthenzCredentials>\n" + " </config>\n" + " </component>\n" + "\n" + " <nodes count=\"1\" allocated-memory=\"" + jdiscMemoryPct + "%\">\n" + " " + resourceString + "\n" + " </nodes>\n" + " </container>\n" + "</services>\n"; return servicesXml.getBytes(UTF_8); } /** Returns a dummy deployment xml which sets up the service identity for the tester, if present. */ private static byte[] deploymentXml(TesterId id, Optional<AthenzDomain> athenzDomain, Optional<AthenzService> athenzService) { String deploymentSpec = "<?xml version='1.0' encoding='UTF-8'?>\n" + "<deployment version=\"1.0\" " + athenzDomain.map(domain -> "athenz-domain=\"" + domain.value() + "\" ").orElse("") + athenzService.map(service -> "athenz-service=\"" + service.value() + "\" ").orElse("") + ">" + " <instance id=\"" + id.id().instance().value() + "\" />" + "</deployment>"; return deploymentSpec.getBytes(UTF_8); } /** Logger which logs to a {@link JobController}, as well as to the parent class' {@link Logger}. */ private class DualLogger { private final RunId id; private final Step step; private DualLogger(RunId id, Step step) { this.id = id; this.step = step; } private void log(String... messages) { log(INFO, List.of(messages)); } private void log(Level level, String... 
messages) { log(level, List.of(messages)); } private void logAll(List<LogEntry> messages) { controller.jobController().log(id, step, messages); } private void log(List<String> messages) { log(INFO, messages); } private void log(Level level, List<String> messages) { controller.jobController().log(id, step, level, messages); } private void log(Level level, String message) { log(level, message, null); } private void logWithInternalException(Level level, String message, Throwable thrown) { logger.log(level, id + " at " + step + ": " + message, thrown); controller.jobController().log(id, step, level, message); } private void log(Level level, String message, Throwable thrown) { logger.log(level, id + " at " + step + ": " + message, thrown); if (thrown != null) { ByteArrayOutputStream traceBuffer = new ByteArrayOutputStream(); thrown.printStackTrace(new PrintStream(traceBuffer)); message += "\n" + traceBuffer; } controller.jobController().log(id, step, level, message); } } static class Timeouts { private final SystemName system; private Timeouts(SystemName system) { this.system = requireNonNull(system); } public static Timeouts of(SystemName system) { return new Timeouts(system); } Duration capacity() { return Duration.ofMinutes(system.isCd() ? 15 : 0); } Duration endpoint() { return Duration.ofMinutes(15); } Duration endpointCertificate() { return Duration.ofMinutes(20); } Duration tester() { return Duration.ofMinutes(30); } Duration statelessNodesDown() { return Duration.ofMinutes(system.isCd() ? 30 : 60); } Duration statefulNodesDown() { return Duration.ofMinutes(system.isCd() ? 30 : 720); } Duration noNodesDown() { return Duration.ofMinutes(system.isCd() ? 30 : 240); } Duration testerCertificate() { return Duration.ofMinutes(300); } } }
```suggestion logger.log("Waiting for certificate to become valid: new application, or old certificate has expired"); ```
/**
 * Performs the given deployment, forwarding the config server's prepare log to the job log,
 * and translating well-known failures into a run status.
 *
 * @param deployment supplier which carries out the deployment and returns its result
 * @param startTime  when this step first started; used to decide when transient errors become fatal
 * @param logger     logger which records both to the job run and to the controller log
 * @return the new run status, or empty to have this step retried later
 */
private Optional<RunStatus> deploy(Supplier<ActivateResult> deployment, Instant startTime, DualLogger logger) {
    try {
        PrepareResponse prepareResponse = deployment.get().prepareResponse();
        if (prepareResponse.log != null)
            logger.logAll(prepareResponse.log.stream()
                                             .map(entry -> new LogEntry(0, // Sequence is assigned by the job log.
                                                                        Instant.ofEpochMilli(entry.time),
                                                                        LogEntry.typeOf(LogLevel.parse(entry.level)),
                                                                        entry.message))
                                             .collect(toList()));

        logger.log("Deployment successful.");
        if (prepareResponse.message != null)
            logger.log(prepareResponse.message);

        return Optional.of(running);
    }
    catch (ConfigServerException e) {
        // Errors that persist for more than an hour are considered fatal; until then, retry (empty result).
        Optional<RunStatus> result = startTime.isBefore(controller.clock().instant().minus(Duration.ofHours(1)))
                                     ? Optional.of(deploymentFailed) : Optional.empty();
        switch (e.code()) {
            case CERTIFICATE_NOT_READY:
                logger.log("Waiting for certificate to become ready on config server: New application, or old one has expired");
                if (startTime.plus(timeouts.endpointCertificate()).isBefore(controller.clock().instant())) {
                    logger.log(WARNING, "Certificate did not become available on config server within (" + timeouts.endpointCertificate() + ")");
                    return Optional.of(RunStatus.endpointCertificateTimeout);
                }
                return result;
            case ACTIVATION_CONFLICT:
            case APPLICATION_LOCK_FAILURE:
            case CONFIG_NOT_CONVERGED:
                logger.log("Deployment failed with possibly transient error " + e.code() +
                           ", will retry: " + e.getMessage());
                return result;
            case LOAD_BALANCER_NOT_READY:
            case PARENT_HOST_NOT_READY:
                logger.log(e.message());
                return result;
            case OUT_OF_CAPACITY:
                // In CD we wait a while for capacity to free up before giving up on the run.
                logger.log(e.message());
                return controller.system().isCd() && startTime.plus(timeouts.capacity()).isAfter(controller.clock().instant())
                       ? result : Optional.of(outOfCapacity);
            case INVALID_APPLICATION_PACKAGE:
            case BAD_REQUEST:
                logger.log(WARNING, e.getMessage());
                return Optional.of(deploymentFailed);
        }
        throw e;
    }
    catch (EndpointCertificateException e) {
        switch (e.type()) {
            case CERT_NOT_AVAILABLE:
                // Fix: it is the old certificate, not the application, which may have expired.
                logger.log("Waiting for certificate to become valid: new application, or old certificate has expired");
                if (startTime.plus(timeouts.endpointCertificate()).isBefore(controller.clock().instant())) {
                    logger.log(WARNING, "Controller could not validate certificate within " +
                                        timeouts.endpointCertificate() + ": " + Exceptions.toMessageString(e));
                    return Optional.of(RunStatus.endpointCertificateTimeout);
                }
                return Optional.empty();
            default:
                throw e;
        }
    }
}
logger.log("Waiting for certificate to become valid: new application, or old one has expired");
/**
 * Performs the given deployment, forwarding the config server's prepare log to the job log,
 * and translating well-known failures into a run status.
 *
 * @param deployment supplier which carries out the deployment and returns its result
 * @param startTime  when this step first started; used to decide when transient errors become fatal
 * @param logger     logger which records both to the job run and to the controller log
 * @return the new run status, or empty to have this step retried later
 */
private Optional<RunStatus> deploy(Supplier<ActivateResult> deployment, Instant startTime, DualLogger logger) {
    try {
        PrepareResponse prepareResponse = deployment.get().prepareResponse();
        if (prepareResponse.log != null)
            logger.logAll(prepareResponse.log.stream()
                                             .map(entry -> new LogEntry(0, // Sequence is assigned by the job log.
                                                                        Instant.ofEpochMilli(entry.time),
                                                                        LogEntry.typeOf(LogLevel.parse(entry.level)),
                                                                        entry.message))
                                             .collect(toList()));

        logger.log("Deployment successful.");
        if (prepareResponse.message != null)
            logger.log(prepareResponse.message);

        return Optional.of(running);
    }
    catch (ConfigServerException e) {
        // Errors that persist for more than an hour are considered fatal; until then, retry (empty result).
        Optional<RunStatus> result = startTime.isBefore(controller.clock().instant().minus(Duration.ofHours(1)))
                                     ? Optional.of(deploymentFailed) : Optional.empty();
        switch (e.code()) {
            case CERTIFICATE_NOT_READY:
                logger.log("Waiting for certificate to become ready on config server: New application, or old one has expired");
                if (startTime.plus(timeouts.endpointCertificate()).isBefore(controller.clock().instant())) {
                    logger.log(WARNING, "Certificate did not become available on config server within (" + timeouts.endpointCertificate() + ")");
                    return Optional.of(RunStatus.endpointCertificateTimeout);
                }
                return result;
            case ACTIVATION_CONFLICT:
            case APPLICATION_LOCK_FAILURE:
            case CONFIG_NOT_CONVERGED:
                logger.log("Deployment failed with possibly transient error " + e.code() +
                           ", will retry: " + e.getMessage());
                return result;
            case LOAD_BALANCER_NOT_READY:
            case PARENT_HOST_NOT_READY:
                logger.log(e.message());
                return result;
            case OUT_OF_CAPACITY:
                // In CD we wait a while for capacity to free up before giving up on the run.
                logger.log(e.message());
                return controller.system().isCd() && startTime.plus(timeouts.capacity()).isAfter(controller.clock().instant())
                       ? result : Optional.of(outOfCapacity);
            case INVALID_APPLICATION_PACKAGE:
            case BAD_REQUEST:
                logger.log(WARNING, e.getMessage());
                return Optional.of(deploymentFailed);
        }
        throw e;
    }
    catch (EndpointCertificateException e) {
        switch (e.type()) {
            case CERT_NOT_AVAILABLE:
                logger.log("Waiting for certificate to become valid: new application, or old certificate has expired");
                if (startTime.plus(timeouts.endpointCertificate()).isBefore(controller.clock().instant())) {
                    logger.log(WARNING, "Controller could not validate certificate within " +
                                        timeouts.endpointCertificate() + ": " + Exceptions.toMessageString(e));
                    return Optional.of(RunStatus.endpointCertificateTimeout);
                }
                return Optional.empty();
            default:
                throw e;
        }
    }
}
class InternalStepRunner implements StepRunner { private static final Logger logger = Logger.getLogger(InternalStepRunner.class.getName()); static final NodeResources DEFAULT_TESTER_RESOURCES = new NodeResources(1, 4, 50, 0.3, NodeResources.DiskSpeed.any); static final NodeResources DEFAULT_TESTER_RESOURCES_AWS = new NodeResources(2, 8, 50, 0.3, NodeResources.DiskSpeed.any); private final Controller controller; private final TestConfigSerializer testConfigSerializer; private final DeploymentFailureMails mails; private final Timeouts timeouts; public InternalStepRunner(Controller controller) { this.controller = controller; this.testConfigSerializer = new TestConfigSerializer(controller.system()); this.mails = new DeploymentFailureMails(controller.zoneRegistry()); this.timeouts = Timeouts.of(controller.system()); } @Override public Optional<RunStatus> run(LockedStep step, RunId id) { DualLogger logger = new DualLogger(id, step.get()); try { switch (step.get()) { case deployTester: return deployTester(id, logger); case deployInitialReal: return deployInitialReal(id, logger); case installInitialReal: return installInitialReal(id, logger); case deployReal: return deployReal(id, logger); case installTester: return installTester(id, logger); case installReal: return installReal(id, logger); case startStagingSetup: return startTests(id, true, logger); case endStagingSetup: case endTests: return endTests(id, logger); case startTests: return startTests(id, false, logger); case copyVespaLogs: return copyVespaLogs(id, logger); case deactivateReal: return deactivateReal(id, logger); case deactivateTester: return deactivateTester(id, logger); case report: return report(id, logger); default: throw new AssertionError("Unknown step '" + step + "'!"); } } catch (UncheckedIOException e) { logger.logWithInternalException(INFO, "IO exception running " + id + ": " + Exceptions.toMessageString(e), e); return Optional.empty(); } catch (RuntimeException e) { logger.log(WARNING, "Unexpected 
exception running " + id, e); if (step.get().alwaysRun()) { logger.log("Will keep trying, as this is a cleanup step."); return Optional.empty(); } return Optional.of(error); } } private Optional<RunStatus> deployInitialReal(RunId id, DualLogger logger) { Versions versions = controller.jobController().run(id).get().versions(); logger.log("Deploying platform version " + versions.sourcePlatform().orElse(versions.targetPlatform()) + " and application version " + versions.sourceApplication().orElse(versions.targetApplication()).id() + " ..."); return deployReal(id, true, logger); } private Optional<RunStatus> deployReal(RunId id, DualLogger logger) { Versions versions = controller.jobController().run(id).get().versions(); logger.log("Deploying platform version " + versions.targetPlatform() + " and application version " + versions.targetApplication().id() + " ..."); return deployReal(id, false, logger); } private Optional<RunStatus> deployReal(RunId id, boolean setTheStage, DualLogger logger) { Optional<X509Certificate> testerCertificate = controller.jobController().run(id).get().testerCertificate(); return deploy(() -> controller.applications().deploy(id.job(), setTheStage), controller.jobController().run(id).get() .stepInfo(setTheStage ? deployInitialReal : deployReal).get() .startTime().get(), logger) .filter(result -> { if ( ! 
useTesterCertificate(id) || result != running) return true; return controller.jobController().run(id).get().stepStatus(deployTester).get() == succeeded && testerCertificate.equals(controller.jobController().run(id).get().testerCertificate()); }); } private Optional<RunStatus> deployTester(RunId id, DualLogger logger) { Version platform = testerPlatformVersion(id); logger.log("Deploying the tester container on platform " + platform + " ..."); return deploy(() -> controller.applications().deployTester(id.tester(), testerPackage(id), id.type().zone(controller.system()), platform), controller.jobController().run(id).get() .stepInfo(deployTester).get() .startTime().get(), logger); } @SuppressWarnings("deprecation") private Optional<RunStatus> installInitialReal(RunId id, DualLogger logger) { return installReal(id, true, logger); } private Optional<RunStatus> installReal(RunId id, DualLogger logger) { return installReal(id, false, logger); } private Optional<RunStatus> installReal(RunId id, boolean setTheStage, DualLogger logger) { Optional<Deployment> deployment = deployment(id.application(), id.type()); if (deployment.isEmpty()) { logger.log("Deployment expired before installation was successful."); return Optional.of(installationFailed); } Versions versions = controller.jobController().run(id).get().versions(); Version platform = setTheStage ? 
versions.sourcePlatform().orElse(versions.targetPlatform()) : versions.targetPlatform(); Run run = controller.jobController().run(id).get(); Optional<ServiceConvergence> services = controller.serviceRegistry().configServer().serviceConvergence(new DeploymentId(id.application(), id.type().zone(controller.system())), Optional.of(platform)); if (services.isEmpty()) { logger.log("Config status not currently available -- will retry."); return Optional.empty(); } List<Node> nodes = controller.serviceRegistry().configServer().nodeRepository().list(id.type().zone(controller.system()), NodeFilter.all() .applications(id.application()) .states(active)); Set<HostName> parentHostnames = nodes.stream().map(node -> node.parentHostname().get()).collect(toSet()); List<Node> parents = controller.serviceRegistry().configServer().nodeRepository().list(id.type().zone(controller.system()), NodeFilter.all() .hostnames(parentHostnames)); boolean firstTick = run.convergenceSummary().isEmpty(); NodeList nodeList = NodeList.of(nodes, parents, services.get()); ConvergenceSummary summary = nodeList.summary(); if (firstTick) { logger.log(" logger.log(nodeList.asList().stream() .flatMap(node -> nodeDetails(node, true)) .collect(toList())); } else if ( ! 
summary.converged()) { logger.log("Waiting for convergence of " + summary.services() + " services across " + summary.nodes() + " nodes"); if (summary.needPlatformUpgrade() > 0) logger.log(summary.upgradingPlatform() + "/" + summary.needPlatformUpgrade() + " nodes upgrading platform"); if (summary.needReboot() > 0) logger.log(summary.rebooting() + "/" + summary.needReboot() + " nodes rebooting"); if (summary.needRestart() > 0) logger.log(summary.restarting() + "/" + summary.needRestart() + " nodes restarting"); if (summary.retiring() > 0) logger.log(summary.retiring() + " nodes retiring"); if (summary.upgradingFirmware() > 0) logger.log(summary.upgradingFirmware() + " nodes upgrading firmware"); if (summary.upgradingOs() > 0) logger.log(summary.upgradingOs() + " nodes upgrading OS"); if (summary.needNewConfig() > 0) logger.log(summary.needNewConfig() + " application services upgrading"); } if (summary.converged()) { controller.jobController().locked(id, lockedRun -> lockedRun.withSummary(null)); if (endpointsAvailable(id.application(), id.type().zone(controller.system()), logger)) { if (containersAreUp(id.application(), id.type().zone(controller.system()), logger)) { logger.log("Installation succeeded!"); return Optional.of(running); } } else if (timedOut(id, deployment.get(), timeouts.endpoint())) { logger.log(WARNING, "Endpoints failed to show up within " + timeouts.endpoint().toMinutes() + " minutes!"); return Optional.of(error); } } String failureReason = null; NodeList suspendedTooLong = nodeList.isStateful() .suspendedSince(controller.clock().instant().minus(timeouts.statefulNodesDown())) .and(nodeList.not().isStateful() .suspendedSince(controller.clock().instant().minus(timeouts.statelessNodesDown())) ); if ( ! 
suspendedTooLong.isEmpty() && deployment.get().at().plus(timeouts.statelessNodesDown()).isBefore(controller.clock().instant())) { failureReason = "Some nodes have been suspended for more than the allowed threshold:\n" + suspendedTooLong.asList().stream().map(node -> node.node().hostname().value()).collect(joining("\n")); } if (run.noNodesDownSince() .map(since -> since.isBefore(controller.clock().instant().minus(timeouts.noNodesDown()))) .orElse(false)) { if (summary.needPlatformUpgrade() > 0 || summary.needReboot() > 0 || summary.needRestart() > 0) failureReason = "Timed out after waiting " + timeouts.noNodesDown().toMinutes() + " minutes for " + "nodes to suspend. This is normal if the cluster is excessively busy. " + "Nodes will continue to attempt suspension to progress installation independently of " + "this run."; else failureReason = "Nodes not able to start with new application package."; } Duration timeout = JobRunner.jobTimeout.minusHours(1); if (timedOut(id, deployment.get(), timeout)) { failureReason = "Installation failed to complete within " + timeout.toHours() + "hours!"; } if (failureReason != null) { logger.log(" logger.log(nodeList.asList().stream() .flatMap(node -> nodeDetails(node, true)) .collect(toList())); logger.log(" logger.log(nodeList.not().in(nodeList.not().needsNewConfig() .not().needsPlatformUpgrade() .not().needsReboot() .not().needsRestart() .not().needsFirmwareUpgrade() .not().needsOsUpgrade()) .asList().stream() .flatMap(node -> nodeDetails(node, true)) .collect(toList())); logger.log(INFO, failureReason); return Optional.of(installationFailed); } if ( ! firstTick) logger.log(FINE, nodeList.expectedDown().and(nodeList.needsNewConfig()).asList().stream() .distinct() .flatMap(node -> nodeDetails(node, false)) .collect(toList())); controller.jobController().locked(id, lockedRun -> { Instant noNodesDownSince = nodeList.allowedDown().size() == 0 ? 
lockedRun.noNodesDownSince().orElse(controller.clock().instant()) : null; return lockedRun.noNodesDownSince(noNodesDownSince).withSummary(summary); }); return Optional.empty(); } private Version testerPlatformVersion(RunId id) { return application(id.application()).change().isPinned() ? controller.jobController().run(id).get().versions().targetPlatform() : controller.readSystemVersion(); } private Optional<RunStatus> installTester(RunId id, DualLogger logger) { Run run = controller.jobController().run(id).get(); Version platform = testerPlatformVersion(id); ZoneId zone = id.type().zone(controller.system()); ApplicationId testerId = id.tester().id(); Optional<ServiceConvergence> services = controller.serviceRegistry().configServer().serviceConvergence(new DeploymentId(testerId, zone), Optional.of(platform)); if (services.isEmpty()) { logger.log("Config status not currently available -- will retry."); return run.stepInfo(installTester).get().startTime().get().isBefore(controller.clock().instant().minus(Duration.ofMinutes(5))) ? 
Optional.of(error) : Optional.empty(); } List<Node> nodes = controller.serviceRegistry().configServer().nodeRepository().list(zone, NodeFilter.all() .applications(testerId) .states(active, reserved)); Set<HostName> parentHostnames = nodes.stream().map(node -> node.parentHostname().get()).collect(toSet()); List<Node> parents = controller.serviceRegistry().configServer().nodeRepository().list(zone, NodeFilter.all() .hostnames(parentHostnames)); NodeList nodeList = NodeList.of(nodes, parents, services.get()); logger.log(nodeList.asList().stream() .flatMap(node -> nodeDetails(node, false)) .collect(toList())); if (nodeList.summary().converged() && testerContainersAreUp(testerId, zone, logger)) { logger.log("Tester container successfully installed!"); return Optional.of(running); } if (run.stepInfo(installTester).get().startTime().get().plus(timeouts.tester()).isBefore(controller.clock().instant())) { logger.log(WARNING, "Installation of tester failed to complete within " + timeouts.tester().toMinutes() + " minutes!"); return Optional.of(error); } return Optional.empty(); } /** Returns true iff all calls to endpoint in the deployment give 100 consecutive 200 OK responses on /status.html. */ private boolean containersAreUp(ApplicationId id, ZoneId zoneId, DualLogger logger) { var endpoints = controller.routing().readTestRunnerEndpointsOf(Set.of(new DeploymentId(id, zoneId))); if ( ! endpoints.containsKey(zoneId)) return false; return endpoints.get(zoneId).parallelStream().allMatch(endpoint -> { boolean ready = controller.jobController().cloud().ready(endpoint.url()); if (!ready) { logger.log("Failed to get 100 consecutive OKs from " + endpoint); } return ready; }); } /** Returns true iff all containers in the tester deployment give 100 consecutive 200 OK responses on /status.html. 
*/ private boolean testerContainersAreUp(ApplicationId id, ZoneId zoneId, DualLogger logger) { DeploymentId deploymentId = new DeploymentId(id, zoneId); if (controller.jobController().cloud().testerReady(deploymentId)) { return true; } else { logger.log("Failed to get 100 consecutive OKs from tester container for " + deploymentId); return false; } } private boolean endpointsAvailable(ApplicationId id, ZoneId zone, DualLogger logger) { DeploymentId deployment = new DeploymentId(id, zone); Map<ZoneId, List<Endpoint>> endpoints = controller.routing().readTestRunnerEndpointsOf(Set.of(deployment)); if ( ! endpoints.containsKey(zone)) { logger.log("Endpoints not yet ready."); return false; } for (var endpoint : endpoints.get(zone)) { HostName endpointName = HostName.from(endpoint.dnsName()); var ipAddress = controller.jobController().cloud().resolveHostName(endpointName); if (ipAddress.isEmpty()) { logger.log(INFO, "DNS lookup yielded no IP address for '" + endpointName + "'."); return false; } DeploymentRoutingContext context = controller.routing().of(deployment); if (context.routingMethod() == RoutingMethod.exclusive) { RoutingPolicy policy = context.routingPolicy(ClusterSpec.Id.from(endpoint.name())) .orElseThrow(() -> new IllegalStateException(endpoint + " has no matching policy")); var cNameValue = controller.jobController().cloud().resolveCname(endpointName); if ( ! cNameValue.map(policy.canonicalName()::equals).orElse(false)) { logger.log(INFO, "CNAME '" + endpointName + "' points at " + cNameValue.map(name -> "'" + name + "'").orElse("nothing") + " but should point at load balancer '" + policy.canonicalName() + "'"); return false; } var loadBalancerAddress = controller.jobController().cloud().resolveHostName(policy.canonicalName()); if ( ! 
loadBalancerAddress.equals(ipAddress)) { logger.log(INFO, "IP address of CNAME '" + endpointName + "' (" + ipAddress.get() + ") and load balancer '" + policy.canonicalName() + "' (" + loadBalancerAddress.orElse("empty") + ") are not equal"); return false; } } } logEndpoints(endpoints, logger); return true; } private void logEndpoints(Map<ZoneId, List<Endpoint>> zoneEndpoints, DualLogger logger) { List<String> messages = new ArrayList<>(); messages.add("Found endpoints:"); zoneEndpoints.forEach((zone, endpoints) -> { messages.add("- " + zone); for (Endpoint endpoint : endpoints) messages.add(" |-- " + endpoint.url() + " (cluster '" + endpoint.name() + "')"); }); logger.log(messages); } private Stream<String> nodeDetails(NodeWithServices node, boolean printAllServices) { return Stream.concat(Stream.of(node.node().hostname() + ": " + humanize(node.node().serviceState()) + (node.node().suspendedSince().map(since -> " since " + since).orElse("")), "--- platform " + wantedPlatform(node.node()) + (node.needsPlatformUpgrade() ? " <-- " + currentPlatform(node.node()) : "") + (node.needsOsUpgrade() && node.isAllowedDown() ? ", upgrading OS (" + node.parent().wantedOsVersion() + " <-- " + node.parent().currentOsVersion() + ")" : "") + (node.needsFirmwareUpgrade() && node.isAllowedDown() ? ", upgrading firmware" : "") + (node.needsRestart() ? ", restart pending (" + node.node().wantedRestartGeneration() + " <-- " + node.node().restartGeneration() + ")" : "") + (node.needsReboot() ? ", reboot pending (" + node.node().wantedRebootGeneration() + " <-- " + node.node().rebootGeneration() + ")" : "")), node.services().stream() .filter(service -> printAllServices || node.needsNewConfig()) .map(service -> "--- " + service.type() + " on port " + service.port() + (service.currentGeneration() == -1 ? 
" has not started " : " has config generation " + service.currentGeneration() + ", wanted is " + node.wantedConfigGeneration()))); } private String wantedPlatform(Node node) { return node.wantedDockerImage().repository() + ":" + node.wantedVersion(); } private String currentPlatform(Node node) { String currentRepo = node.currentDockerImage().repository(); String wantedRepo = node.wantedDockerImage().repository(); return (currentRepo.equals(wantedRepo) ? "" : currentRepo + ":") + node.currentVersion(); } private String humanize(Node.ServiceState state) { switch (state) { case allowedDown: return "allowed to be DOWN"; case expectedUp: return "expected to be UP"; case permanentlyDown: return "permanently DOWN"; case unorchestrated: return "unorchestrated"; default: return state.name(); } } private Optional<RunStatus> startTests(RunId id, boolean isSetup, DualLogger logger) { Optional<Deployment> deployment = deployment(id.application(), id.type()); if (deployment.isEmpty()) { logger.log(INFO, "Deployment expired before tests could start."); return Optional.of(error); } var deployments = controller.applications().requireInstance(id.application()) .productionDeployments().keySet().stream() .map(zone -> new DeploymentId(id.application(), zone)) .collect(Collectors.toSet()); ZoneId zoneId = id.type().zone(controller.system()); deployments.add(new DeploymentId(id.application(), zoneId)); logger.log("Attempting to find endpoints ..."); var endpoints = controller.routing().readTestRunnerEndpointsOf(deployments); if ( ! 
endpoints.containsKey(zoneId)) { logger.log(WARNING, "Endpoints for the deployment to test vanished again, while it was still active!"); return Optional.of(error); } logEndpoints(endpoints, logger); if (!controller.jobController().cloud().testerReady(getTesterDeploymentId(id))) { logger.log(WARNING, "Tester container went bad!"); return Optional.of(error); } logger.log("Starting tests ..."); TesterCloud.Suite suite = TesterCloud.Suite.of(id.type(), isSetup); byte[] config = testConfigSerializer.configJson(id.application(), id.type(), true, endpoints, controller.applications().reachableContentClustersByZone(deployments)); controller.jobController().cloud().startTests(getTesterDeploymentId(id), suite, config); return Optional.of(running); } private Optional<RunStatus> endTests(RunId id, DualLogger logger) { if (deployment(id.application(), id.type()).isEmpty()) { logger.log(INFO, "Deployment expired before tests could complete."); return Optional.of(aborted); } Optional<X509Certificate> testerCertificate = controller.jobController().run(id).get().testerCertificate(); if (testerCertificate.isPresent()) { try { testerCertificate.get().checkValidity(Date.from(controller.clock().instant())); } catch (CertificateExpiredException | CertificateNotYetValidException e) { logger.log(WARNING, "Tester certificate expired before tests could complete."); return Optional.of(aborted); } } controller.jobController().updateTestLog(id); TesterCloud.Status testStatus = controller.jobController().cloud().getStatus(getTesterDeploymentId(id)); switch (testStatus) { case NOT_STARTED: throw new IllegalStateException("Tester reports tests not started, even though they should have!"); case RUNNING: return Optional.empty(); case FAILURE: logger.log("Tests failed."); controller.jobController().updateTestReport(id); return Optional.of(testFailure); case ERROR: logger.log(INFO, "Tester failed running its tests!"); controller.jobController().updateTestReport(id); return Optional.of(error); case 
SUCCESS: logger.log("Tests completed successfully."); controller.jobController().updateTestReport(id); return Optional.of(running); default: throw new IllegalStateException("Unknown status '" + testStatus + "'!"); } } private Optional<RunStatus> copyVespaLogs(RunId id, DualLogger logger) { if (deployment(id.application(), id.type()).isPresent()) try { controller.jobController().updateVespaLog(id); } catch (ConfigServerException e) { Instant doom = controller.jobController().run(id).get().stepInfo(copyVespaLogs).get().startTime().get() .plus(Duration.ofMinutes(3)); if (e.code() == ConfigServerException.ErrorCode.NOT_FOUND && controller.clock().instant().isBefore(doom)) { logger.log(INFO, "Found no logs, but will retry"); return Optional.empty(); } else { logger.log(INFO, "Failure getting vespa logs for " + id, e); return Optional.of(error); } } catch (Exception e) { logger.log(INFO, "Failure getting vespa logs for " + id, e); return Optional.of(error); } return Optional.of(running); } private Optional<RunStatus> deactivateReal(RunId id, DualLogger logger) { try { logger.log("Deactivating deployment of " + id.application() + " in " + id.type().zone(controller.system()) + " ..."); controller.applications().deactivate(id.application(), id.type().zone(controller.system())); return Optional.of(running); } catch (RuntimeException e) { logger.log(WARNING, "Failed deleting application " + id.application(), e); Instant startTime = controller.jobController().run(id).get().stepInfo(deactivateReal).get().startTime().get(); return startTime.isBefore(controller.clock().instant().minus(Duration.ofHours(1))) ? 
Optional.of(error) : Optional.empty(); } } private Optional<RunStatus> deactivateTester(RunId id, DualLogger logger) { try { logger.log("Deactivating tester of " + id.application() + " in " + id.type().zone(controller.system()) + " ..."); controller.jobController().deactivateTester(id.tester(), id.type()); return Optional.of(running); } catch (RuntimeException e) { logger.log(WARNING, "Failed deleting tester of " + id.application(), e); Instant startTime = controller.jobController().run(id).get().stepInfo(deactivateTester).get().startTime().get(); return startTime.isBefore(controller.clock().instant().minus(Duration.ofHours(1))) ? Optional.of(error) : Optional.empty(); } } private Optional<RunStatus> report(RunId id, DualLogger logger) { try { controller.jobController().active(id).ifPresent(run -> { if (run.hasFailed()) sendEmailNotification(run, logger); updateConsoleNotification(run); }); } catch (IllegalStateException e) { logger.log(INFO, "Job '" + id.type() + "' no longer supposed to run?", e); return Optional.of(error); } catch (RuntimeException e) { Instant start = controller.jobController().run(id).get().stepInfo(report).get().startTime().get(); return (controller.clock().instant().isAfter(start.plusSeconds(180))) ? Optional.empty() : Optional.of(error); } return Optional.of(running); } /** Sends a mail with a notification of a failed run, if one should be sent. */ private void sendEmailNotification(Run run, DualLogger logger) { if ( ! isNewFailure(run)) return; Application application = controller.applications().requireApplication(TenantAndApplicationId.from(run.id().application())); Notifications notifications = application.deploymentSpec().requireInstance(run.id().application().instance()).notifications(); boolean newCommit = application.require(run.id().application().instance()).change().application() .map(run.versions().targetApplication()::equals) .orElse(false); When when = newCommit ? 
failingCommit : failing; List<String> recipients = new ArrayList<>(notifications.emailAddressesFor(when)); if (notifications.emailRolesFor(when).contains(author)) run.versions().targetApplication().authorEmail().ifPresent(recipients::add); if (recipients.isEmpty()) return; try { logger.log(INFO, "Sending failure notification to " + String.join(", ", recipients)); mailOf(run, recipients).ifPresent(controller.serviceRegistry().mailer()::send); } catch (RuntimeException e) { logger.log(WARNING, "Exception trying to send mail for " + run.id(), e); } } private boolean isNewFailure(Run run) { return controller.jobController().lastCompleted(run.id().job()) .map(previous -> ! previous.hasFailed() || ! previous.versions().targetsMatch(run.versions())) .orElse(true); } private void updateConsoleNotification(Run run) { NotificationSource source = NotificationSource.from(run.id()); Consumer<String> updater = msg -> controller.notificationsDb().setNotification(source, Notification.Type.deployment, Notification.Level.error, msg); switch (run.status()) { case aborted: return; case running: case success: controller.notificationsDb().removeNotification(source, Notification.Type.deployment); return; case outOfCapacity: if ( ! run.id().type().environment().isTest()) updater.accept("lack of capacity. Please contact the Vespa team to request more!"); return; case deploymentFailed: updater.accept("invalid application configuration, or timeout of other deployments of the same application"); return; case installationFailed: updater.accept("nodes were not able to upgrade to the new configuration"); return; case testFailure: updater.accept("one or more verification tests against the deployment failed"); return; case error: case endpointCertificateTimeout: break; default: logger.log(WARNING, "Don't know what to set console notification to for run status '" + run.status() + "'"); } updater.accept("something in the framework went wrong. Such errors are " + "usually transient. 
Please contact the Vespa team if the problem persists!"); } private Optional<Mail> mailOf(Run run, List<String> recipients) { switch (run.status()) { case running: case aborted: case success: return Optional.empty(); case outOfCapacity: return run.id().type().isProduction() ? Optional.of(mails.outOfCapacity(run.id(), recipients)) : Optional.empty(); case deploymentFailed: return Optional.of(mails.deploymentFailure(run.id(), recipients)); case installationFailed: return Optional.of(mails.installationFailure(run.id(), recipients)); case testFailure: return Optional.of(mails.testFailure(run.id(), recipients)); case error: case endpointCertificateTimeout: return Optional.of(mails.systemError(run.id(), recipients)); default: logger.log(WARNING, "Don't know what mail to send for run status '" + run.status() + "'"); return Optional.of(mails.systemError(run.id(), recipients)); } } /** Returns the deployment of the real application in the zone of the given job, if it exists. */ private Optional<Deployment> deployment(ApplicationId id, JobType type) { return Optional.ofNullable(application(id).deployments().get(type.zone(controller.system()))); } /** Returns the real application with the given id. */ private Instance application(ApplicationId id) { controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(id), __ -> { }); return controller.applications().requireInstance(id); } /** * Returns whether the time since deployment is more than the zone deployment expiry, or the given timeout. * * We time out the job before the deployment expires, for zones where deployments are not persistent, * to be able to collect the Vespa log from the deployment. Thus, the lower of the zone's deployment expiry, * and the given default installation timeout, minus one minute, is used as a timeout threshold. */ private boolean timedOut(RunId id, Deployment deployment, Duration defaultTimeout) { Run run = controller.jobController().run(id).get(); if ( ! 
controller.system().isCd() && run.start().isAfter(deployment.at())) return false; Duration timeout = controller.zoneRegistry().getDeploymentTimeToLive(deployment.zone()) .filter(zoneTimeout -> zoneTimeout.compareTo(defaultTimeout) < 0) .orElse(defaultTimeout); return deployment.at().isBefore(controller.clock().instant().minus(timeout.minus(Duration.ofMinutes(1)))); } private boolean useTesterCertificate(RunId id) { return controller.system().isPublic() && id.type().environment().isTest(); } /** Returns the application package for the tester application, assembled from a generated config, fat-jar and services.xml. */ private ApplicationPackage testerPackage(RunId id) { ApplicationVersion version = controller.jobController().run(id).get().versions().targetApplication(); DeploymentSpec spec = controller.applications().requireApplication(TenantAndApplicationId.from(id.application())).deploymentSpec(); ZoneId zone = id.type().zone(controller.system()); boolean useTesterCertificate = useTesterCertificate(id); byte[] servicesXml = servicesXml( ! 
controller.system().isPublic(), useTesterCertificate, testerResourcesFor(zone, spec.requireInstance(id.application().instance())), controller.controllerConfig().steprunner().testerapp()); byte[] testPackage = controller.applications().applicationStore().getTester(id.application().tenant(), id.application().application(), version); byte[] deploymentXml = deploymentXml(id.tester(), spec.athenzDomain(), spec.requireInstance(id.application().instance()).athenzService(zone.environment(), zone.region())); try (ZipBuilder zipBuilder = new ZipBuilder(testPackage.length + servicesXml.length + deploymentXml.length + 1000)) { zipBuilder.add(testPackage); zipBuilder.add("artifacts/.ignore-" + UUID.randomUUID(), new byte[0]); zipBuilder.add("tests/.ignore-" + UUID.randomUUID(), new byte[0]); zipBuilder.add("services.xml", servicesXml); zipBuilder.add("deployment.xml", deploymentXml); if (useTesterCertificate) appendAndStoreCertificate(zipBuilder, id); zipBuilder.close(); return new ApplicationPackage(zipBuilder.toByteArray()); } } private void appendAndStoreCertificate(ZipBuilder zipBuilder, RunId id) { KeyPair keyPair = KeyUtils.generateKeypair(KeyAlgorithm.RSA, 2048); X500Principal subject = new X500Principal("CN=" + id.tester().id().toFullString() + "." + id.type() + "." 
+ id.number()); X509Certificate certificate = X509CertificateBuilder.fromKeypair(keyPair, subject, controller.clock().instant(), controller.clock().instant().plus(timeouts.testerCertificate()), SignatureAlgorithm.SHA512_WITH_RSA, BigInteger.valueOf(1)) .build(); controller.jobController().storeTesterCertificate(id, certificate); zipBuilder.add("artifacts/key", KeyUtils.toPem(keyPair.getPrivate()).getBytes(UTF_8)); zipBuilder.add("artifacts/cert", X509CertificateUtils.toPem(certificate).getBytes(UTF_8)); } private DeploymentId getTesterDeploymentId(RunId runId) { ZoneId zoneId = runId.type().zone(controller.system()); return new DeploymentId(runId.tester().id(), zoneId); } static NodeResources testerResourcesFor(ZoneId zone, DeploymentInstanceSpec spec) { NodeResources nodeResources = spec.steps().stream() .filter(step -> step.concerns(zone.environment())) .findFirst() .flatMap(step -> step.zones().get(0).testerFlavor()) .map(NodeResources::fromLegacyName) .orElse(zone.region().value().contains("aws-") ? DEFAULT_TESTER_RESOURCES_AWS : DEFAULT_TESTER_RESOURCES); return nodeResources.with(NodeResources.DiskSpeed.any); } /** Returns the generated services.xml content for the tester application. 
*/ static byte[] servicesXml(boolean systemUsesAthenz, boolean useTesterCertificate, NodeResources resources, ControllerConfig.Steprunner.Testerapp config) { int jdiscMemoryGb = 2; int jdiscMemoryPct = (int) Math.ceil(100 * jdiscMemoryGb / resources.memoryGb()); int testMemoryMb = (int) (1024 * (resources.memoryGb() - jdiscMemoryGb) / 2); String resourceString = Text.format( "<resources vcpu=\"%.2f\" memory=\"%.2fGb\" disk=\"%.2fGb\" disk-speed=\"%s\" storage-type=\"%s\"/>", resources.vcpu(), resources.memoryGb(), resources.diskGb(), resources.diskSpeed().name(), resources.storageType().name()); String runtimeProviderClass = config.runtimeProviderClass(); String tenantCdBundle = config.tenantCdBundle(); String servicesXml = "<?xml version='1.0' encoding='UTF-8'?>\n" + "<services xmlns:deploy='vespa' version='1.0'>\n" + " <container version='1.0' id='tester'>\n" + "\n" + " <component id=\"com.yahoo.vespa.hosted.testrunner.TestRunner\" bundle=\"vespa-testrunner-components\">\n" + " <config name=\"com.yahoo.vespa.hosted.testrunner.test-runner\">\n" + " <artifactsPath>artifacts</artifactsPath>\n" + " <surefireMemoryMb>" + testMemoryMb + "</surefireMemoryMb>\n" + " <useAthenzCredentials>" + systemUsesAthenz + "</useAthenzCredentials>\n" + " <useTesterCertificate>" + useTesterCertificate + "</useTesterCertificate>\n" + " </config>\n" + " </component>\n" + "\n" + " <handler id=\"com.yahoo.vespa.testrunner.TestRunnerHandler\" bundle=\"vespa-osgi-testrunner\">\n" + " <binding>http: " </handler>\n" + "\n" + " <component id=\"" + runtimeProviderClass + "\" bundle=\"" + tenantCdBundle + "\" />\n" + "\n" + " <component id=\"com.yahoo.vespa.testrunner.JunitRunner\" bundle=\"vespa-osgi-testrunner\">\n" + " <config name=\"com.yahoo.vespa.testrunner.junit-test-runner\">\n" + " <artifactsPath>artifacts</artifactsPath>\n" + " <useAthenzCredentials>" + systemUsesAthenz + "</useAthenzCredentials>\n" + " </config>\n" + " </component>\n" + "\n" + " <component 
id=\"com.yahoo.vespa.testrunner.VespaCliTestRunner\" bundle=\"vespa-osgi-testrunner\">\n" + " <config name=\"com.yahoo.vespa.testrunner.vespa-cli-test-runner\">\n" + " <artifactsPath>artifacts</artifactsPath>\n" + " <testsPath>tests</testsPath>\n" + " <useAthenzCredentials>" + systemUsesAthenz + "</useAthenzCredentials>\n" + " </config>\n" + " </component>\n" + "\n" + " <nodes count=\"1\" allocated-memory=\"" + jdiscMemoryPct + "%\">\n" + " " + resourceString + "\n" + " </nodes>\n" + " </container>\n" + "</services>\n"; return servicesXml.getBytes(UTF_8); } /** Returns a dummy deployment xml which sets up the service identity for the tester, if present. */ private static byte[] deploymentXml(TesterId id, Optional<AthenzDomain> athenzDomain, Optional<AthenzService> athenzService) { String deploymentSpec = "<?xml version='1.0' encoding='UTF-8'?>\n" + "<deployment version=\"1.0\" " + athenzDomain.map(domain -> "athenz-domain=\"" + domain.value() + "\" ").orElse("") + athenzService.map(service -> "athenz-service=\"" + service.value() + "\" ").orElse("") + ">" + " <instance id=\"" + id.id().instance().value() + "\" />" + "</deployment>"; return deploymentSpec.getBytes(UTF_8); } /** Logger which logs to a {@link JobController}, as well as to the parent class' {@link Logger}. */ private class DualLogger { private final RunId id; private final Step step; private DualLogger(RunId id, Step step) { this.id = id; this.step = step; } private void log(String... messages) { log(INFO, List.of(messages)); } private void log(Level level, String... 
messages) { log(level, List.of(messages)); } private void logAll(List<LogEntry> messages) { controller.jobController().log(id, step, messages); } private void log(List<String> messages) { log(INFO, messages); } private void log(Level level, List<String> messages) { controller.jobController().log(id, step, level, messages); } private void log(Level level, String message) { log(level, message, null); } private void logWithInternalException(Level level, String message, Throwable thrown) { logger.log(level, id + " at " + step + ": " + message, thrown); controller.jobController().log(id, step, level, message); } private void log(Level level, String message, Throwable thrown) { logger.log(level, id + " at " + step + ": " + message, thrown); if (thrown != null) { ByteArrayOutputStream traceBuffer = new ByteArrayOutputStream(); thrown.printStackTrace(new PrintStream(traceBuffer)); message += "\n" + traceBuffer; } controller.jobController().log(id, step, level, message); } } static class Timeouts { private final SystemName system; private Timeouts(SystemName system) { this.system = requireNonNull(system); } public static Timeouts of(SystemName system) { return new Timeouts(system); } Duration capacity() { return Duration.ofMinutes(system.isCd() ? 15 : 0); } Duration endpoint() { return Duration.ofMinutes(15); } Duration endpointCertificate() { return Duration.ofMinutes(20); } Duration tester() { return Duration.ofMinutes(30); } Duration statelessNodesDown() { return Duration.ofMinutes(system.isCd() ? 30 : 60); } Duration statefulNodesDown() { return Duration.ofMinutes(system.isCd() ? 30 : 720); } Duration noNodesDown() { return Duration.ofMinutes(system.isCd() ? 30 : 240); } Duration testerCertificate() { return Duration.ofMinutes(300); } } }
class InternalStepRunner implements StepRunner { private static final Logger logger = Logger.getLogger(InternalStepRunner.class.getName()); static final NodeResources DEFAULT_TESTER_RESOURCES = new NodeResources(1, 4, 50, 0.3, NodeResources.DiskSpeed.any); static final NodeResources DEFAULT_TESTER_RESOURCES_AWS = new NodeResources(2, 8, 50, 0.3, NodeResources.DiskSpeed.any); private final Controller controller; private final TestConfigSerializer testConfigSerializer; private final DeploymentFailureMails mails; private final Timeouts timeouts; public InternalStepRunner(Controller controller) { this.controller = controller; this.testConfigSerializer = new TestConfigSerializer(controller.system()); this.mails = new DeploymentFailureMails(controller.zoneRegistry()); this.timeouts = Timeouts.of(controller.system()); } @Override public Optional<RunStatus> run(LockedStep step, RunId id) { DualLogger logger = new DualLogger(id, step.get()); try { switch (step.get()) { case deployTester: return deployTester(id, logger); case deployInitialReal: return deployInitialReal(id, logger); case installInitialReal: return installInitialReal(id, logger); case deployReal: return deployReal(id, logger); case installTester: return installTester(id, logger); case installReal: return installReal(id, logger); case startStagingSetup: return startTests(id, true, logger); case endStagingSetup: case endTests: return endTests(id, logger); case startTests: return startTests(id, false, logger); case copyVespaLogs: return copyVespaLogs(id, logger); case deactivateReal: return deactivateReal(id, logger); case deactivateTester: return deactivateTester(id, logger); case report: return report(id, logger); default: throw new AssertionError("Unknown step '" + step + "'!"); } } catch (UncheckedIOException e) { logger.logWithInternalException(INFO, "IO exception running " + id + ": " + Exceptions.toMessageString(e), e); return Optional.empty(); } catch (RuntimeException e) { logger.log(WARNING, "Unexpected 
exception running " + id, e); if (step.get().alwaysRun()) { logger.log("Will keep trying, as this is a cleanup step."); return Optional.empty(); } return Optional.of(error); } } private Optional<RunStatus> deployInitialReal(RunId id, DualLogger logger) { Versions versions = controller.jobController().run(id).get().versions(); logger.log("Deploying platform version " + versions.sourcePlatform().orElse(versions.targetPlatform()) + " and application version " + versions.sourceApplication().orElse(versions.targetApplication()).id() + " ..."); return deployReal(id, true, logger); } private Optional<RunStatus> deployReal(RunId id, DualLogger logger) { Versions versions = controller.jobController().run(id).get().versions(); logger.log("Deploying platform version " + versions.targetPlatform() + " and application version " + versions.targetApplication().id() + " ..."); return deployReal(id, false, logger); } private Optional<RunStatus> deployReal(RunId id, boolean setTheStage, DualLogger logger) { Optional<X509Certificate> testerCertificate = controller.jobController().run(id).get().testerCertificate(); return deploy(() -> controller.applications().deploy(id.job(), setTheStage), controller.jobController().run(id).get() .stepInfo(setTheStage ? deployInitialReal : deployReal).get() .startTime().get(), logger) .filter(result -> { if ( ! 
useTesterCertificate(id) || result != running) return true; return controller.jobController().run(id).get().stepStatus(deployTester).get() == succeeded && testerCertificate.equals(controller.jobController().run(id).get().testerCertificate()); }); } private Optional<RunStatus> deployTester(RunId id, DualLogger logger) { Version platform = testerPlatformVersion(id); logger.log("Deploying the tester container on platform " + platform + " ..."); return deploy(() -> controller.applications().deployTester(id.tester(), testerPackage(id), id.type().zone(controller.system()), platform), controller.jobController().run(id).get() .stepInfo(deployTester).get() .startTime().get(), logger); } @SuppressWarnings("deprecation") private Optional<RunStatus> installInitialReal(RunId id, DualLogger logger) { return installReal(id, true, logger); } private Optional<RunStatus> installReal(RunId id, DualLogger logger) { return installReal(id, false, logger); } private Optional<RunStatus> installReal(RunId id, boolean setTheStage, DualLogger logger) { Optional<Deployment> deployment = deployment(id.application(), id.type()); if (deployment.isEmpty()) { logger.log("Deployment expired before installation was successful."); return Optional.of(installationFailed); } Versions versions = controller.jobController().run(id).get().versions(); Version platform = setTheStage ? 
versions.sourcePlatform().orElse(versions.targetPlatform()) : versions.targetPlatform(); Run run = controller.jobController().run(id).get(); Optional<ServiceConvergence> services = controller.serviceRegistry().configServer().serviceConvergence(new DeploymentId(id.application(), id.type().zone(controller.system())), Optional.of(platform)); if (services.isEmpty()) { logger.log("Config status not currently available -- will retry."); return Optional.empty(); } List<Node> nodes = controller.serviceRegistry().configServer().nodeRepository().list(id.type().zone(controller.system()), NodeFilter.all() .applications(id.application()) .states(active)); Set<HostName> parentHostnames = nodes.stream().map(node -> node.parentHostname().get()).collect(toSet()); List<Node> parents = controller.serviceRegistry().configServer().nodeRepository().list(id.type().zone(controller.system()), NodeFilter.all() .hostnames(parentHostnames)); boolean firstTick = run.convergenceSummary().isEmpty(); NodeList nodeList = NodeList.of(nodes, parents, services.get()); ConvergenceSummary summary = nodeList.summary(); if (firstTick) { logger.log(" logger.log(nodeList.asList().stream() .flatMap(node -> nodeDetails(node, true)) .collect(toList())); } else if ( ! 
summary.converged()) { logger.log("Waiting for convergence of " + summary.services() + " services across " + summary.nodes() + " nodes"); if (summary.needPlatformUpgrade() > 0) logger.log(summary.upgradingPlatform() + "/" + summary.needPlatformUpgrade() + " nodes upgrading platform"); if (summary.needReboot() > 0) logger.log(summary.rebooting() + "/" + summary.needReboot() + " nodes rebooting"); if (summary.needRestart() > 0) logger.log(summary.restarting() + "/" + summary.needRestart() + " nodes restarting"); if (summary.retiring() > 0) logger.log(summary.retiring() + " nodes retiring"); if (summary.upgradingFirmware() > 0) logger.log(summary.upgradingFirmware() + " nodes upgrading firmware"); if (summary.upgradingOs() > 0) logger.log(summary.upgradingOs() + " nodes upgrading OS"); if (summary.needNewConfig() > 0) logger.log(summary.needNewConfig() + " application services upgrading"); } if (summary.converged()) { controller.jobController().locked(id, lockedRun -> lockedRun.withSummary(null)); if (endpointsAvailable(id.application(), id.type().zone(controller.system()), logger)) { if (containersAreUp(id.application(), id.type().zone(controller.system()), logger)) { logger.log("Installation succeeded!"); return Optional.of(running); } } else if (timedOut(id, deployment.get(), timeouts.endpoint())) { logger.log(WARNING, "Endpoints failed to show up within " + timeouts.endpoint().toMinutes() + " minutes!"); return Optional.of(error); } } String failureReason = null; NodeList suspendedTooLong = nodeList.isStateful() .suspendedSince(controller.clock().instant().minus(timeouts.statefulNodesDown())) .and(nodeList.not().isStateful() .suspendedSince(controller.clock().instant().minus(timeouts.statelessNodesDown())) ); if ( ! 
suspendedTooLong.isEmpty() && deployment.get().at().plus(timeouts.statelessNodesDown()).isBefore(controller.clock().instant())) { failureReason = "Some nodes have been suspended for more than the allowed threshold:\n" + suspendedTooLong.asList().stream().map(node -> node.node().hostname().value()).collect(joining("\n")); } if (run.noNodesDownSince() .map(since -> since.isBefore(controller.clock().instant().minus(timeouts.noNodesDown()))) .orElse(false)) { if (summary.needPlatformUpgrade() > 0 || summary.needReboot() > 0 || summary.needRestart() > 0) failureReason = "Timed out after waiting " + timeouts.noNodesDown().toMinutes() + " minutes for " + "nodes to suspend. This is normal if the cluster is excessively busy. " + "Nodes will continue to attempt suspension to progress installation independently of " + "this run."; else failureReason = "Nodes not able to start with new application package."; } Duration timeout = JobRunner.jobTimeout.minusHours(1); if (timedOut(id, deployment.get(), timeout)) { failureReason = "Installation failed to complete within " + timeout.toHours() + "hours!"; } if (failureReason != null) { logger.log(" logger.log(nodeList.asList().stream() .flatMap(node -> nodeDetails(node, true)) .collect(toList())); logger.log(" logger.log(nodeList.not().in(nodeList.not().needsNewConfig() .not().needsPlatformUpgrade() .not().needsReboot() .not().needsRestart() .not().needsFirmwareUpgrade() .not().needsOsUpgrade()) .asList().stream() .flatMap(node -> nodeDetails(node, true)) .collect(toList())); logger.log(INFO, failureReason); return Optional.of(installationFailed); } if ( ! firstTick) logger.log(FINE, nodeList.expectedDown().and(nodeList.needsNewConfig()).asList().stream() .distinct() .flatMap(node -> nodeDetails(node, false)) .collect(toList())); controller.jobController().locked(id, lockedRun -> { Instant noNodesDownSince = nodeList.allowedDown().size() == 0 ? 
lockedRun.noNodesDownSince().orElse(controller.clock().instant()) : null; return lockedRun.noNodesDownSince(noNodesDownSince).withSummary(summary); }); return Optional.empty(); } private Version testerPlatformVersion(RunId id) { return application(id.application()).change().isPinned() ? controller.jobController().run(id).get().versions().targetPlatform() : controller.readSystemVersion(); } private Optional<RunStatus> installTester(RunId id, DualLogger logger) { Run run = controller.jobController().run(id).get(); Version platform = testerPlatformVersion(id); ZoneId zone = id.type().zone(controller.system()); ApplicationId testerId = id.tester().id(); Optional<ServiceConvergence> services = controller.serviceRegistry().configServer().serviceConvergence(new DeploymentId(testerId, zone), Optional.of(platform)); if (services.isEmpty()) { logger.log("Config status not currently available -- will retry."); return run.stepInfo(installTester).get().startTime().get().isBefore(controller.clock().instant().minus(Duration.ofMinutes(5))) ? 
Optional.of(error) : Optional.empty(); } List<Node> nodes = controller.serviceRegistry().configServer().nodeRepository().list(zone, NodeFilter.all() .applications(testerId) .states(active, reserved)); Set<HostName> parentHostnames = nodes.stream().map(node -> node.parentHostname().get()).collect(toSet()); List<Node> parents = controller.serviceRegistry().configServer().nodeRepository().list(zone, NodeFilter.all() .hostnames(parentHostnames)); NodeList nodeList = NodeList.of(nodes, parents, services.get()); logger.log(nodeList.asList().stream() .flatMap(node -> nodeDetails(node, false)) .collect(toList())); if (nodeList.summary().converged() && testerContainersAreUp(testerId, zone, logger)) { logger.log("Tester container successfully installed!"); return Optional.of(running); } if (run.stepInfo(installTester).get().startTime().get().plus(timeouts.tester()).isBefore(controller.clock().instant())) { logger.log(WARNING, "Installation of tester failed to complete within " + timeouts.tester().toMinutes() + " minutes!"); return Optional.of(error); } return Optional.empty(); } /** Returns true iff all calls to endpoint in the deployment give 100 consecutive 200 OK responses on /status.html. */ private boolean containersAreUp(ApplicationId id, ZoneId zoneId, DualLogger logger) { var endpoints = controller.routing().readTestRunnerEndpointsOf(Set.of(new DeploymentId(id, zoneId))); if ( ! endpoints.containsKey(zoneId)) return false; return endpoints.get(zoneId).parallelStream().allMatch(endpoint -> { boolean ready = controller.jobController().cloud().ready(endpoint.url()); if (!ready) { logger.log("Failed to get 100 consecutive OKs from " + endpoint); } return ready; }); } /** Returns true iff all containers in the tester deployment give 100 consecutive 200 OK responses on /status.html. 
*/ private boolean testerContainersAreUp(ApplicationId id, ZoneId zoneId, DualLogger logger) { DeploymentId deploymentId = new DeploymentId(id, zoneId); if (controller.jobController().cloud().testerReady(deploymentId)) { return true; } else { logger.log("Failed to get 100 consecutive OKs from tester container for " + deploymentId); return false; } } private boolean endpointsAvailable(ApplicationId id, ZoneId zone, DualLogger logger) { DeploymentId deployment = new DeploymentId(id, zone); Map<ZoneId, List<Endpoint>> endpoints = controller.routing().readTestRunnerEndpointsOf(Set.of(deployment)); if ( ! endpoints.containsKey(zone)) { logger.log("Endpoints not yet ready."); return false; } for (var endpoint : endpoints.get(zone)) { HostName endpointName = HostName.from(endpoint.dnsName()); var ipAddress = controller.jobController().cloud().resolveHostName(endpointName); if (ipAddress.isEmpty()) { logger.log(INFO, "DNS lookup yielded no IP address for '" + endpointName + "'."); return false; } DeploymentRoutingContext context = controller.routing().of(deployment); if (context.routingMethod() == RoutingMethod.exclusive) { RoutingPolicy policy = context.routingPolicy(ClusterSpec.Id.from(endpoint.name())) .orElseThrow(() -> new IllegalStateException(endpoint + " has no matching policy")); var cNameValue = controller.jobController().cloud().resolveCname(endpointName); if ( ! cNameValue.map(policy.canonicalName()::equals).orElse(false)) { logger.log(INFO, "CNAME '" + endpointName + "' points at " + cNameValue.map(name -> "'" + name + "'").orElse("nothing") + " but should point at load balancer '" + policy.canonicalName() + "'"); return false; } var loadBalancerAddress = controller.jobController().cloud().resolveHostName(policy.canonicalName()); if ( ! 
loadBalancerAddress.equals(ipAddress)) { logger.log(INFO, "IP address of CNAME '" + endpointName + "' (" + ipAddress.get() + ") and load balancer '" + policy.canonicalName() + "' (" + loadBalancerAddress.orElse("empty") + ") are not equal"); return false; } } } logEndpoints(endpoints, logger); return true; } private void logEndpoints(Map<ZoneId, List<Endpoint>> zoneEndpoints, DualLogger logger) { List<String> messages = new ArrayList<>(); messages.add("Found endpoints:"); zoneEndpoints.forEach((zone, endpoints) -> { messages.add("- " + zone); for (Endpoint endpoint : endpoints) messages.add(" |-- " + endpoint.url() + " (cluster '" + endpoint.name() + "')"); }); logger.log(messages); } private Stream<String> nodeDetails(NodeWithServices node, boolean printAllServices) { return Stream.concat(Stream.of(node.node().hostname() + ": " + humanize(node.node().serviceState()) + (node.node().suspendedSince().map(since -> " since " + since).orElse("")), "--- platform " + wantedPlatform(node.node()) + (node.needsPlatformUpgrade() ? " <-- " + currentPlatform(node.node()) : "") + (node.needsOsUpgrade() && node.isAllowedDown() ? ", upgrading OS (" + node.parent().wantedOsVersion() + " <-- " + node.parent().currentOsVersion() + ")" : "") + (node.needsFirmwareUpgrade() && node.isAllowedDown() ? ", upgrading firmware" : "") + (node.needsRestart() ? ", restart pending (" + node.node().wantedRestartGeneration() + " <-- " + node.node().restartGeneration() + ")" : "") + (node.needsReboot() ? ", reboot pending (" + node.node().wantedRebootGeneration() + " <-- " + node.node().rebootGeneration() + ")" : "")), node.services().stream() .filter(service -> printAllServices || node.needsNewConfig()) .map(service -> "--- " + service.type() + " on port " + service.port() + (service.currentGeneration() == -1 ? 
" has not started " : " has config generation " + service.currentGeneration() + ", wanted is " + node.wantedConfigGeneration()))); } private String wantedPlatform(Node node) { return node.wantedDockerImage().repository() + ":" + node.wantedVersion(); } private String currentPlatform(Node node) { String currentRepo = node.currentDockerImage().repository(); String wantedRepo = node.wantedDockerImage().repository(); return (currentRepo.equals(wantedRepo) ? "" : currentRepo + ":") + node.currentVersion(); } private String humanize(Node.ServiceState state) { switch (state) { case allowedDown: return "allowed to be DOWN"; case expectedUp: return "expected to be UP"; case permanentlyDown: return "permanently DOWN"; case unorchestrated: return "unorchestrated"; default: return state.name(); } } private Optional<RunStatus> startTests(RunId id, boolean isSetup, DualLogger logger) { Optional<Deployment> deployment = deployment(id.application(), id.type()); if (deployment.isEmpty()) { logger.log(INFO, "Deployment expired before tests could start."); return Optional.of(error); } var deployments = controller.applications().requireInstance(id.application()) .productionDeployments().keySet().stream() .map(zone -> new DeploymentId(id.application(), zone)) .collect(Collectors.toSet()); ZoneId zoneId = id.type().zone(controller.system()); deployments.add(new DeploymentId(id.application(), zoneId)); logger.log("Attempting to find endpoints ..."); var endpoints = controller.routing().readTestRunnerEndpointsOf(deployments); if ( ! 
endpoints.containsKey(zoneId)) { logger.log(WARNING, "Endpoints for the deployment to test vanished again, while it was still active!"); return Optional.of(error); } logEndpoints(endpoints, logger); if (!controller.jobController().cloud().testerReady(getTesterDeploymentId(id))) { logger.log(WARNING, "Tester container went bad!"); return Optional.of(error); } logger.log("Starting tests ..."); TesterCloud.Suite suite = TesterCloud.Suite.of(id.type(), isSetup); byte[] config = testConfigSerializer.configJson(id.application(), id.type(), true, endpoints, controller.applications().reachableContentClustersByZone(deployments)); controller.jobController().cloud().startTests(getTesterDeploymentId(id), suite, config); return Optional.of(running); } private Optional<RunStatus> endTests(RunId id, DualLogger logger) { if (deployment(id.application(), id.type()).isEmpty()) { logger.log(INFO, "Deployment expired before tests could complete."); return Optional.of(aborted); } Optional<X509Certificate> testerCertificate = controller.jobController().run(id).get().testerCertificate(); if (testerCertificate.isPresent()) { try { testerCertificate.get().checkValidity(Date.from(controller.clock().instant())); } catch (CertificateExpiredException | CertificateNotYetValidException e) { logger.log(WARNING, "Tester certificate expired before tests could complete."); return Optional.of(aborted); } } controller.jobController().updateTestLog(id); TesterCloud.Status testStatus = controller.jobController().cloud().getStatus(getTesterDeploymentId(id)); switch (testStatus) { case NOT_STARTED: throw new IllegalStateException("Tester reports tests not started, even though they should have!"); case RUNNING: return Optional.empty(); case FAILURE: logger.log("Tests failed."); controller.jobController().updateTestReport(id); return Optional.of(testFailure); case ERROR: logger.log(INFO, "Tester failed running its tests!"); controller.jobController().updateTestReport(id); return Optional.of(error); case 
SUCCESS: logger.log("Tests completed successfully."); controller.jobController().updateTestReport(id); return Optional.of(running); default: throw new IllegalStateException("Unknown status '" + testStatus + "'!"); } } private Optional<RunStatus> copyVespaLogs(RunId id, DualLogger logger) { if (deployment(id.application(), id.type()).isPresent()) try { controller.jobController().updateVespaLog(id); } catch (ConfigServerException e) { Instant doom = controller.jobController().run(id).get().stepInfo(copyVespaLogs).get().startTime().get() .plus(Duration.ofMinutes(3)); if (e.code() == ConfigServerException.ErrorCode.NOT_FOUND && controller.clock().instant().isBefore(doom)) { logger.log(INFO, "Found no logs, but will retry"); return Optional.empty(); } else { logger.log(INFO, "Failure getting vespa logs for " + id, e); return Optional.of(error); } } catch (Exception e) { logger.log(INFO, "Failure getting vespa logs for " + id, e); return Optional.of(error); } return Optional.of(running); } private Optional<RunStatus> deactivateReal(RunId id, DualLogger logger) { try { logger.log("Deactivating deployment of " + id.application() + " in " + id.type().zone(controller.system()) + " ..."); controller.applications().deactivate(id.application(), id.type().zone(controller.system())); return Optional.of(running); } catch (RuntimeException e) { logger.log(WARNING, "Failed deleting application " + id.application(), e); Instant startTime = controller.jobController().run(id).get().stepInfo(deactivateReal).get().startTime().get(); return startTime.isBefore(controller.clock().instant().minus(Duration.ofHours(1))) ? 
Optional.of(error) : Optional.empty(); } } private Optional<RunStatus> deactivateTester(RunId id, DualLogger logger) { try { logger.log("Deactivating tester of " + id.application() + " in " + id.type().zone(controller.system()) + " ..."); controller.jobController().deactivateTester(id.tester(), id.type()); return Optional.of(running); } catch (RuntimeException e) { logger.log(WARNING, "Failed deleting tester of " + id.application(), e); Instant startTime = controller.jobController().run(id).get().stepInfo(deactivateTester).get().startTime().get(); return startTime.isBefore(controller.clock().instant().minus(Duration.ofHours(1))) ? Optional.of(error) : Optional.empty(); } } private Optional<RunStatus> report(RunId id, DualLogger logger) { try { controller.jobController().active(id).ifPresent(run -> { if (run.hasFailed()) sendEmailNotification(run, logger); updateConsoleNotification(run); }); } catch (IllegalStateException e) { logger.log(INFO, "Job '" + id.type() + "' no longer supposed to run?", e); return Optional.of(error); } catch (RuntimeException e) { Instant start = controller.jobController().run(id).get().stepInfo(report).get().startTime().get(); return (controller.clock().instant().isAfter(start.plusSeconds(180))) ? Optional.empty() : Optional.of(error); } return Optional.of(running); } /** Sends a mail with a notification of a failed run, if one should be sent. */ private void sendEmailNotification(Run run, DualLogger logger) { if ( ! isNewFailure(run)) return; Application application = controller.applications().requireApplication(TenantAndApplicationId.from(run.id().application())); Notifications notifications = application.deploymentSpec().requireInstance(run.id().application().instance()).notifications(); boolean newCommit = application.require(run.id().application().instance()).change().application() .map(run.versions().targetApplication()::equals) .orElse(false); When when = newCommit ? 
failingCommit : failing; List<String> recipients = new ArrayList<>(notifications.emailAddressesFor(when)); if (notifications.emailRolesFor(when).contains(author)) run.versions().targetApplication().authorEmail().ifPresent(recipients::add); if (recipients.isEmpty()) return; try { logger.log(INFO, "Sending failure notification to " + String.join(", ", recipients)); mailOf(run, recipients).ifPresent(controller.serviceRegistry().mailer()::send); } catch (RuntimeException e) { logger.log(WARNING, "Exception trying to send mail for " + run.id(), e); } } private boolean isNewFailure(Run run) { return controller.jobController().lastCompleted(run.id().job()) .map(previous -> ! previous.hasFailed() || ! previous.versions().targetsMatch(run.versions())) .orElse(true); } private void updateConsoleNotification(Run run) { NotificationSource source = NotificationSource.from(run.id()); Consumer<String> updater = msg -> controller.notificationsDb().setNotification(source, Notification.Type.deployment, Notification.Level.error, msg); switch (run.status()) { case aborted: return; case running: case success: controller.notificationsDb().removeNotification(source, Notification.Type.deployment); return; case outOfCapacity: if ( ! run.id().type().environment().isTest()) updater.accept("lack of capacity. Please contact the Vespa team to request more!"); return; case deploymentFailed: updater.accept("invalid application configuration, or timeout of other deployments of the same application"); return; case installationFailed: updater.accept("nodes were not able to upgrade to the new configuration"); return; case testFailure: updater.accept("one or more verification tests against the deployment failed"); return; case error: case endpointCertificateTimeout: break; default: logger.log(WARNING, "Don't know what to set console notification to for run status '" + run.status() + "'"); } updater.accept("something in the framework went wrong. Such errors are " + "usually transient. 
Please contact the Vespa team if the problem persists!"); } private Optional<Mail> mailOf(Run run, List<String> recipients) { switch (run.status()) { case running: case aborted: case success: return Optional.empty(); case outOfCapacity: return run.id().type().isProduction() ? Optional.of(mails.outOfCapacity(run.id(), recipients)) : Optional.empty(); case deploymentFailed: return Optional.of(mails.deploymentFailure(run.id(), recipients)); case installationFailed: return Optional.of(mails.installationFailure(run.id(), recipients)); case testFailure: return Optional.of(mails.testFailure(run.id(), recipients)); case error: case endpointCertificateTimeout: return Optional.of(mails.systemError(run.id(), recipients)); default: logger.log(WARNING, "Don't know what mail to send for run status '" + run.status() + "'"); return Optional.of(mails.systemError(run.id(), recipients)); } } /** Returns the deployment of the real application in the zone of the given job, if it exists. */ private Optional<Deployment> deployment(ApplicationId id, JobType type) { return Optional.ofNullable(application(id).deployments().get(type.zone(controller.system()))); } /** Returns the real application with the given id. */ private Instance application(ApplicationId id) { controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(id), __ -> { }); return controller.applications().requireInstance(id); } /** * Returns whether the time since deployment is more than the zone deployment expiry, or the given timeout. * * We time out the job before the deployment expires, for zones where deployments are not persistent, * to be able to collect the Vespa log from the deployment. Thus, the lower of the zone's deployment expiry, * and the given default installation timeout, minus one minute, is used as a timeout threshold. */ private boolean timedOut(RunId id, Deployment deployment, Duration defaultTimeout) { Run run = controller.jobController().run(id).get(); if ( ! 
controller.system().isCd() && run.start().isAfter(deployment.at())) return false; Duration timeout = controller.zoneRegistry().getDeploymentTimeToLive(deployment.zone()) .filter(zoneTimeout -> zoneTimeout.compareTo(defaultTimeout) < 0) .orElse(defaultTimeout); return deployment.at().isBefore(controller.clock().instant().minus(timeout.minus(Duration.ofMinutes(1)))); } private boolean useTesterCertificate(RunId id) { return controller.system().isPublic() && id.type().environment().isTest(); } /** Returns the application package for the tester application, assembled from a generated config, fat-jar and services.xml. */ private ApplicationPackage testerPackage(RunId id) { ApplicationVersion version = controller.jobController().run(id).get().versions().targetApplication(); DeploymentSpec spec = controller.applications().requireApplication(TenantAndApplicationId.from(id.application())).deploymentSpec(); ZoneId zone = id.type().zone(controller.system()); boolean useTesterCertificate = useTesterCertificate(id); byte[] servicesXml = servicesXml( ! 
controller.system().isPublic(), useTesterCertificate, testerResourcesFor(zone, spec.requireInstance(id.application().instance())), controller.controllerConfig().steprunner().testerapp()); byte[] testPackage = controller.applications().applicationStore().getTester(id.application().tenant(), id.application().application(), version); byte[] deploymentXml = deploymentXml(id.tester(), spec.athenzDomain(), spec.requireInstance(id.application().instance()).athenzService(zone.environment(), zone.region())); try (ZipBuilder zipBuilder = new ZipBuilder(testPackage.length + servicesXml.length + deploymentXml.length + 1000)) { zipBuilder.add(testPackage); zipBuilder.add("artifacts/.ignore-" + UUID.randomUUID(), new byte[0]); zipBuilder.add("tests/.ignore-" + UUID.randomUUID(), new byte[0]); zipBuilder.add("services.xml", servicesXml); zipBuilder.add("deployment.xml", deploymentXml); if (useTesterCertificate) appendAndStoreCertificate(zipBuilder, id); zipBuilder.close(); return new ApplicationPackage(zipBuilder.toByteArray()); } } private void appendAndStoreCertificate(ZipBuilder zipBuilder, RunId id) { KeyPair keyPair = KeyUtils.generateKeypair(KeyAlgorithm.RSA, 2048); X500Principal subject = new X500Principal("CN=" + id.tester().id().toFullString() + "." + id.type() + "." 
+ id.number()); X509Certificate certificate = X509CertificateBuilder.fromKeypair(keyPair, subject, controller.clock().instant(), controller.clock().instant().plus(timeouts.testerCertificate()), SignatureAlgorithm.SHA512_WITH_RSA, BigInteger.valueOf(1)) .build(); controller.jobController().storeTesterCertificate(id, certificate); zipBuilder.add("artifacts/key", KeyUtils.toPem(keyPair.getPrivate()).getBytes(UTF_8)); zipBuilder.add("artifacts/cert", X509CertificateUtils.toPem(certificate).getBytes(UTF_8)); } private DeploymentId getTesterDeploymentId(RunId runId) { ZoneId zoneId = runId.type().zone(controller.system()); return new DeploymentId(runId.tester().id(), zoneId); } static NodeResources testerResourcesFor(ZoneId zone, DeploymentInstanceSpec spec) { NodeResources nodeResources = spec.steps().stream() .filter(step -> step.concerns(zone.environment())) .findFirst() .flatMap(step -> step.zones().get(0).testerFlavor()) .map(NodeResources::fromLegacyName) .orElse(zone.region().value().contains("aws-") ? DEFAULT_TESTER_RESOURCES_AWS : DEFAULT_TESTER_RESOURCES); return nodeResources.with(NodeResources.DiskSpeed.any); } /** Returns the generated services.xml content for the tester application. 
*/ static byte[] servicesXml(boolean systemUsesAthenz, boolean useTesterCertificate, NodeResources resources, ControllerConfig.Steprunner.Testerapp config) { int jdiscMemoryGb = 2; int jdiscMemoryPct = (int) Math.ceil(100 * jdiscMemoryGb / resources.memoryGb()); int testMemoryMb = (int) (1024 * (resources.memoryGb() - jdiscMemoryGb) / 2); String resourceString = Text.format( "<resources vcpu=\"%.2f\" memory=\"%.2fGb\" disk=\"%.2fGb\" disk-speed=\"%s\" storage-type=\"%s\"/>", resources.vcpu(), resources.memoryGb(), resources.diskGb(), resources.diskSpeed().name(), resources.storageType().name()); String runtimeProviderClass = config.runtimeProviderClass(); String tenantCdBundle = config.tenantCdBundle(); String servicesXml = "<?xml version='1.0' encoding='UTF-8'?>\n" + "<services xmlns:deploy='vespa' version='1.0'>\n" + " <container version='1.0' id='tester'>\n" + "\n" + " <component id=\"com.yahoo.vespa.hosted.testrunner.TestRunner\" bundle=\"vespa-testrunner-components\">\n" + " <config name=\"com.yahoo.vespa.hosted.testrunner.test-runner\">\n" + " <artifactsPath>artifacts</artifactsPath>\n" + " <surefireMemoryMb>" + testMemoryMb + "</surefireMemoryMb>\n" + " <useAthenzCredentials>" + systemUsesAthenz + "</useAthenzCredentials>\n" + " <useTesterCertificate>" + useTesterCertificate + "</useTesterCertificate>\n" + " </config>\n" + " </component>\n" + "\n" + " <handler id=\"com.yahoo.vespa.testrunner.TestRunnerHandler\" bundle=\"vespa-osgi-testrunner\">\n" + " <binding>http: " </handler>\n" + "\n" + " <component id=\"" + runtimeProviderClass + "\" bundle=\"" + tenantCdBundle + "\" />\n" + "\n" + " <component id=\"com.yahoo.vespa.testrunner.JunitRunner\" bundle=\"vespa-osgi-testrunner\">\n" + " <config name=\"com.yahoo.vespa.testrunner.junit-test-runner\">\n" + " <artifactsPath>artifacts</artifactsPath>\n" + " <useAthenzCredentials>" + systemUsesAthenz + "</useAthenzCredentials>\n" + " </config>\n" + " </component>\n" + "\n" + " <component 
id=\"com.yahoo.vespa.testrunner.VespaCliTestRunner\" bundle=\"vespa-osgi-testrunner\">\n" + " <config name=\"com.yahoo.vespa.testrunner.vespa-cli-test-runner\">\n" + " <artifactsPath>artifacts</artifactsPath>\n" + " <testsPath>tests</testsPath>\n" + " <useAthenzCredentials>" + systemUsesAthenz + "</useAthenzCredentials>\n" + " </config>\n" + " </component>\n" + "\n" + " <nodes count=\"1\" allocated-memory=\"" + jdiscMemoryPct + "%\">\n" + " " + resourceString + "\n" + " </nodes>\n" + " </container>\n" + "</services>\n"; return servicesXml.getBytes(UTF_8); } /** Returns a dummy deployment xml which sets up the service identity for the tester, if present. */ private static byte[] deploymentXml(TesterId id, Optional<AthenzDomain> athenzDomain, Optional<AthenzService> athenzService) { String deploymentSpec = "<?xml version='1.0' encoding='UTF-8'?>\n" + "<deployment version=\"1.0\" " + athenzDomain.map(domain -> "athenz-domain=\"" + domain.value() + "\" ").orElse("") + athenzService.map(service -> "athenz-service=\"" + service.value() + "\" ").orElse("") + ">" + " <instance id=\"" + id.id().instance().value() + "\" />" + "</deployment>"; return deploymentSpec.getBytes(UTF_8); } /** Logger which logs to a {@link JobController}, as well as to the parent class' {@link Logger}. */ private class DualLogger { private final RunId id; private final Step step; private DualLogger(RunId id, Step step) { this.id = id; this.step = step; } private void log(String... messages) { log(INFO, List.of(messages)); } private void log(Level level, String... 
messages) { log(level, List.of(messages)); } private void logAll(List<LogEntry> messages) { controller.jobController().log(id, step, messages); } private void log(List<String> messages) { log(INFO, messages); } private void log(Level level, List<String> messages) { controller.jobController().log(id, step, level, messages); } private void log(Level level, String message) { log(level, message, null); } private void logWithInternalException(Level level, String message, Throwable thrown) { logger.log(level, id + " at " + step + ": " + message, thrown); controller.jobController().log(id, step, level, message); } private void log(Level level, String message, Throwable thrown) { logger.log(level, id + " at " + step + ": " + message, thrown); if (thrown != null) { ByteArrayOutputStream traceBuffer = new ByteArrayOutputStream(); thrown.printStackTrace(new PrintStream(traceBuffer)); message += "\n" + traceBuffer; } controller.jobController().log(id, step, level, message); } } static class Timeouts { private final SystemName system; private Timeouts(SystemName system) { this.system = requireNonNull(system); } public static Timeouts of(SystemName system) { return new Timeouts(system); } Duration capacity() { return Duration.ofMinutes(system.isCd() ? 15 : 0); } Duration endpoint() { return Duration.ofMinutes(15); } Duration endpointCertificate() { return Duration.ofMinutes(20); } Duration tester() { return Duration.ofMinutes(30); } Duration statelessNodesDown() { return Duration.ofMinutes(system.isCd() ? 30 : 60); } Duration statefulNodesDown() { return Duration.ofMinutes(system.isCd() ? 30 : 720); } Duration noNodesDown() { return Duration.ofMinutes(system.isCd() ? 30 : 240); } Duration testerCertificate() { return Duration.ofMinutes(300); } } }
setDaemon() ?
public void start() { qrConfig = getConfig(QrConfig.class, true); reconfigure(qrConfig); hackToInitializeServer(qrConfig); ContainerBuilder builder = createBuilderWithGuiceBindings(); configurer = createConfigurer(builder.guiceModules().activate()); initializeAndActivateContainer(builder, () -> {}); reconfigurerThread.start(); portWatcher = new Thread(this::watchPortChange, "configured-application-port-watcher"); portWatcher.setDaemon(true); portWatcher.start(); if (setupRpc()) { slobrokRegistrator = registerInSlobrok(qrConfig); } }
reconfigurerThread.start();
public void start() { qrConfig = getConfig(QrConfig.class, true); reconfigure(qrConfig); hackToInitializeServer(qrConfig); ContainerBuilder builder = createBuilderWithGuiceBindings(); configurer = createConfigurer(builder.guiceModules().activate()); initializeAndActivateContainer(builder, () -> {}); reconfigurerThread.setDaemon(true); reconfigurerThread.start(); portWatcher.setDaemon(true); portWatcher.start(); if (setupRpc()) { slobrokRegistrator = registerInSlobrok(qrConfig); } }
class to be loaded, * which runs the static block. */ @SuppressWarnings("UnusedDeclaration") public static void ensureVespaLoggingInitialized() { }
class to be loaded, * which runs the static block. */ @SuppressWarnings("UnusedDeclaration") public static void ensureVespaLoggingInitialized() { }
I guess this should be outputFieldType instead of outputField?
protected void doVerify(VerificationContext context) { String outputField = context.getOutputField(); if (outputField == null) throw new VerificationException(this, "No output field in this statement: " + "Don't know what value to hash to."); DataType outputFieldType = context.getInputType(this, outputField); if (outputFieldType != DataType.INT && outputFieldType != DataType.LONG) throw new VerificationException(this, "The type of the output field " + outputField + " is not an int or long but " + outputField); targetType = outputFieldType; context.setValueType(createdOutputType()); }
" is not an int or long but " + outputField);
protected void doVerify(VerificationContext context) { String outputField = context.getOutputField(); if (outputField == null) throw new VerificationException(this, "No output field in this statement: " + "Don't know what value to hash to."); DataType outputFieldType = context.getInputType(this, outputField); if ( ! canStoreHash(outputFieldType)) throw new VerificationException(this, "The type of the output field " + outputField + " is not int or long but " + outputFieldType); targetType = outputFieldType; context.setValueType(createdOutputType()); }
class HashExpression extends Expression { private final HashFunction hasher = Hashing.sipHash24(); /** The destination the embedding will be written to on the form [schema name].[field name] */ private String destination; /** The target type we are embedding into. */ private DataType targetType; public HashExpression() { super(DataType.STRING); } @Override public void setStatementOutput(DocumentType documentType, Field field) { if (field.getDataType() != DataType.INT && field.getDataType() != DataType.LONG) throw new IllegalArgumentException("Cannot use the hash function on an indexing statement for " + field.getName() + ": The hash function can only be used when the target field is int or long, not " + field.getDataType()); targetType = field.getDataType(); } @Override protected void doExecute(ExecutionContext context) { StringFieldValue input = (StringFieldValue) context.getValue(); if (targetType.equals(DataType.INT)) context.setValue(new IntegerFieldValue(hashToInt(input.getString()))); else if (targetType.equals(DataType.LONG)) context.setValue(new LongFieldValue(hashToLong(input.getString()))); else throw new IllegalStateException(); } private int hashToInt(String value) { return hasher.hashString(value, StandardCharsets.UTF_8).asInt(); } private long hashToLong(String value) { return hasher.hashString(value, StandardCharsets.UTF_8).asLong(); } @Override @Override public DataType createdOutputType() { return targetType; } @Override public String toString() { return "embed"; } @Override public int hashCode() { return 1; } @Override public boolean equals(Object o) { return o instanceof EmbedExpression; } }
class HashExpression extends Expression { private final HashFunction hasher = Hashing.sipHash24(); /** The target type we are hashing into. */ private DataType targetType; public HashExpression() { super(DataType.STRING); } @Override public void setStatementOutput(DocumentType documentType, Field field) { if ( ! canStoreHash(field.getDataType())) throw new IllegalArgumentException("Cannot use the hash function on an indexing statement for " + field.getName() + ": The hash function can only be used when the target field " + "is int or long, not " + field.getDataType()); targetType = field.getDataType(); } @Override protected void doExecute(ExecutionContext context) { StringFieldValue input = (StringFieldValue) context.getValue(); if (targetType.equals(DataType.INT)) context.setValue(new IntegerFieldValue(hashToInt(input.getString()))); else if (targetType.equals(DataType.LONG)) context.setValue(new LongFieldValue(hashToLong(input.getString()))); else throw new IllegalStateException(); } private int hashToInt(String value) { return hasher.hashString(value, StandardCharsets.UTF_8).asInt(); } private long hashToLong(String value) { return hasher.hashString(value, StandardCharsets.UTF_8).asLong(); } @Override private boolean canStoreHash(DataType type) { if (type.equals(DataType.INT)) return true; if (type.equals(DataType.LONG)) return true; return false; } @Override public DataType createdOutputType() { return targetType; } @Override public String toString() { return "hash"; } @Override public int hashCode() { return 987; } @Override public boolean equals(Object o) { return o instanceof HashExpression; } }
The setup and execution is identical to that used in testIntHash(), except the data type and field name. Consider refactor into a helper function. As it stands I think it is harder to read as I was looking for differences between the two tests.
public void testLongHash() throws ParseException { var expression = Expression.fromString("input myText | hash | attribute 'myLong'"); SimpleTestAdapter adapter = new SimpleTestAdapter(); adapter.createField(new Field("myText", DataType.STRING)); var intField = new Field("myLong", DataType.LONG); adapter.createField(intField); adapter.setValue("myText", new StringFieldValue("input text")); expression.setStatementOutput(new DocumentType("myDocument"), intField); VerificationContext verificationContext = new VerificationContext(adapter); assertEquals(DataType.LONG, expression.verify(verificationContext)); ExecutionContext context = new ExecutionContext(adapter); context.setValue(new StringFieldValue("input text")); expression.execute(context); assertNotNull(context); assertTrue(adapter.values.containsKey("myLong")); assertEquals(7678158186624760752L, adapter.values.get("myLong").getWrappedValue()); }
expression.execute(context);
public void testLongHash() throws ParseException { var expression = Expression.fromString("input myText | hash | attribute 'myLong'"); SimpleTestAdapter adapter = new SimpleTestAdapter(); adapter.createField(new Field("myText", DataType.STRING)); var intField = new Field("myLong", DataType.LONG); adapter.createField(intField); adapter.setValue("myText", new StringFieldValue("input text")); expression.setStatementOutput(new DocumentType("myDocument"), intField); VerificationContext verificationContext = new VerificationContext(adapter); assertEquals(DataType.LONG, expression.verify(verificationContext)); ExecutionContext context = new ExecutionContext(adapter); context.setValue(new StringFieldValue("input text")); expression.execute(context); assertTrue(adapter.values.containsKey("myLong")); assertEquals(7678158186624760752L, adapter.values.get("myLong").getWrappedValue()); }
class ScriptTestCase { private final DocumentType type; public ScriptTestCase() { type = new DocumentType("mytype"); type.addField("in-1", DataType.STRING); type.addField("in-2", DataType.STRING); type.addField("out-1", DataType.STRING); type.addField("out-2", DataType.STRING); type.addField("mybool", DataType.BOOL); } @Test public void requireThatScriptExecutesStatements() { Document input = new Document(type, "id:scheme:mytype::"); input.setFieldValue("in-1", new StringFieldValue("6")); input.setFieldValue("in-2", new StringFieldValue("9")); Expression exp = new ScriptExpression( new StatementExpression(new InputExpression("in-1"), new AttributeExpression("out-1")), new StatementExpression(new InputExpression("in-2"), new AttributeExpression("out-2"))); Document output = Expression.execute(exp, input); assertNotNull(output); assertEquals(new StringFieldValue("6"), output.getFieldValue("out-1")); assertEquals(new StringFieldValue("9"), output.getFieldValue("out-2")); } @Test public void requireThatEachStatementHasEmptyInput() { Document input = new Document(type, "id:scheme:mytype::"); input.setFieldValue(input.getField("in-1"), new StringFieldValue("69")); Expression exp = new ScriptExpression( new StatementExpression(new InputExpression("in-1"), new AttributeExpression("out-1")), new StatementExpression(new AttributeExpression("out-2"))); try { exp.verify(input); fail(); } catch (VerificationException e) { assertTrue(e.getExpressionType().equals(ScriptExpression.class)); assertEquals("Expected any input, got null.", e.getMessage()); } } @Test public void requireThatFactoryMethodWorks() throws ParseException { Document input = new Document(type, "id:scheme:mytype::"); input.setFieldValue("in-1", new StringFieldValue("FOO")); Document output = Expression.execute(Expression.fromString("input 'in-1' | { index 'out-1'; lowercase | index 'out-2' }"), input); assertNotNull(output); assertEquals(new StringFieldValue("FOO"), output.getFieldValue("out-1")); 
assertEquals(new StringFieldValue("foo"), output.getFieldValue("out-2")); } @Test public void requireThatIfExpressionPassesOriginalInputAlong() throws ParseException { Document input = new Document(type, "id:scheme:mytype::"); Document output = Expression.execute(Expression.fromString("'foo' | if (1 < 2) { 'bar' | index 'out-1' } else { 'baz' | index 'out-1' } | index 'out-1'"), input); assertNotNull(output); assertEquals(new StringFieldValue("foo"), output.getFieldValue("out-1")); } @Test public void testLiteralBoolean() throws ParseException { Document input = new Document(type, "id:scheme:mytype::"); input.setFieldValue("in-1", new StringFieldValue("foo")); var expression = Expression.fromString("if (input 'in-1' == \"foo\") { true | summary 'mybool' | attribute 'mybool' }"); Document output = Expression.execute(expression, input); assertNotNull(output); assertEquals(new BoolFieldValue(true), output.getFieldValue("mybool")); } @Test public void testIntHash() throws ParseException { var expression = Expression.fromString("input myText | hash | attribute 'myInt'"); SimpleTestAdapter adapter = new SimpleTestAdapter(); adapter.createField(new Field("myText", DataType.STRING)); var intField = new Field("myInt", DataType.INT); adapter.createField(intField); adapter.setValue("myText", new StringFieldValue("input text")); expression.setStatementOutput(new DocumentType("myDocument"), intField); VerificationContext verificationContext = new VerificationContext(adapter); assertEquals(DataType.INT, expression.verify(verificationContext)); ExecutionContext context = new ExecutionContext(adapter); context.setValue(new StringFieldValue("input text")); expression.execute(context); assertNotNull(context); assertTrue(adapter.values.containsKey("myInt")); assertEquals(-1425622096, adapter.values.get("myInt").getWrappedValue()); } @Test @Test public void testEmbed() throws ParseException { TensorType tensorType = TensorType.fromSpec("tensor(d[4])"); var expression = 
Expression.fromString("input myText | embed | attribute 'myTensor'", new SimpleLinguistics(), new MockEmbedder("myDocument.myTensor")); SimpleTestAdapter adapter = new SimpleTestAdapter(); adapter.createField(new Field("myText", DataType.STRING)); var tensorField = new Field("myTensor", new TensorDataType(tensorType)); adapter.createField(tensorField); adapter.setValue("myText", new StringFieldValue("input text")); expression.setStatementOutput(new DocumentType("myDocument"), tensorField); VerificationContext verificationContext = new VerificationContext(adapter); assertEquals(TensorDataType.class, expression.verify(verificationContext).getClass()); ExecutionContext context = new ExecutionContext(adapter); context.setValue(new StringFieldValue("input text")); expression.execute(context); assertNotNull(context); assertTrue(adapter.values.containsKey("myTensor")); assertEquals(Tensor.from(tensorType, "[7,3,0,0]"), ((TensorFieldValue)adapter.values.get("myTensor")).getTensor().get()); } private static class MockEmbedder implements Embedder { private final String expectedDestination; public MockEmbedder(String expectedDestination) { this.expectedDestination = expectedDestination; } @Override public List<Integer> embed(String text, Embedder.Context context) { return null; } @Override public Tensor embed(String text, Embedder.Context context, TensorType tensorType) { assertEquals(expectedDestination, context.getDestination()); return Tensor.from(tensorType, "[7,3,0,0]"); } } }
class ScriptTestCase { private final DocumentType type; public ScriptTestCase() { type = new DocumentType("mytype"); type.addField("in-1", DataType.STRING); type.addField("in-2", DataType.STRING); type.addField("out-1", DataType.STRING); type.addField("out-2", DataType.STRING); type.addField("mybool", DataType.BOOL); } @Test public void requireThatScriptExecutesStatements() { Document input = new Document(type, "id:scheme:mytype::"); input.setFieldValue("in-1", new StringFieldValue("6")); input.setFieldValue("in-2", new StringFieldValue("9")); Expression exp = new ScriptExpression( new StatementExpression(new InputExpression("in-1"), new AttributeExpression("out-1")), new StatementExpression(new InputExpression("in-2"), new AttributeExpression("out-2"))); Document output = Expression.execute(exp, input); assertNotNull(output); assertEquals(new StringFieldValue("6"), output.getFieldValue("out-1")); assertEquals(new StringFieldValue("9"), output.getFieldValue("out-2")); } @Test public void requireThatEachStatementHasEmptyInput() { Document input = new Document(type, "id:scheme:mytype::"); input.setFieldValue(input.getField("in-1"), new StringFieldValue("69")); Expression exp = new ScriptExpression( new StatementExpression(new InputExpression("in-1"), new AttributeExpression("out-1")), new StatementExpression(new AttributeExpression("out-2"))); try { exp.verify(input); fail(); } catch (VerificationException e) { assertTrue(e.getExpressionType().equals(ScriptExpression.class)); assertEquals("Expected any input, got null.", e.getMessage()); } } @Test public void requireThatFactoryMethodWorks() throws ParseException { Document input = new Document(type, "id:scheme:mytype::"); input.setFieldValue("in-1", new StringFieldValue("FOO")); Document output = Expression.execute(Expression.fromString("input 'in-1' | { index 'out-1'; lowercase | index 'out-2' }"), input); assertNotNull(output); assertEquals(new StringFieldValue("FOO"), output.getFieldValue("out-1")); 
assertEquals(new StringFieldValue("foo"), output.getFieldValue("out-2")); } @Test public void requireThatIfExpressionPassesOriginalInputAlong() throws ParseException { Document input = new Document(type, "id:scheme:mytype::"); Document output = Expression.execute(Expression.fromString("'foo' | if (1 < 2) { 'bar' | index 'out-1' } else { 'baz' | index 'out-1' } | index 'out-1'"), input); assertNotNull(output); assertEquals(new StringFieldValue("foo"), output.getFieldValue("out-1")); } @Test public void testLiteralBoolean() throws ParseException { Document input = new Document(type, "id:scheme:mytype::"); input.setFieldValue("in-1", new StringFieldValue("foo")); var expression = Expression.fromString("if (input 'in-1' == \"foo\") { true | summary 'mybool' | attribute 'mybool' }"); Document output = Expression.execute(expression, input); assertNotNull(output); assertEquals(new BoolFieldValue(true), output.getFieldValue("mybool")); } @Test public void testIntHash() throws ParseException { var expression = Expression.fromString("input myText | hash | attribute 'myInt'"); SimpleTestAdapter adapter = new SimpleTestAdapter(); adapter.createField(new Field("myText", DataType.STRING)); var intField = new Field("myInt", DataType.INT); adapter.createField(intField); adapter.setValue("myText", new StringFieldValue("input text")); expression.setStatementOutput(new DocumentType("myDocument"), intField); VerificationContext verificationContext = new VerificationContext(adapter); assertEquals(DataType.INT, expression.verify(verificationContext)); ExecutionContext context = new ExecutionContext(adapter); context.setValue(new StringFieldValue("input text")); expression.execute(context); assertTrue(adapter.values.containsKey("myInt")); assertEquals(-1425622096, adapter.values.get("myInt").getWrappedValue()); } @Test @Test public void testEmbed() throws ParseException { TensorType tensorType = TensorType.fromSpec("tensor(d[4])"); var expression = Expression.fromString("input myText | 
embed | attribute 'myTensor'", new SimpleLinguistics(), new MockEmbedder("myDocument.myTensor")); SimpleTestAdapter adapter = new SimpleTestAdapter(); adapter.createField(new Field("myText", DataType.STRING)); var tensorField = new Field("myTensor", new TensorDataType(tensorType)); adapter.createField(tensorField); adapter.setValue("myText", new StringFieldValue("input text")); expression.setStatementOutput(new DocumentType("myDocument"), tensorField); VerificationContext verificationContext = new VerificationContext(adapter); assertEquals(TensorDataType.class, expression.verify(verificationContext).getClass()); ExecutionContext context = new ExecutionContext(adapter); context.setValue(new StringFieldValue("input text")); expression.execute(context); assertTrue(adapter.values.containsKey("myTensor")); assertEquals(Tensor.from(tensorType, "[7,3,0,0]"), ((TensorFieldValue)adapter.values.get("myTensor")).getTensor().get()); } private static class MockEmbedder implements Embedder { private final String expectedDestination; public MockEmbedder(String expectedDestination) { this.expectedDestination = expectedDestination; } @Override public List<Integer> embed(String text, Embedder.Context context) { return null; } @Override public Tensor embed(String text, Embedder.Context context, TensorType tensorType) { assertEquals(expectedDestination, context.getDestination()); return Tensor.from(tensorType, "[7,3,0,0]"); } } }
Yes, thanks.
/**
 * Verifies that this statement has an output field whose type can store the
 * hash (int or long), and records that type so execution knows the hash width.
 *
 * @throws VerificationException if there is no output field, or its type is
 *                               neither int nor long
 */
protected void doVerify(VerificationContext context) {
    String outputField = context.getOutputField();
    if (outputField == null)
        throw new VerificationException(this, "No output field in this statement: " +
                                              "Don't know what value to hash to.");
    DataType outputFieldType = context.getInputType(this, outputField);
    if (outputFieldType != DataType.INT && outputFieldType != DataType.LONG)
        // report the offending type, not the field name twice
        throw new VerificationException(this, "The type of the output field " + outputField +
                                              " is not an int or long but " + outputFieldType);
    targetType = outputFieldType;
    context.setValueType(createdOutputType());
}
" is not an int or long but " + outputField);
/**
 * Checks that this statement writes to an output field able to hold a hash
 * value (int or long), and remembers that type as the hash target.
 *
 * @throws VerificationException if no output field exists, or it cannot store a hash
 */
protected void doVerify(VerificationContext context) {
    String output = context.getOutputField();
    if (output == null) {
        throw new VerificationException(this, "No output field in this statement: " +
                                              "Don't know what value to hash to.");
    }
    DataType outputType = context.getInputType(this, output);
    if ( ! canStoreHash(outputType)) {
        throw new VerificationException(this, "The type of the output field " + output +
                                              " is not int or long but " + outputType);
    }
    targetType = outputType;
    context.setValueType(createdOutputType());
}
class HashExpression extends Expression { private final HashFunction hasher = Hashing.sipHash24(); /** The destination the embedding will be written to on the form [schema name].[field name] */ private String destination; /** The target type we are embedding into. */ private DataType targetType; public HashExpression() { super(DataType.STRING); } @Override public void setStatementOutput(DocumentType documentType, Field field) { if (field.getDataType() != DataType.INT && field.getDataType() != DataType.LONG) throw new IllegalArgumentException("Cannot use the hash function on an indexing statement for " + field.getName() + ": The hash function can only be used when the target field is int or long, not " + field.getDataType()); targetType = field.getDataType(); } @Override protected void doExecute(ExecutionContext context) { StringFieldValue input = (StringFieldValue) context.getValue(); if (targetType.equals(DataType.INT)) context.setValue(new IntegerFieldValue(hashToInt(input.getString()))); else if (targetType.equals(DataType.LONG)) context.setValue(new LongFieldValue(hashToLong(input.getString()))); else throw new IllegalStateException(); } private int hashToInt(String value) { return hasher.hashString(value, StandardCharsets.UTF_8).asInt(); } private long hashToLong(String value) { return hasher.hashString(value, StandardCharsets.UTF_8).asLong(); } @Override @Override public DataType createdOutputType() { return targetType; } @Override public String toString() { return "embed"; } @Override public int hashCode() { return 1; } @Override public boolean equals(Object o) { return o instanceof EmbedExpression; } }
class HashExpression extends Expression { private final HashFunction hasher = Hashing.sipHash24(); /** The target type we are hashing into. */ private DataType targetType; public HashExpression() { super(DataType.STRING); } @Override public void setStatementOutput(DocumentType documentType, Field field) { if ( ! canStoreHash(field.getDataType())) throw new IllegalArgumentException("Cannot use the hash function on an indexing statement for " + field.getName() + ": The hash function can only be used when the target field " + "is int or long, not " + field.getDataType()); targetType = field.getDataType(); } @Override protected void doExecute(ExecutionContext context) { StringFieldValue input = (StringFieldValue) context.getValue(); if (targetType.equals(DataType.INT)) context.setValue(new IntegerFieldValue(hashToInt(input.getString()))); else if (targetType.equals(DataType.LONG)) context.setValue(new LongFieldValue(hashToLong(input.getString()))); else throw new IllegalStateException(); } private int hashToInt(String value) { return hasher.hashString(value, StandardCharsets.UTF_8).asInt(); } private long hashToLong(String value) { return hasher.hashString(value, StandardCharsets.UTF_8).asLong(); } @Override private boolean canStoreHash(DataType type) { if (type.equals(DataType.INT)) return true; if (type.equals(DataType.LONG)) return true; return false; } @Override public DataType createdOutputType() { return targetType; } @Override public String toString() { return "hash"; } @Override public int hashCode() { return 987; } @Override public boolean equals(Object o) { return o instanceof HashExpression; } }
Tangentially related to this PR: we should stop using identifiers containing double underscores. In C++ such identifiers are reserved for the implementation (compiler and standard library), so there is no point in having them here — they only make the generated code less readable. Moreover, since we appear to emit config input names verbatim anyway, the underscore decoration does not even protect against a name colliding with a reserved C++ keyword...
void writeHeaderHeader(Writer w, CNode root) throws IOException { String [] namespaceList = generateCppNameSpace(root); String namespacePrint = generateCppNameSpaceString(namespaceList); String namespaceDefine = generateCppNameSpaceDefine(namespaceList); String className = getTypeName(root, false); String defineName = namespaceDefine + "_" + getDefineName(className); w.write("" + "/**\n" + " * @class " + namespacePrint + "::" + className + "\n" + " * @ingroup config\n" + " *\n" + " * @brief This is an autogenerated class for handling VESPA config.\n" + " *\n" + " * This class is autogenerated by vespa from a config definition file.\n" + " * To subscribe to config, you need to include the config/config.h header, \n" + " * and create a ConfigSubscriber in order to subscribe for config.\n" ); if (root.getComment().length() > 0) { w.write(" *\n"); StringTokenizer st = new StringTokenizer(root.getComment(), "\n"); while (st.hasMoreTokens()) { w.write(" * " + st.nextToken() + "\n"); } } w.write("" + " */\n" + " + " + "\n" + " + " + "\n"); w.write("namespace config {\n"); w.write(" class ConfigValue;\n"); w.write(" class ConfigPayload;\n"); w.write("}\n\n"); w.write("namespace vespalib::slime {\n"); w.write(" struct Inspector;\n"); w.write(" struct Cursor;\n"); w.write("}\n\n"); writeNameSpaceBegin(w, namespaceList); w.write("\nnamespace internal {\n\n"); w.write("" + "/**\n" + " * This class contains the config. DO NOT USE THIS CLASS DIRECTLY. 
Use the typedeffed\n" + " * versions after this class declaration.\n" + " */\n" + "class Internal" + className + "Type : public ::config::ConfigInstance\n" + "{\n" ); } void writeTypeDeclarations(Writer w, CNode node, String indent) throws IOException { java.util.Set<String> declaredTypes = new java.util.HashSet<>(); for (CNode child : node.getChildren()) { boolean complexType = (child instanceof InnerCNode || child instanceof LeafCNode.EnumLeaf); if (complexType && !declaredTypes.contains(child.getName())) { String typeName = getTypeName(child, false); declaredTypes.add(child.getName()); if (child instanceof LeafCNode.EnumLeaf) { w.write(indent + "enum class " + typeName + " { "); LeafCNode.EnumLeaf leaf = (LeafCNode.EnumLeaf) child; for (int i=0; i<leaf.getLegalValues().length; ++i) { if (i != 0) { w.write(", "); } w.write(leaf.getLegalValues()[i]); } w.write(" };\n" + indent + "typedef std::vector<" + typeName + "> " + typeName + "Vector;" + "\n" + indent + "typedef std::map<vespalib::string, " + typeName + "> " + typeName + "Map;" + "\n" + indent + "static " + typeName + " get" + typeName + "(const vespalib::string&);\n" + indent + "static vespalib::string get" + typeName + "Name(" + typeName + " e);\n" + "\n" ); w.write(indent + "struct Internal" + typeName + "Converter {\n"); w.write(indent + " " + typeName + " operator()(const ::vespalib::string & __fieldName, const ::vespalib::slime::Inspector & __inspector);\n"); w.write(indent + " " + typeName + " operator()(const ::vespalib::slime::Inspector & __inspector);\n"); w.write(indent + " " + typeName + " operator()(const ::vespalib::slime::Inspector & __inspector, " + typeName + " __eDefault);\n"); w.write(indent + "};\n"); } else { w.write(indent + "class " + typeName + " {\n"); w.write(indent + "public:\n"); writeTypeDeclarations(w, child, indent + " "); writeStructFunctionDeclarations(w, getTypeName(child, false), child, indent + " "); writeMembers(w, child, indent + " "); w.write(indent + "};\n"); 
w.write(indent + "typedef std::vector<" + typeName + "> " + typeName + "Vector;\n\n"); w.write(indent + "typedef std::map<vespalib::string, " + typeName + "> " + typeName + "Map;\n\n"); } } } } void writeHeaderFunctionDeclarations(Writer w, String className, CNode node, String indent) throws IOException { w.write("" + indent + "const vespalib::string & defName() const override { return CONFIG_DEF_NAME; }\n" + indent + "const vespalib::string & defMd5() const override { return CONFIG_DEF_MD5; }\n" + indent + "const vespalib::string & defNamespace() const override { return CONFIG_DEF_NAMESPACE; }\n" + indent + "void serialize(::config::ConfigDataBuffer & __buffer) const override;\n"); writeConfigClassFunctionDeclarations(w, "Internal" + className + "Type", node, indent); } void writeConfigClassFunctionDeclarations(Writer w, String className, CNode node, String indent) throws IOException { w.write(indent + className + "(const ::config::ConfigValue & __value);\n"); w.write(indent + className + "(const ::config::ConfigDataBuffer & __value);\n"); w.write(indent + className + "(const ::config::ConfigPayload & __payload);\n"); writeCommonFunctionDeclarations(w, className, node, indent); } void writeStructFunctionDeclarations(Writer w, String className, CNode node, String indent) throws IOException { w.write(indent + className + "(const " + vectorTypeDefs.get("vespalib::string") + " & __lines);\n"); w.write(indent + className + "(const vespalib::slime::Inspector & __inspector);\n"); w.write(indent + className + "(const ::config::ConfigPayload & __payload);\n"); writeCommonFunctionDeclarations(w, className, node, indent); w.write(indent + "void serialize(vespalib::slime::Cursor & __cursor) const;\n"); } void writeClassCopyConstructorDeclaration(Writer w, String className, NoExceptSpecifier noexcept, String indent) throws IOException { w.write(indent + className + "(const " + className + " & __rhs)" + noexcept + ";\n"); } void writeClassAssignmentOperatorDeclaration(Writer w, 
String className, NoExceptSpecifier noexcept, String indent) throws IOException { w.write(indent + className + " & operator = (const " + className + " & __rhs)" + noexcept + ";\n"); } void writeClassMoveConstructorDeclaration(Writer w, String className, NoExceptSpecifier noexcept, String indent) throws IOException { w.write(indent + className + "(" + className + " && __rhs)" + noexcept + ";\n"); } void writeClassMoveOperatorDeclaration(Writer w, String className, NoExceptSpecifier noexcept, String indent) throws IOException { w.write(indent + className + " & operator = (" + className + " && __rhs)" + noexcept + ";\n"); } void writeConfigClassCopyConstructorDefinition(Writer w, String parent, String className, NoExceptSpecifier noexcept) throws IOException { w.write(parent + "::" + className + "(const " + className + " & __rhs)" + noexcept + " = default;\n"); } void writeConfigClassAssignmentOperatorDefinition(Writer w, String parent, String className, NoExceptSpecifier noexcept) throws IOException { w.write(parent + " & " + parent + "::" + "operator =(const " + className + " & __rhs)" + noexcept + " = default;\n"); } void writeConfigClassMoveConstructorDefinition(Writer w, String parent, String className, NoExceptSpecifier noexcept) throws IOException { w.write(parent + "::" + className + "(" + className + " && __rhs)" + noexcept + " = default;\n"); } void writeConfigClassMoveOperatorDefinition(Writer w, String parent, String className, NoExceptSpecifier noexcept) throws IOException { w.write(parent + " & " + parent + "::" + "operator =(" + className + " && __rhs)" + noexcept + " = default;\n"); } void writeClassCopyConstructorDefinition(Writer w, String parent, CNode node) throws IOException { String typeName = getTypeName(node, false); NoExceptSpecifier noexcept = new NoExceptSpecifier(node); w.write(parent + "::" + typeName + "(const " + typeName + " & __rhs)" + noexcept + " = default;\n"); } void writeClassMoveConstructorDefinition(Writer w, String parent, 
CNode node) throws IOException { String typeName = getTypeName(node, false); NoExceptSpecifier noexcept = new NoExceptSpecifier(node); w.write(parent + "::" + typeName + "(" + typeName + " && __rhs)" + noexcept + " = default;\n"); } void writeClassAssignmentOperatorDefinition(Writer w, String parent, CNode node) throws IOException { String typeName = getTypeName(node, false); NoExceptSpecifier noexcept = new NoExceptSpecifier(node); w.write(parent + " & " + parent + "::" + "operator = (const " + typeName + " & __rhs)" + noexcept + " = default;\n"); } void writeClassMoveOperatorDefinition(Writer w, String parent, CNode node) throws IOException { String typeName = getTypeName(node, false); NoExceptSpecifier noexcept = new NoExceptSpecifier(node); w.write(parent + " & " + parent + "::" + "operator = (" + typeName + " && __rhs)" + noexcept + " = default;\n"); } void writeDestructor(Writer w, String parent, String className) throws IOException { w.write(parent + "~" + className + "() = default; \n"); } void writeCommonFunctionDeclarations(Writer w, String className, CNode node, String indent) throws IOException { NoExceptSpecifier noexcept = new NoExceptSpecifier(node); w.write("" + indent + className + "() " + noexcept + ";\n"); writeClassCopyConstructorDeclaration(w, className, noexcept, indent); writeClassAssignmentOperatorDeclaration(w, className, noexcept, indent); writeClassMoveConstructorDeclaration(w, className, noexcept, indent); writeClassMoveOperatorDeclaration(w, className, noexcept, indent); w.write("" + indent + "~" + className + "();\n"); w.write("\n" + indent + "bool operator==(const " + className + "& __rhs) const noexcept;\n" + indent + "bool operator!=(const " + className + "& __rhs) const noexcept;\n" + "\n" ); } static String getTypeName(CNode node, boolean includeArray) { String type = null; if (node instanceof InnerCNode) { InnerCNode innerNode = (InnerCNode) node; type = getTypeName(innerNode.getName()); } else if (node instanceof LeafCNode) { 
LeafCNode leaf = (LeafCNode) node; if (leaf.getType().equals("bool")) { type = "bool"; } else if (leaf.getType().equals("int")) { type = "int32_t"; } else if (leaf.getType().equals("long")) { type = "int64_t"; } else if (leaf.getType().equals("double")) { type = "double"; } else if (leaf.getType().equals("enum")) { type = getTypeName(node.getName()); } else if (leaf.getType().equals("string")) { type = "vespalib::string"; } else if (leaf.getType().equals("reference")) { type = "vespalib::string"; } else if (leaf.getType().equals("file")) { type = "vespalib::string"; } else { throw new IllegalArgumentException("Unknown leaf datatype " + leaf.getType()); } } if (type == null) { throw new IllegalArgumentException("Unknown node " + node); } if (node.isArray && includeArray) { if (vectorTypeDefs.containsKey(type)) { type = vectorTypeDefs.get(type); } else { type = type + "Vector"; } } else if (node.isMap && includeArray) { if (mapTypeDefs.containsKey(type)) { type = mapTypeDefs.get(type); } else { type = type + "Map"; } } return type; } void writeStaticMemberDeclarations(Writer w, String indent) throws IOException { w.write("" + indent + "static const vespalib::string CONFIG_DEF_MD5;\n" + indent + "static const vespalib::string CONFIG_DEF_VERSION;\n" + indent + "static const vespalib::string CONFIG_DEF_NAME;\n" + indent + "static const vespalib::string CONFIG_DEF_NAMESPACE;\n" + indent + "static const ::config::StringVector CONFIG_DEF_SCHEMA;\n" + indent + "static const int64_t CONFIG_DEF_SERIALIZE_VERSION;\n" + "\n" ); } void writeComment(Writer w, String indent, String comment, boolean javadoc) throws IOException { if (javadoc && comment.indexOf('\n') == -1 && comment.length() <= 80 - (indent.length() + 7)) { w.write(indent + "/** " + comment + " */\n"); return; } else if (!javadoc && comment.indexOf('\n') == -1 && comment.length() <= 80 - (indent.length() + 3)) { w.write(indent + " return; } int maxLineLen = 80 - (indent.length() + 3); if (javadoc) w.write(indent + 
"/**\n"); do { String current; int newLine = comment.indexOf('\n'); if (newLine == -1) { current = comment; comment = ""; } else { current = comment.substring(0, newLine); comment = comment.substring(newLine + 1); } if (current.length() > maxLineLen) { int spaceIndex = current.lastIndexOf(' ', maxLineLen); if (spaceIndex >= maxLineLen - 15) { comment = current.substring(spaceIndex + 1) + "\n" + comment; current = current.substring(0, spaceIndex); } else { comment = current.substring(maxLineLen) + "\n" + comment; current = current.substring(0, maxLineLen) + "-"; } } w.write(indent + (javadoc ? " * " : " } while (comment.length() > 0); if (javadoc) w.write(indent + " */\n"); } void writeMembers(Writer w, CNode node, String indent) throws IOException { for (CNode child : node.getChildren()) { String typeName = getTypeName(child, true); if (child.getComment().length() > 0) { String comment = child.getComment(); int index; do { index = comment.indexOf("\n\n"); if (index == -1) break; String next = comment.substring(0, index); comment = comment.substring(index + 2); w.write("\n"); writeComment(w, indent, next, false); } while (true); w.write("\n"); writeComment(w, indent, comment, true); } w.write(indent + typeName + " " + getIdentifier(child.getName()) + ";"); if (child instanceof LeafCNode) { LeafCNode leaf = (LeafCNode) child; DefaultValue value = leaf.getDefaultValue(); if (value != null) { w.write(" } } w.write("\n"); } } void writeHeaderTypeDefs(Writer w, CNode root, String indent) throws IOException { w.write(indent + "typedef std::unique_ptr<const " + getInternalClassName(root) + "> UP;\n"); } private static String getInternalClassName(CNode root) { return "Internal" + getTypeName(root, false) + "Type"; } void writeHeaderFooter(Writer w, CNode root) throws IOException { String [] namespaceList = generateCppNameSpace(root); String namespaceDefine = generateCppNameSpaceDefine(namespaceList); String className = getTypeName(root, false); String defineName = 
namespaceDefine + "_" + getDefineName(className); w.write("" + "};\n" + "\n" + "} w.write("typedef internal::" + getInternalClassName(root) + " " + className + "ConfigBuilder;\n"); w.write("typedef const internal::" + getInternalClassName(root) + " " + className + "Config;\n"); w.write("\n"); writeNameSpaceEnd(w, namespaceList); w.write(" }
w.write(indent + className + "(const " + vectorTypeDefs.get("vespalib::string") + " & __lines);\n");
void writeHeaderHeader(Writer w, CNode root) throws IOException { String [] namespaceList = generateCppNameSpace(root); String namespacePrint = generateCppNameSpaceString(namespaceList); String namespaceDefine = generateCppNameSpaceDefine(namespaceList); String className = getTypeName(root, false); String defineName = namespaceDefine + "_" + getDefineName(className); w.write("" + "/**\n" + " * @class " + namespacePrint + "::" + className + "\n" + " * @ingroup config\n" + " *\n" + " * @brief This is an autogenerated class for handling VESPA config.\n" + " *\n" + " * This class is autogenerated by vespa from a config definition file.\n" + " * To subscribe to config, you need to include the config/config.h header, \n" + " * and create a ConfigSubscriber in order to subscribe for config.\n" ); if (root.getComment().length() > 0) { w.write(" *\n"); StringTokenizer st = new StringTokenizer(root.getComment(), "\n"); while (st.hasMoreTokens()) { w.write(" * " + st.nextToken() + "\n"); } } w.write("" + " */\n" + " + " + "\n" + " + " + "\n"); w.write("namespace config {\n"); w.write(" class ConfigValue;\n"); w.write(" class ConfigPayload;\n"); w.write("}\n\n"); w.write("namespace vespalib::slime {\n"); w.write(" struct Inspector;\n"); w.write(" struct Cursor;\n"); w.write("}\n\n"); writeNameSpaceBegin(w, namespaceList); w.write("\nnamespace internal {\n\n"); w.write("" + "/**\n" + " * This class contains the config. DO NOT USE THIS CLASS DIRECTLY. 
Use the typedeffed\n" + " * versions after this class declaration.\n" + " */\n" + "class Internal" + className + "Type : public ::config::ConfigInstance\n" + "{\n" ); } void writeTypeDeclarations(Writer w, CNode node, String indent) throws IOException { java.util.Set<String> declaredTypes = new java.util.HashSet<>(); for (CNode child : node.getChildren()) { boolean complexType = (child instanceof InnerCNode || child instanceof LeafCNode.EnumLeaf); if (complexType && !declaredTypes.contains(child.getName())) { String typeName = getTypeName(child, false); declaredTypes.add(child.getName()); if (child instanceof LeafCNode.EnumLeaf) { w.write(indent + "enum class " + typeName + " { "); LeafCNode.EnumLeaf leaf = (LeafCNode.EnumLeaf) child; for (int i=0; i<leaf.getLegalValues().length; ++i) { if (i != 0) { w.write(", "); } w.write(leaf.getLegalValues()[i]); } w.write(" };\n" + indent + "typedef std::vector<" + typeName + "> " + typeName + "Vector;" + "\n" + indent + "typedef std::map<vespalib::string, " + typeName + "> " + typeName + "Map;" + "\n" + indent + "static " + typeName + " get" + typeName + "(const vespalib::string&);\n" + indent + "static vespalib::string get" + typeName + "Name(" + typeName + " e);\n" + "\n" ); w.write(indent + "struct Internal" + typeName + "Converter {\n"); w.write(indent + " " + typeName + " operator()(const ::vespalib::string & __fieldName, const ::vespalib::slime::Inspector & __inspector);\n"); w.write(indent + " " + typeName + " operator()(const ::vespalib::slime::Inspector & __inspector);\n"); w.write(indent + " " + typeName + " operator()(const ::vespalib::slime::Inspector & __inspector, " + typeName + " __eDefault);\n"); w.write(indent + "};\n"); } else { w.write(indent + "class " + typeName + " {\n"); w.write(indent + "public:\n"); writeTypeDeclarations(w, child, indent + " "); writeStructFunctionDeclarations(w, getTypeName(child, false), child, indent + " "); writeMembers(w, child, indent + " "); w.write(indent + "};\n"); 
w.write(indent + "typedef std::vector<" + typeName + "> " + typeName + "Vector;\n\n"); w.write(indent + "typedef std::map<vespalib::string, " + typeName + "> " + typeName + "Map;\n\n"); } } } } void writeHeaderFunctionDeclarations(Writer w, String className, CNode node, String indent) throws IOException { w.write("" + indent + "const vespalib::string & defName() const override { return CONFIG_DEF_NAME; }\n" + indent + "const vespalib::string & defMd5() const override { return CONFIG_DEF_MD5; }\n" + indent + "const vespalib::string & defNamespace() const override { return CONFIG_DEF_NAMESPACE; }\n" + indent + "void serialize(::config::ConfigDataBuffer & __buffer) const override;\n"); writeConfigClassFunctionDeclarations(w, "Internal" + className + "Type", node, indent); } void writeConfigClassFunctionDeclarations(Writer w, String className, CNode node, String indent) throws IOException { w.write(indent + className + "(const ::config::ConfigValue & __value);\n"); w.write(indent + className + "(const ::config::ConfigDataBuffer & __value);\n"); w.write(indent + className + "(const ::config::ConfigPayload & __payload);\n"); writeCommonFunctionDeclarations(w, className, node, indent); } void writeStructFunctionDeclarations(Writer w, String className, CNode node, String indent) throws IOException { w.write(indent + className + "(const " + vectorTypeDefs.get("vespalib::string") + " & __lines);\n"); w.write(indent + className + "(const vespalib::slime::Inspector & __inspector);\n"); w.write(indent + className + "(const ::config::ConfigPayload & __payload);\n"); writeCommonFunctionDeclarations(w, className, node, indent); w.write(indent + "void serialize(vespalib::slime::Cursor & __cursor) const;\n"); } void writeClassCopyConstructorDeclaration(Writer w, String className, NoExceptSpecifier noexcept, String indent) throws IOException { w.write(indent + className + "(const " + className + " & __rhs)" + noexcept + ";\n"); } void writeClassAssignmentOperatorDeclaration(Writer w, 
// NOTE(review): Collapsed generator code emitting C++ special-member declarations/definitions
// (copy/move ctor, assignment, destructor) for generated config classes. The signature of the
// first method here opened before this chunk; do not edit this region without the full file.
String className, NoExceptSpecifier noexcept, String indent) throws IOException { w.write(indent + className + " & operator = (const " + className + " & __rhs)" + noexcept + ";\n"); } void writeClassMoveConstructorDeclaration(Writer w, String className, NoExceptSpecifier noexcept, String indent) throws IOException { w.write(indent + className + "(" + className + " && __rhs)" + noexcept + ";\n"); } void writeClassMoveOperatorDeclaration(Writer w, String className, NoExceptSpecifier noexcept, String indent) throws IOException { w.write(indent + className + " & operator = (" + className + " && __rhs)" + noexcept + ";\n"); } void writeConfigClassCopyConstructorDefinition(Writer w, String parent, String className, NoExceptSpecifier noexcept) throws IOException { w.write(parent + "::" + className + "(const " + className + " & __rhs)" + noexcept + " = default;\n"); } void writeConfigClassAssignmentOperatorDefinition(Writer w, String parent, String className, NoExceptSpecifier noexcept) throws IOException { w.write(parent + " & " + parent + "::" + "operator =(const " + className + " & __rhs)" + noexcept + " = default;\n"); } void writeConfigClassMoveConstructorDefinition(Writer w, String parent, String className, NoExceptSpecifier noexcept) throws IOException { w.write(parent + "::" + className + "(" + className + " && __rhs)" + noexcept + " = default;\n"); } void writeConfigClassMoveOperatorDefinition(Writer w, String parent, String className, NoExceptSpecifier noexcept) throws IOException { w.write(parent + " & " + parent + "::" + "operator =(" + className + " && __rhs)" + noexcept + " = default;\n"); } void writeClassCopyConstructorDefinition(Writer w, String parent, CNode node) throws IOException { String typeName = getTypeName(node, false); NoExceptSpecifier noexcept = new NoExceptSpecifier(node); w.write(parent + "::" + typeName + "(const " + typeName + " & __rhs)" + noexcept + " = default;\n"); } void writeClassMoveConstructorDefinition(Writer w, String parent, 
// NOTE(review): continuation of writeClassMoveConstructorDefinition, then assignment/move-operator
// definitions, destructor, and the common declaration set (default ctor, copy/move members,
// operator==/!= declared noexcept). getTypeName maps config-schema leaf types to C++ types.
CNode node) throws IOException { String typeName = getTypeName(node, false); NoExceptSpecifier noexcept = new NoExceptSpecifier(node); w.write(parent + "::" + typeName + "(" + typeName + " && __rhs)" + noexcept + " = default;\n"); } void writeClassAssignmentOperatorDefinition(Writer w, String parent, CNode node) throws IOException { String typeName = getTypeName(node, false); NoExceptSpecifier noexcept = new NoExceptSpecifier(node); w.write(parent + " & " + parent + "::" + "operator = (const " + typeName + " & __rhs)" + noexcept + " = default;\n"); } void writeClassMoveOperatorDefinition(Writer w, String parent, CNode node) throws IOException { String typeName = getTypeName(node, false); NoExceptSpecifier noexcept = new NoExceptSpecifier(node); w.write(parent + " & " + parent + "::" + "operator = (" + typeName + " && __rhs)" + noexcept + " = default;\n"); } void writeDestructor(Writer w, String parent, String className) throws IOException { w.write(parent + "~" + className + "() = default; \n"); } void writeCommonFunctionDeclarations(Writer w, String className, CNode node, String indent) throws IOException { NoExceptSpecifier noexcept = new NoExceptSpecifier(node); w.write("" + indent + className + "() " + noexcept + ";\n"); writeClassCopyConstructorDeclaration(w, className, noexcept, indent); writeClassAssignmentOperatorDeclaration(w, className, noexcept, indent); writeClassMoveConstructorDeclaration(w, className, noexcept, indent); writeClassMoveOperatorDeclaration(w, className, noexcept, indent); w.write("" + indent + "~" + className + "();\n"); w.write("\n" + indent + "bool operator==(const " + className + "& __rhs) const noexcept;\n" + indent + "bool operator!=(const " + className + "& __rhs) const noexcept;\n" + "\n" ); } static String getTypeName(CNode node, boolean includeArray) { String type = null; if (node instanceof InnerCNode) { InnerCNode innerNode = (InnerCNode) node; type = getTypeName(innerNode.getName()); } else if (node instanceof LeafCNode) { 
// NOTE(review): leaf-type mapping (bool/int/long/double/enum/string/reference/file -> C++ types);
// array/map nodes get a typedef'd or suffixed container name. CAUTION: writeComment below appears
// TRUNCATED by extraction — the literal after `w.write(indent + "` (presumably the C++ "// "
// comment prefix) was cut; restore from upstream before compiling.
LeafCNode leaf = (LeafCNode) node; if (leaf.getType().equals("bool")) { type = "bool"; } else if (leaf.getType().equals("int")) { type = "int32_t"; } else if (leaf.getType().equals("long")) { type = "int64_t"; } else if (leaf.getType().equals("double")) { type = "double"; } else if (leaf.getType().equals("enum")) { type = getTypeName(node.getName()); } else if (leaf.getType().equals("string")) { type = "vespalib::string"; } else if (leaf.getType().equals("reference")) { type = "vespalib::string"; } else if (leaf.getType().equals("file")) { type = "vespalib::string"; } else { throw new IllegalArgumentException("Unknown leaf datatype " + leaf.getType()); } } if (type == null) { throw new IllegalArgumentException("Unknown node " + node); } if (node.isArray && includeArray) { if (vectorTypeDefs.containsKey(type)) { type = vectorTypeDefs.get(type); } else { type = type + "Vector"; } } else if (node.isMap && includeArray) { if (mapTypeDefs.containsKey(type)) { type = mapTypeDefs.get(type); } else { type = type + "Map"; } } return type; } void writeStaticMemberDeclarations(Writer w, String indent) throws IOException { w.write("" + indent + "static const vespalib::string CONFIG_DEF_MD5;\n" + indent + "static const vespalib::string CONFIG_DEF_VERSION;\n" + indent + "static const vespalib::string CONFIG_DEF_NAME;\n" + indent + "static const vespalib::string CONFIG_DEF_NAMESPACE;\n" + indent + "static const ::config::StringVector CONFIG_DEF_SCHEMA;\n" + indent + "static const int64_t CONFIG_DEF_SERIALIZE_VERSION;\n" + "\n" ); } void writeComment(Writer w, String indent, String comment, boolean javadoc) throws IOException { if (javadoc && comment.indexOf('\n') == -1 && comment.length() <= 80 - (indent.length() + 7)) { w.write(indent + "/** " + comment + " */\n"); return; } else if (!javadoc && comment.indexOf('\n') == -1 && comment.length() <= 80 - (indent.length() + 3)) { w.write(indent + " return; } int maxLineLen = 80 - (indent.length() + 3); if (javadoc) w.write(indent + 
// NOTE(review): multi-line comment word-wrapping at 80 columns (hyphenates words longer than the
// remaining width). The ternary `(javadoc ? " * " : "` is also truncated here, as is the default-
// value write in writeMembers (`w.write(" }`) — both need the upstream literals restored.
"/**\n"); do { String current; int newLine = comment.indexOf('\n'); if (newLine == -1) { current = comment; comment = ""; } else { current = comment.substring(0, newLine); comment = comment.substring(newLine + 1); } if (current.length() > maxLineLen) { int spaceIndex = current.lastIndexOf(' ', maxLineLen); if (spaceIndex >= maxLineLen - 15) { comment = current.substring(spaceIndex + 1) + "\n" + comment; current = current.substring(0, spaceIndex); } else { comment = current.substring(maxLineLen) + "\n" + comment; current = current.substring(0, maxLineLen) + "-"; } } w.write(indent + (javadoc ? " * " : " } while (comment.length() > 0); if (javadoc) w.write(indent + " */\n"); } void writeMembers(Writer w, CNode node, String indent) throws IOException { for (CNode child : node.getChildren()) { String typeName = getTypeName(child, true); if (child.getComment().length() > 0) { String comment = child.getComment(); int index; do { index = comment.indexOf("\n\n"); if (index == -1) break; String next = comment.substring(0, index); comment = comment.substring(index + 2); w.write("\n"); writeComment(w, indent, next, false); } while (true); w.write("\n"); writeComment(w, indent, comment, true); } w.write(indent + typeName + " " + getIdentifier(child.getName()) + ";"); if (child instanceof LeafCNode) { LeafCNode leaf = (LeafCNode) child; DefaultValue value = leaf.getDefaultValue(); if (value != null) { w.write(" } } w.write("\n"); } } void writeHeaderTypeDefs(Writer w, CNode root, String indent) throws IOException { w.write(indent + "typedef std::unique_ptr<const " + getInternalClassName(root) + "> UP;\n"); } private static String getInternalClassName(CNode root) { return "Internal" + getTypeName(root, false) + "Type"; } void writeHeaderFooter(Writer w, CNode root) throws IOException { String [] namespaceList = generateCppNameSpace(root); String namespaceDefine = generateCppNameSpaceDefine(namespaceList); String className = getTypeName(root, false); String defineName = 
// NOTE(review): header epilogue — closes the internal class/namespaces and emits the public
// `FooConfigBuilder` / `FooConfig` typedefs. Two write literals here are cut off by extraction
// (`"} w.write(` and the trailing `w.write(" }`); presumably the namespace-close and include-guard
// #endif text — TODO recover from the upstream source.
namespaceDefine + "_" + getDefineName(className); w.write("" + "};\n" + "\n" + "} w.write("typedef internal::" + getInternalClassName(root) + " " + className + "ConfigBuilder;\n"); w.write("typedef const internal::" + getInternalClassName(root) + " " + className + "Config;\n"); w.write("\n"); writeNameSpaceEnd(w, namespaceList); w.write(" }
/**
 * Decides whether the C++ special member functions generated for a config
 * node can be declared {@code noexcept}, and renders the specifier text.
 *
 * A node qualifies unless it (or any nested inner node) contains an array or
 * map child — presumably because those become C++ container members whose
 * copy/move operations may throw (TODO confirm against the generated code).
 *
 * NOTE(review): this class appeared twice verbatim in the file — a duplicate
 * top-level class definition is a Java compile error, so it is defined once here.
 */
class NoExceptSpecifier {

    /** True when " noexcept" should be appended to generated signatures. */
    private final boolean enabled;

    /** @param node root of the config node tree to inspect (leaf nodes always qualify) */
    public NoExceptSpecifier(CNode node) {
        enabled = checkNode(node);
    }

    /**
     * Recursively checks a node tree; returns false as soon as any child of an
     * inner node is an array or map, true otherwise.
     */
    private static boolean checkNode(CNode node) {
        if (node instanceof InnerCNode) {
            for (CNode child : node.getChildren()) {
                if (child.isArray || child.isMap) {
                    return false;
                }
                if (!checkNode(child)) {
                    return false;
                }
            }
        }
        return true;
    }

    /** Returns {@code " noexcept"} (note the leading space) when enabled, else the empty string. */
    @Override
    public String toString() {
        if (enabled) {
            return " noexcept";
        } else {
            return "";
        }
    }
}