comment
stringlengths
1
45k
method_body
stringlengths
23
281k
target_code
stringlengths
0
5.16k
method_body_after
stringlengths
12
281k
context_before
stringlengths
8
543k
context_after
stringlengths
8
543k
why are deleted files not dropped from the cache automatically?
private void triggerCompression(String oldFileName) { try { String zippedFileName = oldFileName + ".gz"; Runtime r = Runtime.getRuntime(); StringBuilder cmd = new StringBuilder("gzip"); cmd.append(" < "). append(oldFileName).append(" > ").append(zippedFileName); Process p = r.exec(cmd.toString()); NativeIO nativeIO = new NativeIO(); File oldFile = new File(oldFileName); nativeIO.dropFileFromCache(oldFile); oldFile.delete(); nativeIO.dropFileFromCache(new File(zippedFileName)); } catch (IOException e) { } }
nativeIO.dropFileFromCache(oldFile);
private void triggerCompression(String oldFileName) { try { String gzippedFileName = oldFileName + ".gz"; Runtime r = Runtime.getRuntime(); StringBuilder cmd = new StringBuilder("gzip"); cmd.append(" < "). append(oldFileName).append(" > ").append(gzippedFileName); Process p = r.exec(cmd.toString()); NativeIO nativeIO = new NativeIO(); File oldFile = new File(oldFileName); nativeIO.dropFileFromCache(oldFile); oldFile.delete(); nativeIO.dropFileFromCache(new File(gzippedFileName)); } catch (IOException e) { } }
class LogThread extends Thread { LogFileHandler logFileHandler; long lastFlush = 0; public LogThread(LogFileHandler logFile) { super("Logger"); setDaemon(true); logFileHandler = logFile; } @Override public void run() { try { storeLogRecords(); } catch (InterruptedException e) { } catch (Exception e) { com.yahoo.protect.Process.logAndDie("Failed storing log records", e); } logFileHandler.flush(); } private void storeLogRecords() throws InterruptedException { while (!isInterrupted()) { LogRecord r = logFileHandler.logQueue.poll(100, TimeUnit.MILLISECONDS); if (r != null) { if (r == logFileHandler.rotateCmd) { logFileHandler.internalRotateNow(); lastFlush = System.nanoTime(); } else { logFileHandler.internalPublish(r); } flushIfOld(3, TimeUnit.SECONDS); } else { flushIfOld(100, TimeUnit.MILLISECONDS); } } } private void flushIfOld(long age, TimeUnit unit) { long now = System.nanoTime(); if (TimeUnit.NANOSECONDS.toMillis(now - lastFlush) > unit.toMillis(age)) { logFileHandler.flush(); lastFlush = now; } } }
class LogThread extends Thread { LogFileHandler logFileHandler; long lastFlush = 0; public LogThread(LogFileHandler logFile) { super("Logger"); setDaemon(true); logFileHandler = logFile; } @Override public void run() { try { storeLogRecords(); } catch (InterruptedException e) { } catch (Exception e) { com.yahoo.protect.Process.logAndDie("Failed storing log records", e); } logFileHandler.flush(); } private void storeLogRecords() throws InterruptedException { while (!isInterrupted()) { LogRecord r = logFileHandler.logQueue.poll(100, TimeUnit.MILLISECONDS); if (r != null) { if (r == logFileHandler.rotateCmd) { logFileHandler.internalRotateNow(); lastFlush = System.nanoTime(); } else { logFileHandler.internalPublish(r); } flushIfOld(3, TimeUnit.SECONDS); } else { flushIfOld(100, TimeUnit.MILLISECONDS); } } } private void flushIfOld(long age, TimeUnit unit) { long now = System.nanoTime(); if (TimeUnit.NANOSECONDS.toMillis(now - lastFlush) > unit.toMillis(age)) { logFileHandler.flush(); lastFlush = now; } } }
True. But then we should perhaps set it to 0 in ContainerCluster's getConfig() and override it only in ApplicationContainerCluster.
public void getConfig(QrStartConfig.Builder builder) { builder.jvm.heapsize(512); builder.jvm.heapSizeAsPercentageOfPhysicalMemory(0); builder.jvm.availableProcessors(2); builder.jvm.verbosegc(false); }
builder.jvm.heapSizeAsPercentageOfPhysicalMemory(0);
public void getConfig(QrStartConfig.Builder builder) { builder.jvm .verbosegc(false) .availableProcessors(2) .heapsize(512) .heapSizeAsPercentageOfPhysicalMemory(0); }
class ClusterControllerContainer extends Container implements BundlesConfig.Producer, ZookeeperServerConfig.Producer, QrStartConfig.Producer { private static final ComponentSpecification CLUSTERCONTROLLER_BUNDLE = new ComponentSpecification("clustercontroller-apps"); private static final ComponentSpecification ZKFACADE_BUNDLE = new ComponentSpecification("zkfacade"); private final Set<String> bundles = new TreeSet<>(); public ClusterControllerContainer(AbstractConfigProducer parent, int index, boolean runStandaloneZooKeeper, boolean isHosted) { super(parent, "" + index, index); addHandler( new Handler(new ComponentModel(new BundleInstantiationSpecification( new ComponentSpecification("clustercontroller-status"), new ComponentSpecification("com.yahoo.vespa.clustercontroller.apps.clustercontroller.StatusHandler"), CLUSTERCONTROLLER_BUNDLE))), "clustercontroller-status/*" ); addHandler( new Handler(new ComponentModel(new BundleInstantiationSpecification( new ComponentSpecification("clustercontroller-state-restapi-v2"), new ComponentSpecification("com.yahoo.vespa.clustercontroller.apps.clustercontroller.StateRestApiV2Handler"), CLUSTERCONTROLLER_BUNDLE))), "cluster/v2/*" ); if (runStandaloneZooKeeper) { addComponent(new Component<>(new ComponentModel(new BundleInstantiationSpecification( new ComponentSpecification("clustercontroller-zkrunner"), new ComponentSpecification("com.yahoo.vespa.zookeeper.ZooKeeperServer"), ZKFACADE_BUNDLE)))); addComponent(new Component<>(new ComponentModel(new BundleInstantiationSpecification( new ComponentSpecification("clustercontroller-zkprovider"), new ComponentSpecification("com.yahoo.vespa.clustercontroller.apps.clustercontroller.StandaloneZooKeeperProvider"), CLUSTERCONTROLLER_BUNDLE)))); } else { addComponent(new Component<>(new ComponentModel(new BundleInstantiationSpecification( new ComponentSpecification("clustercontroller-zkprovider"), new 
ComponentSpecification("com.yahoo.vespa.clustercontroller.apps.clustercontroller.DummyZooKeeperProvider"), CLUSTERCONTROLLER_BUNDLE)))); } addBundle("file:" + getDefaults().underVespaHome("lib/jars/clustercontroller-apps-jar-with-dependencies.jar")); addBundle("file:" + getDefaults().underVespaHome("lib/jars/clustercontroller-apputil-jar-with-dependencies.jar")); addBundle("file:" + getDefaults().underVespaHome("lib/jars/clustercontroller-core-jar-with-dependencies.jar")); addBundle("file:" + getDefaults().underVespaHome("lib/jars/clustercontroller-utils-jar-with-dependencies.jar")); addBundle("file:" + getDefaults().underVespaHome("lib/jars/zkfacade-jar-with-dependencies.jar")); log.log(LogLevel.DEBUG, "Adding access log for cluster controller ..."); addComponent(new AccessLogComponent(AccessLogComponent.AccessLogType.jsonAccessLog, "controller", isHosted)); } @Override public int getWantedPort() { return 19050; } @Override public boolean requiresWantedPort() { return index() == 0; } @Override public ContainerServiceType myServiceType() { return ContainerServiceType.CLUSTERCONTROLLER_CONTAINER; } private void addHandler(Handler h, String binding) { h.addServerBindings("http: super.addHandler(h); } public void addBundle(String bundlePath) { bundles.add(bundlePath); } @Override public void getConfig(BundlesConfig.Builder builder) { for (String bundle : bundles) { builder.bundle(bundle); } } @Override public void getConfig(ZookeeperServerConfig.Builder builder) { builder.myid(index()); } @Override }
class ClusterControllerContainer extends Container implements BundlesConfig.Producer, ZookeeperServerConfig.Producer, QrStartConfig.Producer { private static final ComponentSpecification CLUSTERCONTROLLER_BUNDLE = new ComponentSpecification("clustercontroller-apps"); private static final ComponentSpecification ZKFACADE_BUNDLE = new ComponentSpecification("zkfacade"); private final Set<String> bundles = new TreeSet<>(); public ClusterControllerContainer(AbstractConfigProducer parent, int index, boolean runStandaloneZooKeeper, boolean isHosted) { super(parent, "" + index, index); addHandler( new Handler(new ComponentModel(new BundleInstantiationSpecification( new ComponentSpecification("clustercontroller-status"), new ComponentSpecification("com.yahoo.vespa.clustercontroller.apps.clustercontroller.StatusHandler"), CLUSTERCONTROLLER_BUNDLE))), "clustercontroller-status/*" ); addHandler( new Handler(new ComponentModel(new BundleInstantiationSpecification( new ComponentSpecification("clustercontroller-state-restapi-v2"), new ComponentSpecification("com.yahoo.vespa.clustercontroller.apps.clustercontroller.StateRestApiV2Handler"), CLUSTERCONTROLLER_BUNDLE))), "cluster/v2/*" ); if (runStandaloneZooKeeper) { addComponent(new Component<>(new ComponentModel(new BundleInstantiationSpecification( new ComponentSpecification("clustercontroller-zkrunner"), new ComponentSpecification("com.yahoo.vespa.zookeeper.ZooKeeperServer"), ZKFACADE_BUNDLE)))); addComponent(new Component<>(new ComponentModel(new BundleInstantiationSpecification( new ComponentSpecification("clustercontroller-zkprovider"), new ComponentSpecification("com.yahoo.vespa.clustercontroller.apps.clustercontroller.StandaloneZooKeeperProvider"), CLUSTERCONTROLLER_BUNDLE)))); } else { addComponent(new Component<>(new ComponentModel(new BundleInstantiationSpecification( new ComponentSpecification("clustercontroller-zkprovider"), new 
ComponentSpecification("com.yahoo.vespa.clustercontroller.apps.clustercontroller.DummyZooKeeperProvider"), CLUSTERCONTROLLER_BUNDLE)))); } addBundle("file:" + getDefaults().underVespaHome("lib/jars/clustercontroller-apps-jar-with-dependencies.jar")); addBundle("file:" + getDefaults().underVespaHome("lib/jars/clustercontroller-apputil-jar-with-dependencies.jar")); addBundle("file:" + getDefaults().underVespaHome("lib/jars/clustercontroller-core-jar-with-dependencies.jar")); addBundle("file:" + getDefaults().underVespaHome("lib/jars/clustercontroller-utils-jar-with-dependencies.jar")); addBundle("file:" + getDefaults().underVespaHome("lib/jars/zkfacade-jar-with-dependencies.jar")); log.log(LogLevel.DEBUG, "Adding access log for cluster controller ..."); addComponent(new AccessLogComponent(AccessLogComponent.AccessLogType.jsonAccessLog, "controller", isHosted)); } @Override public int getWantedPort() { return 19050; } @Override public boolean requiresWantedPort() { return index() == 0; } @Override public ContainerServiceType myServiceType() { return ContainerServiceType.CLUSTERCONTROLLER_CONTAINER; } private void addHandler(Handler h, String binding) { h.addServerBindings("http: super.addHandler(h); } public void addBundle(String bundlePath) { bundles.add(bundlePath); } @Override public void getConfig(BundlesConfig.Builder builder) { for (String bundle : bundles) { builder.bundle(bundle); } } @Override public void getConfig(ZookeeperServerConfig.Builder builder) { builder.myid(index()); } @Override }
isn't this deprecated in newer Java versions? or have they given up?
private static Field getField(Class<?> clazz, String fieldName) { Field field; try { field = clazz.getDeclaredField(fieldName); } catch (NoSuchFieldException e) { throw new RuntimeException(e); } field.setAccessible(true); return field; }
field.setAccessible(true);
private static Field getField(Class<?> clazz, String fieldName) { Field field; try { field = clazz.getDeclaredField(fieldName); } catch (NoSuchFieldException e) { throw new RuntimeException(e); } field.setAccessible(true); return field; }
class NativeIO { private final Logger logger = Logger.getLogger(getClass().getName()); private static final int POSIX_FADV_DONTNEED = 4; private static boolean initialized = false; private static Throwable initError = null; static { try { if (Platform.isLinux()) { Native.register(Platform.C_LIBRARY_NAME); initialized = true; } } catch (Throwable throwable) { initError = throwable; } } private final Field fieldFD; private static native int posix_fadvise(int fd, long offset, long len, int flag) throws LastErrorException; public NativeIO() { if (!initialized) { logger.warning("native IO not possible due to " + getError().getMessage()); } fieldFD = getField(FileDescriptor.class, "fd"); } public boolean valid() { return initialized; } public Throwable getError() { if (initError != null) { return initError; } else { return new RuntimeException("Platform is unsúpported. Only supported on linux."); } } public void dropFileFromCache(FileDescriptor fd) { if (initialized) { posix_fadvise(getfh(fd), 0, 0, POSIX_FADV_DONTNEED); } } public void dropFileFromCache(File file) { try { dropFileFromCache(new FileInputStream(file).getFD()); } catch (FileNotFoundException e) { throw new RuntimeException(e); } catch (IOException e) { throw new RuntimeException(e); } } private int getfh(FileDescriptor fd) { try { return fieldFD.getInt(fd); } catch (IllegalAccessException e) { throw new RuntimeException(e); } } }
class NativeIO { private final Logger logger = Logger.getLogger(getClass().getName()); private static final int POSIX_FADV_DONTNEED = 4; private static boolean initialized = false; private static Throwable initError = null; static { try { if (Platform.isLinux()) { Native.register(Platform.C_LIBRARY_NAME); initialized = true; } } catch (Throwable throwable) { initError = throwable; } } private static final Field fieldFD = getField(FileDescriptor.class, "fd"); private static native int posix_fadvise(int fd, long offset, long len, int flag) throws LastErrorException; public NativeIO() { if (!initialized) { logger.warning("native IO not possible due to " + getError().getMessage()); } } public boolean valid() { return initialized; } public Throwable getError() { if (initError != null) { return initError; } else { return new RuntimeException("Platform is unsúpported. Only supported on linux."); } } public void dropFileFromCache(FileDescriptor fd) { if (initialized) { posix_fadvise(getNativeFD(fd), 0, 0, POSIX_FADV_DONTNEED); } } public void dropFileFromCache(File file) { try { dropFileFromCache(new FileInputStream(file).getFD()); } catch (FileNotFoundException e) { throw new RuntimeException(e); } catch (IOException e) { throw new RuntimeException(e); } } private static int getNativeFD(FileDescriptor fd) { try { return fieldFD.getInt(fd); } catch (IllegalAccessException e) { throw new RuntimeException(e); } } }
Fixed
private void triggerCompression(String oldFileName) { try { String zippedFileName = oldFileName + ".gz"; Runtime r = Runtime.getRuntime(); StringBuilder cmd = new StringBuilder("gzip"); cmd.append(" < "). append(oldFileName).append(" > ").append(zippedFileName); Process p = r.exec(cmd.toString()); NativeIO nativeIO = new NativeIO(); File oldFile = new File(oldFileName); nativeIO.dropFileFromCache(oldFile); oldFile.delete(); nativeIO.dropFileFromCache(new File(zippedFileName)); } catch (IOException e) { } }
String zippedFileName = oldFileName + ".gz";
private void triggerCompression(String oldFileName) { try { String gzippedFileName = oldFileName + ".gz"; Runtime r = Runtime.getRuntime(); StringBuilder cmd = new StringBuilder("gzip"); cmd.append(" < "). append(oldFileName).append(" > ").append(gzippedFileName); Process p = r.exec(cmd.toString()); NativeIO nativeIO = new NativeIO(); File oldFile = new File(oldFileName); nativeIO.dropFileFromCache(oldFile); oldFile.delete(); nativeIO.dropFileFromCache(new File(gzippedFileName)); } catch (IOException e) { } }
class LogThread extends Thread { LogFileHandler logFileHandler; long lastFlush = 0; public LogThread(LogFileHandler logFile) { super("Logger"); setDaemon(true); logFileHandler = logFile; } @Override public void run() { try { storeLogRecords(); } catch (InterruptedException e) { } catch (Exception e) { com.yahoo.protect.Process.logAndDie("Failed storing log records", e); } logFileHandler.flush(); } private void storeLogRecords() throws InterruptedException { while (!isInterrupted()) { LogRecord r = logFileHandler.logQueue.poll(100, TimeUnit.MILLISECONDS); if (r != null) { if (r == logFileHandler.rotateCmd) { logFileHandler.internalRotateNow(); lastFlush = System.nanoTime(); } else { logFileHandler.internalPublish(r); } flushIfOld(3, TimeUnit.SECONDS); } else { flushIfOld(100, TimeUnit.MILLISECONDS); } } } private void flushIfOld(long age, TimeUnit unit) { long now = System.nanoTime(); if (TimeUnit.NANOSECONDS.toMillis(now - lastFlush) > unit.toMillis(age)) { logFileHandler.flush(); lastFlush = now; } } }
class LogThread extends Thread { LogFileHandler logFileHandler; long lastFlush = 0; public LogThread(LogFileHandler logFile) { super("Logger"); setDaemon(true); logFileHandler = logFile; } @Override public void run() { try { storeLogRecords(); } catch (InterruptedException e) { } catch (Exception e) { com.yahoo.protect.Process.logAndDie("Failed storing log records", e); } logFileHandler.flush(); } private void storeLogRecords() throws InterruptedException { while (!isInterrupted()) { LogRecord r = logFileHandler.logQueue.poll(100, TimeUnit.MILLISECONDS); if (r != null) { if (r == logFileHandler.rotateCmd) { logFileHandler.internalRotateNow(); lastFlush = System.nanoTime(); } else { logFileHandler.internalPublish(r); } flushIfOld(3, TimeUnit.SECONDS); } else { flushIfOld(100, TimeUnit.MILLISECONDS); } } } private void flushIfOld(long age, TimeUnit unit) { long now = System.nanoTime(); if (TimeUnit.NANOSECONDS.toMillis(now - lastFlush) > unit.toMillis(age)) { logFileHandler.flush(); lastFlush = now; } } }
In case there are other references to it preventing evaporating immediately. Probably not strictly necessary, but a precaution.
private void triggerCompression(String oldFileName) { try { String zippedFileName = oldFileName + ".gz"; Runtime r = Runtime.getRuntime(); StringBuilder cmd = new StringBuilder("gzip"); cmd.append(" < "). append(oldFileName).append(" > ").append(zippedFileName); Process p = r.exec(cmd.toString()); NativeIO nativeIO = new NativeIO(); File oldFile = new File(oldFileName); nativeIO.dropFileFromCache(oldFile); oldFile.delete(); nativeIO.dropFileFromCache(new File(zippedFileName)); } catch (IOException e) { } }
nativeIO.dropFileFromCache(oldFile);
private void triggerCompression(String oldFileName) { try { String gzippedFileName = oldFileName + ".gz"; Runtime r = Runtime.getRuntime(); StringBuilder cmd = new StringBuilder("gzip"); cmd.append(" < "). append(oldFileName).append(" > ").append(gzippedFileName); Process p = r.exec(cmd.toString()); NativeIO nativeIO = new NativeIO(); File oldFile = new File(oldFileName); nativeIO.dropFileFromCache(oldFile); oldFile.delete(); nativeIO.dropFileFromCache(new File(gzippedFileName)); } catch (IOException e) { } }
class LogThread extends Thread { LogFileHandler logFileHandler; long lastFlush = 0; public LogThread(LogFileHandler logFile) { super("Logger"); setDaemon(true); logFileHandler = logFile; } @Override public void run() { try { storeLogRecords(); } catch (InterruptedException e) { } catch (Exception e) { com.yahoo.protect.Process.logAndDie("Failed storing log records", e); } logFileHandler.flush(); } private void storeLogRecords() throws InterruptedException { while (!isInterrupted()) { LogRecord r = logFileHandler.logQueue.poll(100, TimeUnit.MILLISECONDS); if (r != null) { if (r == logFileHandler.rotateCmd) { logFileHandler.internalRotateNow(); lastFlush = System.nanoTime(); } else { logFileHandler.internalPublish(r); } flushIfOld(3, TimeUnit.SECONDS); } else { flushIfOld(100, TimeUnit.MILLISECONDS); } } } private void flushIfOld(long age, TimeUnit unit) { long now = System.nanoTime(); if (TimeUnit.NANOSECONDS.toMillis(now - lastFlush) > unit.toMillis(age)) { logFileHandler.flush(); lastFlush = now; } } }
class LogThread extends Thread { LogFileHandler logFileHandler; long lastFlush = 0; public LogThread(LogFileHandler logFile) { super("Logger"); setDaemon(true); logFileHandler = logFile; } @Override public void run() { try { storeLogRecords(); } catch (InterruptedException e) { } catch (Exception e) { com.yahoo.protect.Process.logAndDie("Failed storing log records", e); } logFileHandler.flush(); } private void storeLogRecords() throws InterruptedException { while (!isInterrupted()) { LogRecord r = logFileHandler.logQueue.poll(100, TimeUnit.MILLISECONDS); if (r != null) { if (r == logFileHandler.rotateCmd) { logFileHandler.internalRotateNow(); lastFlush = System.nanoTime(); } else { logFileHandler.internalPublish(r); } flushIfOld(3, TimeUnit.SECONDS); } else { flushIfOld(100, TimeUnit.MILLISECONDS); } } } private void flushIfOld(long age, TimeUnit unit) { long now = System.nanoTime(); if (TimeUnit.NANOSECONDS.toMillis(now - lastFlush) > unit.toMillis(age)) { logFileHandler.flush(); lastFlush = now; } } }
Have no idea. Works well sofar.
private static Field getField(Class<?> clazz, String fieldName) { Field field; try { field = clazz.getDeclaredField(fieldName); } catch (NoSuchFieldException e) { throw new RuntimeException(e); } field.setAccessible(true); return field; }
field.setAccessible(true);
private static Field getField(Class<?> clazz, String fieldName) { Field field; try { field = clazz.getDeclaredField(fieldName); } catch (NoSuchFieldException e) { throw new RuntimeException(e); } field.setAccessible(true); return field; }
class NativeIO { private final Logger logger = Logger.getLogger(getClass().getName()); private static final int POSIX_FADV_DONTNEED = 4; private static boolean initialized = false; private static Throwable initError = null; static { try { if (Platform.isLinux()) { Native.register(Platform.C_LIBRARY_NAME); initialized = true; } } catch (Throwable throwable) { initError = throwable; } } private final Field fieldFD; private static native int posix_fadvise(int fd, long offset, long len, int flag) throws LastErrorException; public NativeIO() { if (!initialized) { logger.warning("native IO not possible due to " + getError().getMessage()); } fieldFD = getField(FileDescriptor.class, "fd"); } public boolean valid() { return initialized; } public Throwable getError() { if (initError != null) { return initError; } else { return new RuntimeException("Platform is unsúpported. Only supported on linux."); } } public void dropFileFromCache(FileDescriptor fd) { if (initialized) { posix_fadvise(getfh(fd), 0, 0, POSIX_FADV_DONTNEED); } } public void dropFileFromCache(File file) { try { dropFileFromCache(new FileInputStream(file).getFD()); } catch (FileNotFoundException e) { throw new RuntimeException(e); } catch (IOException e) { throw new RuntimeException(e); } } private int getfh(FileDescriptor fd) { try { return fieldFD.getInt(fd); } catch (IllegalAccessException e) { throw new RuntimeException(e); } } }
class NativeIO { private final Logger logger = Logger.getLogger(getClass().getName()); private static final int POSIX_FADV_DONTNEED = 4; private static boolean initialized = false; private static Throwable initError = null; static { try { if (Platform.isLinux()) { Native.register(Platform.C_LIBRARY_NAME); initialized = true; } } catch (Throwable throwable) { initError = throwable; } } private static final Field fieldFD = getField(FileDescriptor.class, "fd"); private static native int posix_fadvise(int fd, long offset, long len, int flag) throws LastErrorException; public NativeIO() { if (!initialized) { logger.warning("native IO not possible due to " + getError().getMessage()); } } public boolean valid() { return initialized; } public Throwable getError() { if (initError != null) { return initError; } else { return new RuntimeException("Platform is unsúpported. Only supported on linux."); } } public void dropFileFromCache(FileDescriptor fd) { if (initialized) { posix_fadvise(getNativeFD(fd), 0, 0, POSIX_FADV_DONTNEED); } } public void dropFileFromCache(File file) { try { dropFileFromCache(new FileInputStream(file).getFD()); } catch (FileNotFoundException e) { throw new RuntimeException(e); } catch (IOException e) { throw new RuntimeException(e); } } private static int getNativeFD(FileDescriptor fd) { try { return fieldFD.getInt(fd); } catch (IllegalAccessException e) { throw new RuntimeException(e); } } }
getName() just returns "name" from "/a/path/to/a/file/name". Is that OK here or should the full path be used? It wasn't obvious to me if the oldFileName string is a full path or not.
private void triggerCompression(File oldFile) { try { String gzippedFileName = oldFile.getName() + ".gz"; Runtime r = Runtime.getRuntime(); StringBuilder cmd = new StringBuilder("gzip"); cmd.append(" < "). append(oldFile.getName()).append(" > ").append(gzippedFileName); Process p = r.exec(cmd.toString()); NativeIO nativeIO = new NativeIO(); nativeIO.dropFileFromCache(oldFile); oldFile.delete(); nativeIO.dropFileFromCache(new File(gzippedFileName)); } catch (IOException e) { } }
String gzippedFileName = oldFile.getName() + ".gz";
private void triggerCompression(File oldFile) { try { String oldFileName = oldFile.getPath(); String gzippedFileName = oldFileName + ".gz"; Runtime r = Runtime.getRuntime(); StringBuilder cmd = new StringBuilder("gzip"); cmd.append(" < "). append(oldFileName).append(" > ").append(gzippedFileName); Process p = r.exec(cmd.toString()); NativeIO nativeIO = new NativeIO(); nativeIO.dropFileFromCache(oldFile); oldFile.delete(); nativeIO.dropFileFromCache(new File(gzippedFileName)); } catch (IOException e) { } }
class LogThread extends Thread { LogFileHandler logFileHandler; long lastFlush = 0; public LogThread(LogFileHandler logFile) { super("Logger"); setDaemon(true); logFileHandler = logFile; } @Override public void run() { try { storeLogRecords(); } catch (InterruptedException e) { } catch (Exception e) { com.yahoo.protect.Process.logAndDie("Failed storing log records", e); } logFileHandler.flush(); } private void storeLogRecords() throws InterruptedException { while (!isInterrupted()) { LogRecord r = logFileHandler.logQueue.poll(100, TimeUnit.MILLISECONDS); if (r != null) { if (r == logFileHandler.rotateCmd) { logFileHandler.internalRotateNow(); lastFlush = System.nanoTime(); } else { logFileHandler.internalPublish(r); } flushIfOld(3, TimeUnit.SECONDS); } else { flushIfOld(100, TimeUnit.MILLISECONDS); } } } private void flushIfOld(long age, TimeUnit unit) { long now = System.nanoTime(); if (TimeUnit.NANOSECONDS.toMillis(now - lastFlush) > unit.toMillis(age)) { logFileHandler.flush(); lastFlush = now; } } }
class LogThread extends Thread { LogFileHandler logFileHandler; long lastFlush = 0; public LogThread(LogFileHandler logFile) { super("Logger"); setDaemon(true); logFileHandler = logFile; } @Override public void run() { try { storeLogRecords(); } catch (InterruptedException e) { } catch (Exception e) { com.yahoo.protect.Process.logAndDie("Failed storing log records", e); } logFileHandler.flush(); } private void storeLogRecords() throws InterruptedException { while (!isInterrupted()) { LogRecord r = logFileHandler.logQueue.poll(100, TimeUnit.MILLISECONDS); if (r != null) { if (r == logFileHandler.rotateCmd) { logFileHandler.internalRotateNow(); lastFlush = System.nanoTime(); } else { logFileHandler.internalPublish(r); } flushIfOld(3, TimeUnit.SECONDS); } else { flushIfOld(100, TimeUnit.MILLISECONDS); } } } private void flushIfOld(long age, TimeUnit unit) { long now = System.nanoTime(); if (TimeUnit.NANOSECONDS.toMillis(now - lastFlush) > unit.toMillis(age)) { logFileHandler.flush(); lastFlush = now; } } }
I will check.
private void triggerCompression(File oldFile) { try { String gzippedFileName = oldFile.getName() + ".gz"; Runtime r = Runtime.getRuntime(); StringBuilder cmd = new StringBuilder("gzip"); cmd.append(" < "). append(oldFile.getName()).append(" > ").append(gzippedFileName); Process p = r.exec(cmd.toString()); NativeIO nativeIO = new NativeIO(); nativeIO.dropFileFromCache(oldFile); oldFile.delete(); nativeIO.dropFileFromCache(new File(gzippedFileName)); } catch (IOException e) { } }
String gzippedFileName = oldFile.getName() + ".gz";
private void triggerCompression(File oldFile) { try { String oldFileName = oldFile.getPath(); String gzippedFileName = oldFileName + ".gz"; Runtime r = Runtime.getRuntime(); StringBuilder cmd = new StringBuilder("gzip"); cmd.append(" < "). append(oldFileName).append(" > ").append(gzippedFileName); Process p = r.exec(cmd.toString()); NativeIO nativeIO = new NativeIO(); nativeIO.dropFileFromCache(oldFile); oldFile.delete(); nativeIO.dropFileFromCache(new File(gzippedFileName)); } catch (IOException e) { } }
class LogThread extends Thread { LogFileHandler logFileHandler; long lastFlush = 0; public LogThread(LogFileHandler logFile) { super("Logger"); setDaemon(true); logFileHandler = logFile; } @Override public void run() { try { storeLogRecords(); } catch (InterruptedException e) { } catch (Exception e) { com.yahoo.protect.Process.logAndDie("Failed storing log records", e); } logFileHandler.flush(); } private void storeLogRecords() throws InterruptedException { while (!isInterrupted()) { LogRecord r = logFileHandler.logQueue.poll(100, TimeUnit.MILLISECONDS); if (r != null) { if (r == logFileHandler.rotateCmd) { logFileHandler.internalRotateNow(); lastFlush = System.nanoTime(); } else { logFileHandler.internalPublish(r); } flushIfOld(3, TimeUnit.SECONDS); } else { flushIfOld(100, TimeUnit.MILLISECONDS); } } } private void flushIfOld(long age, TimeUnit unit) { long now = System.nanoTime(); if (TimeUnit.NANOSECONDS.toMillis(now - lastFlush) > unit.toMillis(age)) { logFileHandler.flush(); lastFlush = now; } } }
class LogThread extends Thread { LogFileHandler logFileHandler; long lastFlush = 0; public LogThread(LogFileHandler logFile) { super("Logger"); setDaemon(true); logFileHandler = logFile; } @Override public void run() { try { storeLogRecords(); } catch (InterruptedException e) { } catch (Exception e) { com.yahoo.protect.Process.logAndDie("Failed storing log records", e); } logFileHandler.flush(); } private void storeLogRecords() throws InterruptedException { while (!isInterrupted()) { LogRecord r = logFileHandler.logQueue.poll(100, TimeUnit.MILLISECONDS); if (r != null) { if (r == logFileHandler.rotateCmd) { logFileHandler.internalRotateNow(); lastFlush = System.nanoTime(); } else { logFileHandler.internalPublish(r); } flushIfOld(3, TimeUnit.SECONDS); } else { flushIfOld(100, TimeUnit.MILLISECONDS); } } } private void flushIfOld(long age, TimeUnit unit) { long now = System.nanoTime(); if (TimeUnit.NANOSECONDS.toMillis(now - lastFlush) > unit.toMillis(age)) { logFileHandler.flush(); lastFlush = now; } } }
Good catch, it is configurable with current directory as default. Using getPath instead.
private void triggerCompression(File oldFile) { try { String gzippedFileName = oldFile.getName() + ".gz"; Runtime r = Runtime.getRuntime(); StringBuilder cmd = new StringBuilder("gzip"); cmd.append(" < "). append(oldFile.getName()).append(" > ").append(gzippedFileName); Process p = r.exec(cmd.toString()); NativeIO nativeIO = new NativeIO(); nativeIO.dropFileFromCache(oldFile); oldFile.delete(); nativeIO.dropFileFromCache(new File(gzippedFileName)); } catch (IOException e) { } }
String gzippedFileName = oldFile.getName() + ".gz";
private void triggerCompression(File oldFile) { try { String oldFileName = oldFile.getPath(); String gzippedFileName = oldFileName + ".gz"; Runtime r = Runtime.getRuntime(); StringBuilder cmd = new StringBuilder("gzip"); cmd.append(" < "). append(oldFileName).append(" > ").append(gzippedFileName); Process p = r.exec(cmd.toString()); NativeIO nativeIO = new NativeIO(); nativeIO.dropFileFromCache(oldFile); oldFile.delete(); nativeIO.dropFileFromCache(new File(gzippedFileName)); } catch (IOException e) { } }
class LogThread extends Thread { LogFileHandler logFileHandler; long lastFlush = 0; public LogThread(LogFileHandler logFile) { super("Logger"); setDaemon(true); logFileHandler = logFile; } @Override public void run() { try { storeLogRecords(); } catch (InterruptedException e) { } catch (Exception e) { com.yahoo.protect.Process.logAndDie("Failed storing log records", e); } logFileHandler.flush(); } private void storeLogRecords() throws InterruptedException { while (!isInterrupted()) { LogRecord r = logFileHandler.logQueue.poll(100, TimeUnit.MILLISECONDS); if (r != null) { if (r == logFileHandler.rotateCmd) { logFileHandler.internalRotateNow(); lastFlush = System.nanoTime(); } else { logFileHandler.internalPublish(r); } flushIfOld(3, TimeUnit.SECONDS); } else { flushIfOld(100, TimeUnit.MILLISECONDS); } } } private void flushIfOld(long age, TimeUnit unit) { long now = System.nanoTime(); if (TimeUnit.NANOSECONDS.toMillis(now - lastFlush) > unit.toMillis(age)) { logFileHandler.flush(); lastFlush = now; } } }
/**
 * Background daemon thread that drains the log queue of a LogFileHandler,
 * publishes records, performs rotation when the rotate sentinel is seen,
 * and flushes the handler periodically.
 */
class LogThread extends Thread {
    LogFileHandler logFileHandler;
    // System.nanoTime() of the last flush; 0 makes the first check flush immediately.
    long lastFlush = 0;

    public LogThread(LogFileHandler logFile) {
        super("Logger");
        setDaemon(true); // must not keep the JVM alive on shutdown
        logFileHandler = logFile;
    }

    @Override
    public void run() {
        try {
            storeLogRecords();
        } catch (InterruptedException e) {
            // Normal shutdown path; fall through to the final flush.
        } catch (Exception e) {
            com.yahoo.protect.Process.logAndDie("Failed storing log records", e);
        }
        // Final flush so no buffered records are lost on exit.
        logFileHandler.flush();
    }

    // Main loop: poll the queue, publish or rotate, and flush when data gets old.
    private void storeLogRecords() throws InterruptedException {
        while (!isInterrupted()) {
            LogRecord r = logFileHandler.logQueue.poll(100, TimeUnit.MILLISECONDS);
            if (r != null) {
                if (r == logFileHandler.rotateCmd) {
                    // Sentinel record: rotate the underlying file instead of publishing.
                    logFileHandler.internalRotateNow();
                    lastFlush = System.nanoTime();
                } else {
                    logFileHandler.internalPublish(r);
                }
                flushIfOld(3, TimeUnit.SECONDS);
            } else {
                // Queue idle: flush more eagerly so records do not linger in buffers.
                flushIfOld(100, TimeUnit.MILLISECONDS);
            }
        }
    }

    // Flushes the handler if more than the given duration has passed since the last flush.
    private void flushIfOld(long age, TimeUnit unit) {
        long now = System.nanoTime();
        if (TimeUnit.NANOSECONDS.toMillis(now - lastFlush) > unit.toMillis(age)) {
            logFileHandler.flush();
            lastFlush = now;
        }
    }
}
I think I need some context on this one.
/**
 * Serializes a string field value as JSON.
 * <p>
 * Empty strings in named fields are skipped entirely. When serializing an
 * entry in an array, {@code field} is null; such entries are always written,
 * which is the correct behavior for arrays.
 *
 * @param generator the JSON generator to write to
 * @param field     the field being serialized, or null for an array entry
 * @param value     the string value to serialize
 */
public static void serializeStringField(JsonGenerator generator, FieldBase field, StringFieldValue value) {
    // Skip empty named fields; array entries (field == null) must still be written.
    if (value.getString().isEmpty() && field != null) {
        return;
    }
    serializeString(generator, field, value.getString());
}
return;
/**
 * Serializes a string field value as JSON.
 * <p>
 * Empty strings in named fields are skipped entirely. When serializing an
 * entry in an array, {@code field} is null; such entries are always written,
 * which is the correct behavior for arrays.
 *
 * @param generator the JSON generator to write to
 * @param field     the field being serialized, or null for an array entry
 * @param value     the string value to serialize
 */
public static void serializeStringField(JsonGenerator generator, FieldBase field, StringFieldValue value) {
    // Skip empty named fields; array entries (field == null) must still be written.
    if (value.getString().isEmpty() && field != null) {
        return;
    }
    serializeString(generator, field, value.getString());
}
/**
 * Thrown when JSON serialization of a document or field value fails.
 * Unchecked so serialization call sites are not forced to declare it.
 */
class JsonSerializationException extends RuntimeException {
    /** Wraps the underlying cause of the serialization failure. */
    public JsonSerializationException(Exception base) {
        super(base);
    }

    /** Creates an exception carrying only a descriptive message. */
    public JsonSerializationException(String message) {
        super(message);
    }
}
/**
 * Thrown when JSON serialization of a document or field value fails.
 * Unchecked so serialization call sites are not forced to declare it.
 */
class JsonSerializationException extends RuntimeException {
    /** Wraps the underlying cause of the serialization failure. */
    public JsonSerializationException(Exception base) {
        super(base);
    }

    /** Creates an exception carrying only a descriptive message. */
    public JsonSerializationException(String message) {
        super(message);
    }
}
When serializing an entry in an array, 'field' will be null, which gives the correct behavior for arrays.
/**
 * Serializes a string field value as JSON.
 * <p>
 * Empty strings in named fields are skipped entirely. When serializing an
 * entry in an array, {@code field} is null; such entries are always written,
 * which is the correct behavior for arrays.
 *
 * @param generator the JSON generator to write to
 * @param field     the field being serialized, or null for an array entry
 * @param value     the string value to serialize
 */
public static void serializeStringField(JsonGenerator generator, FieldBase field, StringFieldValue value) {
    // Skip empty named fields; array entries (field == null) must still be written.
    if (value.getString().isEmpty() && field != null) {
        return;
    }
    serializeString(generator, field, value.getString());
}
return;
/**
 * Serializes a string field value as JSON.
 * <p>
 * Empty strings in named fields are skipped entirely. When serializing an
 * entry in an array, {@code field} is null; such entries are always written,
 * which is the correct behavior for arrays.
 *
 * @param generator the JSON generator to write to
 * @param field     the field being serialized, or null for an array entry
 * @param value     the string value to serialize
 */
public static void serializeStringField(JsonGenerator generator, FieldBase field, StringFieldValue value) {
    // Skip empty named fields; array entries (field == null) must still be written.
    if (value.getString().isEmpty() && field != null) {
        return;
    }
    serializeString(generator, field, value.getString());
}
/**
 * Thrown when JSON serialization of a document or field value fails.
 * Unchecked so serialization call sites are not forced to declare it.
 */
class JsonSerializationException extends RuntimeException {
    /** Wraps the underlying cause of the serialization failure. */
    public JsonSerializationException(Exception base) {
        super(base);
    }

    /** Creates an exception carrying only a descriptive message. */
    public JsonSerializationException(String message) {
        super(message);
    }
}
/**
 * Thrown when JSON serialization of a document or field value fails.
 * Unchecked so serialization call sites are not forced to declare it.
 */
class JsonSerializationException extends RuntimeException {
    /** Wraps the underlying cause of the serialization failure. */
    public JsonSerializationException(Exception base) {
        super(base);
    }

    /** Creates an exception carrying only a descriptive message. */
    public JsonSerializationException(String message) {
        super(message);
    }
}
That context should be in the code ...
/**
 * Serializes a string field value as JSON.
 * <p>
 * Empty strings in named fields are skipped entirely. When serializing an
 * entry in an array, {@code field} is null; such entries are always written,
 * which is the correct behavior for arrays.
 *
 * @param generator the JSON generator to write to
 * @param field     the field being serialized, or null for an array entry
 * @param value     the string value to serialize
 */
public static void serializeStringField(JsonGenerator generator, FieldBase field, StringFieldValue value) {
    // Skip empty named fields; array entries (field == null) must still be written.
    if (value.getString().isEmpty() && field != null) {
        return;
    }
    serializeString(generator, field, value.getString());
}
return;
/**
 * Serializes a string field value as JSON.
 * <p>
 * Empty strings in named fields are skipped entirely. When serializing an
 * entry in an array, {@code field} is null; such entries are always written,
 * which is the correct behavior for arrays.
 *
 * @param generator the JSON generator to write to
 * @param field     the field being serialized, or null for an array entry
 * @param value     the string value to serialize
 */
public static void serializeStringField(JsonGenerator generator, FieldBase field, StringFieldValue value) {
    // Skip empty named fields; array entries (field == null) must still be written.
    if (value.getString().isEmpty() && field != null) {
        return;
    }
    serializeString(generator, field, value.getString());
}
/**
 * Thrown when JSON serialization of a document or field value fails.
 * Unchecked so serialization call sites are not forced to declare it.
 */
class JsonSerializationException extends RuntimeException {
    /** Wraps the underlying cause of the serialization failure. */
    public JsonSerializationException(Exception base) {
        super(base);
    }

    /** Creates an exception carrying only a descriptive message. */
    public JsonSerializationException(String message) {
        super(message);
    }
}
/**
 * Thrown when JSON serialization of a document or field value fails.
 * Unchecked so serialization call sites are not forced to declare it.
 */
class JsonSerializationException extends RuntimeException {
    /** Wraps the underlying cause of the serialization failure. */
    public JsonSerializationException(Exception base) {
        super(base);
    }

    /** Creates an exception carrying only a descriptive message. */
    public JsonSerializationException(String message) {
        super(message);
    }
}
Should we invoke super's `destroy()` as well?
/**
 * Shuts down the slobrok mirror and the underlying transport.
 * May only be invoked once per instance; subsequent calls fail fast.
 *
 * @throws IllegalStateException if this policy has already been destroyed
 */
public void destroy() {
    // getAndSet makes the single-destroy check atomic under concurrent calls.
    // IllegalStateException is the idiomatic type for a state violation and is
    // a RuntimeException subclass, so existing callers are unaffected.
    if (destroyed.getAndSet(true))
        throw new IllegalStateException("Already destroyed");
    mirror.shutdown();
    // join() blocks until the transport thread has fully terminated.
    orb.transport().shutdown().join();
}
orb.transport().shutdown().join();
/**
 * Shuts down the slobrok mirror and the underlying transport.
 * May only be invoked once per instance; subsequent calls fail fast.
 *
 * @throws IllegalStateException if this policy has already been destroyed
 */
public void destroy() {
    // getAndSet makes the single-destroy check atomic under concurrent calls.
    // IllegalStateException is the idiomatic type for a state violation and is
    // a RuntimeException subclass, so existing callers are unaffected.
    if (destroyed.getAndSet(true))
        throw new IllegalStateException("Already destroyed");
    mirror.shutdown();
    // join() blocks until the transport thread has fully terminated.
    orb.transport().shutdown().join();
}
class ExternPolicy implements DocumentProtocolRoutingPolicy { private Supervisor orb = null; private Mirror mirror = null; private String pattern = null; private String session = null; private final String error; private int offset = 0; private int generation = 0; private final List<Hop> recipients = new ArrayList<>(); private final AtomicBoolean destroyed = new AtomicBoolean(false); /** * Constructs a new instance of this policy. The argument given is the connection spec to the slobrok to use for * resolving recipients, as well as the pattern to use when querying. This constructor does _not_ wait for the * mirror to become ready. * * @param arg The slobrok connection spec. */ public ExternPolicy(String arg) { if (arg == null || arg.length() == 0) { error = "Expected parameter, got empty string."; return; } String[] args = arg.split(";", 2); if (args.length != 2 || args[0].length() == 0 || args[1].length() == 0) { error = "Expected parameter on the form '<spec>;<pattern>', got '" + arg + "'."; return; } int pos = args[1].lastIndexOf('/'); if (pos < 0) { error = "Expected pattern on the form '<service>/<session>', got '" + args[1] + "'."; return; } SlobrokList slobroks = new SlobrokList(); slobroks.setup(args[0].split(",")); pattern = args[1]; session = pattern.substring(pos); orb = new Supervisor(new Transport()); mirror = new Mirror(orb, slobroks); error = null; } /** * This is a safety mechanism to allow the constructor to fail and signal that it can not be used. * * @return The error string, or null if no error. */ public String getError() { return error; } /** * Returns the slobrok mirror used by this policy to resolve external recipients. * * @return The external mirror. */ public Mirror getMirror() { return mirror; } /** * Returns the appropriate recipient hop. This method provides synchronized access to the internal mirror. * * @return The recipient hop to use. 
*/ private synchronized Hop getRecipient() { update(); if (recipients.isEmpty()) { return null; } int offset = ++this.offset & Integer.MAX_VALUE; return new Hop(recipients.get(offset % recipients.size())); } /** * Updates the list of matching recipients by querying the extern slobrok. */ private void update() { int upd = mirror.updates(); if (generation != upd) { generation = upd; recipients.clear(); Mirror.Entry[] arr = mirror.lookup(pattern); for (Mirror.Entry entry : arr) { recipients.add(Hop.parse(entry.getSpec() + session)); } } } @Override public void select(RoutingContext ctx) { if (error != null) { ctx.setError(DocumentProtocol.ERROR_POLICY_FAILURE, error); } else if (mirror.ready()) { Hop hop = getRecipient(); if (hop != null) { Route route = new Route(ctx.getRoute()); route.setHop(0, hop); ctx.addChild(route); } else { ctx.setError(ErrorCode.NO_ADDRESS_FOR_SERVICE, "Could not resolve any recipients from '" + pattern + "'."); } } else { ctx.setError(ErrorCode.APP_TRANSIENT_ERROR, "Extern slobrok not ready."); } } @Override public void merge(RoutingContext ctx) { DocumentProtocol.merge(ctx); } @Override @Override public MetricSet getMetrics() { return null; } }
class ExternPolicy implements DocumentProtocolRoutingPolicy { private Supervisor orb = null; private Mirror mirror = null; private String pattern = null; private String session = null; private final String error; private int offset = 0; private int generation = 0; private final List<Hop> recipients = new ArrayList<>(); private final AtomicBoolean destroyed = new AtomicBoolean(false); /** * Constructs a new instance of this policy. The argument given is the connection spec to the slobrok to use for * resolving recipients, as well as the pattern to use when querying. This constructor does _not_ wait for the * mirror to become ready. * * @param arg The slobrok connection spec. */ public ExternPolicy(String arg) { if (arg == null || arg.length() == 0) { error = "Expected parameter, got empty string."; return; } String[] args = arg.split(";", 2); if (args.length != 2 || args[0].length() == 0 || args[1].length() == 0) { error = "Expected parameter on the form '<spec>;<pattern>', got '" + arg + "'."; return; } int pos = args[1].lastIndexOf('/'); if (pos < 0) { error = "Expected pattern on the form '<service>/<session>', got '" + args[1] + "'."; return; } SlobrokList slobroks = new SlobrokList(); slobroks.setup(args[0].split(",")); pattern = args[1]; session = pattern.substring(pos); orb = new Supervisor(new Transport()); mirror = new Mirror(orb, slobroks); error = null; } /** * This is a safety mechanism to allow the constructor to fail and signal that it can not be used. * * @return The error string, or null if no error. */ public String getError() { return error; } /** * Returns the slobrok mirror used by this policy to resolve external recipients. * * @return The external mirror. */ public Mirror getMirror() { return mirror; } /** * Returns the appropriate recipient hop. This method provides synchronized access to the internal mirror. * * @return The recipient hop to use. 
*/ private synchronized Hop getRecipient() { update(); if (recipients.isEmpty()) { return null; } int offset = ++this.offset & Integer.MAX_VALUE; return new Hop(recipients.get(offset % recipients.size())); } /** * Updates the list of matching recipients by querying the extern slobrok. */ private void update() { int upd = mirror.updates(); if (generation != upd) { generation = upd; recipients.clear(); Mirror.Entry[] arr = mirror.lookup(pattern); for (Mirror.Entry entry : arr) { recipients.add(Hop.parse(entry.getSpec() + session)); } } } @Override public void select(RoutingContext ctx) { if (error != null) { ctx.setError(DocumentProtocol.ERROR_POLICY_FAILURE, error); } else if (mirror.ready()) { Hop hop = getRecipient(); if (hop != null) { Route route = new Route(ctx.getRoute()); route.setHop(0, hop); ctx.addChild(route); } else { ctx.setError(ErrorCode.NO_ADDRESS_FOR_SERVICE, "Could not resolve any recipients from '" + pattern + "'."); } } else { ctx.setError(ErrorCode.APP_TRANSIENT_ERROR, "Extern slobrok not ready."); } } @Override public void merge(RoutingContext ctx) { DocumentProtocol.merge(ctx); } @Override @Override public MetricSet getMetrics() { return null; } }
Not necessary since the method is inherited from an interface.
/**
 * Shuts down the slobrok mirror and the underlying transport.
 * May only be invoked once per instance; subsequent calls fail fast.
 *
 * @throws IllegalStateException if this policy has already been destroyed
 */
public void destroy() {
    // getAndSet makes the single-destroy check atomic under concurrent calls.
    // IllegalStateException is the idiomatic type for a state violation and is
    // a RuntimeException subclass, so existing callers are unaffected.
    if (destroyed.getAndSet(true))
        throw new IllegalStateException("Already destroyed");
    mirror.shutdown();
    // join() blocks until the transport thread has fully terminated.
    orb.transport().shutdown().join();
}
orb.transport().shutdown().join();
/**
 * Shuts down the slobrok mirror and the underlying transport.
 * May only be invoked once per instance; subsequent calls fail fast.
 *
 * @throws IllegalStateException if this policy has already been destroyed
 */
public void destroy() {
    // getAndSet makes the single-destroy check atomic under concurrent calls.
    // IllegalStateException is the idiomatic type for a state violation and is
    // a RuntimeException subclass, so existing callers are unaffected.
    if (destroyed.getAndSet(true))
        throw new IllegalStateException("Already destroyed");
    mirror.shutdown();
    // join() blocks until the transport thread has fully terminated.
    orb.transport().shutdown().join();
}
class ExternPolicy implements DocumentProtocolRoutingPolicy { private Supervisor orb = null; private Mirror mirror = null; private String pattern = null; private String session = null; private final String error; private int offset = 0; private int generation = 0; private final List<Hop> recipients = new ArrayList<>(); private final AtomicBoolean destroyed = new AtomicBoolean(false); /** * Constructs a new instance of this policy. The argument given is the connection spec to the slobrok to use for * resolving recipients, as well as the pattern to use when querying. This constructor does _not_ wait for the * mirror to become ready. * * @param arg The slobrok connection spec. */ public ExternPolicy(String arg) { if (arg == null || arg.length() == 0) { error = "Expected parameter, got empty string."; return; } String[] args = arg.split(";", 2); if (args.length != 2 || args[0].length() == 0 || args[1].length() == 0) { error = "Expected parameter on the form '<spec>;<pattern>', got '" + arg + "'."; return; } int pos = args[1].lastIndexOf('/'); if (pos < 0) { error = "Expected pattern on the form '<service>/<session>', got '" + args[1] + "'."; return; } SlobrokList slobroks = new SlobrokList(); slobroks.setup(args[0].split(",")); pattern = args[1]; session = pattern.substring(pos); orb = new Supervisor(new Transport()); mirror = new Mirror(orb, slobroks); error = null; } /** * This is a safety mechanism to allow the constructor to fail and signal that it can not be used. * * @return The error string, or null if no error. */ public String getError() { return error; } /** * Returns the slobrok mirror used by this policy to resolve external recipients. * * @return The external mirror. */ public Mirror getMirror() { return mirror; } /** * Returns the appropriate recipient hop. This method provides synchronized access to the internal mirror. * * @return The recipient hop to use. 
*/ private synchronized Hop getRecipient() { update(); if (recipients.isEmpty()) { return null; } int offset = ++this.offset & Integer.MAX_VALUE; return new Hop(recipients.get(offset % recipients.size())); } /** * Updates the list of matching recipients by querying the extern slobrok. */ private void update() { int upd = mirror.updates(); if (generation != upd) { generation = upd; recipients.clear(); Mirror.Entry[] arr = mirror.lookup(pattern); for (Mirror.Entry entry : arr) { recipients.add(Hop.parse(entry.getSpec() + session)); } } } @Override public void select(RoutingContext ctx) { if (error != null) { ctx.setError(DocumentProtocol.ERROR_POLICY_FAILURE, error); } else if (mirror.ready()) { Hop hop = getRecipient(); if (hop != null) { Route route = new Route(ctx.getRoute()); route.setHop(0, hop); ctx.addChild(route); } else { ctx.setError(ErrorCode.NO_ADDRESS_FOR_SERVICE, "Could not resolve any recipients from '" + pattern + "'."); } } else { ctx.setError(ErrorCode.APP_TRANSIENT_ERROR, "Extern slobrok not ready."); } } @Override public void merge(RoutingContext ctx) { DocumentProtocol.merge(ctx); } @Override @Override public MetricSet getMetrics() { return null; } }
class ExternPolicy implements DocumentProtocolRoutingPolicy { private Supervisor orb = null; private Mirror mirror = null; private String pattern = null; private String session = null; private final String error; private int offset = 0; private int generation = 0; private final List<Hop> recipients = new ArrayList<>(); private final AtomicBoolean destroyed = new AtomicBoolean(false); /** * Constructs a new instance of this policy. The argument given is the connection spec to the slobrok to use for * resolving recipients, as well as the pattern to use when querying. This constructor does _not_ wait for the * mirror to become ready. * * @param arg The slobrok connection spec. */ public ExternPolicy(String arg) { if (arg == null || arg.length() == 0) { error = "Expected parameter, got empty string."; return; } String[] args = arg.split(";", 2); if (args.length != 2 || args[0].length() == 0 || args[1].length() == 0) { error = "Expected parameter on the form '<spec>;<pattern>', got '" + arg + "'."; return; } int pos = args[1].lastIndexOf('/'); if (pos < 0) { error = "Expected pattern on the form '<service>/<session>', got '" + args[1] + "'."; return; } SlobrokList slobroks = new SlobrokList(); slobroks.setup(args[0].split(",")); pattern = args[1]; session = pattern.substring(pos); orb = new Supervisor(new Transport()); mirror = new Mirror(orb, slobroks); error = null; } /** * This is a safety mechanism to allow the constructor to fail and signal that it can not be used. * * @return The error string, or null if no error. */ public String getError() { return error; } /** * Returns the slobrok mirror used by this policy to resolve external recipients. * * @return The external mirror. */ public Mirror getMirror() { return mirror; } /** * Returns the appropriate recipient hop. This method provides synchronized access to the internal mirror. * * @return The recipient hop to use. 
*/ private synchronized Hop getRecipient() { update(); if (recipients.isEmpty()) { return null; } int offset = ++this.offset & Integer.MAX_VALUE; return new Hop(recipients.get(offset % recipients.size())); } /** * Updates the list of matching recipients by querying the extern slobrok. */ private void update() { int upd = mirror.updates(); if (generation != upd) { generation = upd; recipients.clear(); Mirror.Entry[] arr = mirror.lookup(pattern); for (Mirror.Entry entry : arr) { recipients.add(Hop.parse(entry.getSpec() + session)); } } } @Override public void select(RoutingContext ctx) { if (error != null) { ctx.setError(DocumentProtocol.ERROR_POLICY_FAILURE, error); } else if (mirror.ready()) { Hop hop = getRecipient(); if (hop != null) { Route route = new Route(ctx.getRoute()); route.setHop(0, hop); ctx.addChild(route); } else { ctx.setError(ErrorCode.NO_ADDRESS_FOR_SERVICE, "Could not resolve any recipients from '" + pattern + "'."); } } else { ctx.setError(ErrorCode.APP_TRANSIENT_ERROR, "Extern slobrok not ready."); } } @Override public void merge(RoutingContext ctx) { DocumentProtocol.merge(ctx); } @Override @Override public MetricSet getMetrics() { return null; } }
Shouldn't e.g. 1 hour be enough?
/**
 * Marks the given config generation as failed: requires a generation strictly
 * newer than both known generations for future graphs, and logs the failure
 * unless it was caused by an interruption.
 *
 * @param generation the config generation whose graph setup failed
 * @param cause      the error that caused the failure
 */
private void invalidateGeneration(long generation, Throwable cause) {
    Duration maxWaitToExit = Duration.ofDays(1); // only used in the log message text
    // Accept only generations newer than both the components and bootstrap generations.
    leastGeneration = Math.max(configurer.getComponentsGeneration(), configurer.getBootstrapGeneration()) + 1;
    if (!(cause instanceof InterruptedException) && !(cause instanceof ConfigInterruptedException)) {
        log.log(Level.WARNING, newGraphErrorMessage(generation, cause, maxWaitToExit), cause);
    }
}
Duration maxWaitToExit = Duration.ofDays(1);
/**
 * Marks the given config generation as failed: requires a generation strictly
 * newer than both known generations for future graphs, and logs the failure
 * unless it was caused by an interruption.
 *
 * @param generation the config generation whose graph setup failed
 * @param cause      the error that caused the failure
 */
private void invalidateGeneration(long generation, Throwable cause) {
    Duration maxWaitToExit = Duration.ofDays(1); // only used in the log message text
    // Accept only generations newer than both the components and bootstrap generations.
    leastGeneration = Math.max(configurer.getComponentsGeneration(), configurer.getBootstrapGeneration()) + 1;
    if (!(cause instanceof InterruptedException) && !(cause instanceof ConfigInterruptedException)) {
        log.log(Level.WARNING, newGraphErrorMessage(generation, cause, maxWaitToExit), cause);
    }
}
/**
 * Owns the configured component graph of a container: subscribes to bundles-
 * and components-config, installs bundles via OSGi, builds new component
 * graphs when config changes, and deconstructs components that fall out of use.
 */
class Container {

    private static final Logger log = Logger.getLogger(Container.class.getName());

    private final SubscriberFactory subscriberFactory;
    private ConfigKey<BundlesConfig> bundlesConfigKey;
    private ConfigKey<ComponentsConfig> componentsConfigKey;
    private final ComponentDeconstructor componentDeconstructor;
    private final Osgi osgi;

    private ConfigRetriever configurer;
    // Config generation the previous graph was created from; -1 means none yet.
    private long previousConfigGeneration = -1L;
    // Lowest config generation accepted for new graphs (bumped on failure elsewhere).
    private long leastGeneration = -1L;

    public Container(SubscriberFactory subscriberFactory, String configId, ComponentDeconstructor componentDeconstructor, Osgi osgi) {
        this.subscriberFactory = subscriberFactory;
        this.bundlesConfigKey = new ConfigKey<>(BundlesConfig.class, configId);
        this.componentsConfigKey = new ConfigKey<>(ComponentsConfig.class, configId);
        this.componentDeconstructor = componentDeconstructor;
        this.osgi = osgi;
        Set<ConfigKey<? extends ConfigInstance>> keySet = new HashSet<>();
        keySet.add(bundlesConfigKey);
        keySet.add(componentsConfigKey);
        this.configurer = new ConfigRetriever(keySet, subscriberFactory::getSubscriber);
    }

    // Convenience constructor using a no-op Osgi implementation.
    public Container(SubscriberFactory subscriberFactory, String configId, ComponentDeconstructor componentDeconstructor) {
        this(subscriberFactory, configId, componentDeconstructor, new Osgi() {
        });
    }

    // Deconstructs every component/provider present in the old graph but absent in the new one.
    private void deconstructObsoleteComponents(ComponentGraph oldGraph, ComponentGraph newGraph) {
        // Identity map used as an identity set: components are compared by reference.
        IdentityHashMap<Object, Object> oldComponents = new IdentityHashMap<>();
        oldGraph.allComponentsAndProviders().forEach(c -> oldComponents.put(c, null));
        newGraph.allComponentsAndProviders().forEach(oldComponents::remove);
        oldComponents.keySet().forEach(componentDeconstructor::deconstruct);
    }

    /**
     * Builds, constructs and returns a new component graph from the latest config,
     * reusing unchanged nodes from the old graph and deconstructing obsolete ones.
     * On failure the current generation is invalidated and the error rethrown.
     */
    public ComponentGraph getNewComponentGraph(ComponentGraph oldGraph, Injector fallbackInjector, boolean restartOnRedeploy) {
        try {
            ComponentGraph newGraph = getConfigAndCreateGraph(oldGraph, fallbackInjector, restartOnRedeploy);
            newGraph.reuseNodes(oldGraph);
            constructComponents(newGraph);
            deconstructObsoleteComponents(oldGraph, newGraph);
            return newGraph;
        } catch (Throwable t) {
            invalidateGeneration(oldGraph.generation(), t);
            throw t;
        }
    }

    ComponentGraph getNewComponentGraph(ComponentGraph oldGraph) {
        return getNewComponentGraph(oldGraph, Guice.createInjector(), false);
    }

    ComponentGraph getNewComponentGraph() {
        return getNewComponentGraph(new ComponentGraph(), Guice.createInjector(), false);
    }

    // Builds the error message for a failed graph setup; generation 0 means the
    // very first graph, in which case the process will exit.
    private static String newGraphErrorMessage(long generation, Throwable cause, Duration maxWaitToExit) {
        String failedFirstMessage = "Failed to set up first component graph";
        String failedNewMessage = "Failed to set up new component graph";
        String constructMessage = " due to error when constructing one of the components";
        String exitMessage = ". Exiting within " + maxWaitToExit.toString();
        String retainMessage = ". Retaining previous component generation.";
        if (generation == 0) {
            if (cause instanceof ComponentNode.ComponentConstructorException) {
                return failedFirstMessage + constructMessage + exitMessage;
            } else {
                return failedFirstMessage + exitMessage;
            }
        } else {
            if (cause instanceof ComponentNode.ComponentConstructorException) {
                return failedNewMessage + constructMessage + retainMessage;
            } else {
                return failedNewMessage + retainMessage;
            }
        }
    }

    // Loops until a components-config snapshot arrives; bootstrap snapshots trigger
    // bundle installation and a graph rebuild before retrying.
    private ComponentGraph getConfigAndCreateGraph(ComponentGraph graph, Injector fallbackInjector, boolean restartOnRedeploy) {
        ConfigSnapshot snapshot;
        while (true) {
            snapshot = configurer.getConfigs(graph.configKeys(), leastGeneration, restartOnRedeploy);
            log.log(DEBUG, String.format("createNewGraph:\n" +
                    "graph.configKeys = %s\n" +
                    "graph.generation = %s\n" +
                    "snapshot = %s\n",
                    graph.configKeys(), graph.generation(), snapshot));
            if (snapshot instanceof BootstrapConfigs) {
                // Bootstrap generations must arrive in strictly increasing order.
                if (getBootstrapGeneration() <= previousConfigGeneration) {
                    throw new IllegalStateException(String.format(
                            "Got bootstrap configs out of sequence for old config generation %d.\n" +
                            "Previous config generation is %d", getBootstrapGeneration(), previousConfigGeneration));
                }
                log.log(DEBUG, String.format(
                        "Got new bootstrap generation\n" +
                        "bootstrap generation = %d\n" +
                        "components generation: %d\n" +
                        "previous generation: %d\n",
                        getBootstrapGeneration(), getComponentsGeneration(), previousConfigGeneration));
                installBundles(snapshot.configs());
                graph = createComponentsGraph(snapshot.configs(), getBootstrapGeneration(), fallbackInjector);
            } else if (snapshot instanceof ConfigRetriever.ComponentsConfigs) {
                break;
            }
        }
        log.log(DEBUG, String.format(
                "Got components configs,\n" +
                "bootstrap generation = %d\n" +
                "components generation: %d\n" +
                "previous generation: %d",
                getBootstrapGeneration(), getComponentsGeneration(), previousConfigGeneration));
        return createAndConfigureComponentsGraph(snapshot.configs(), fallbackInjector);
    }

    private long getBootstrapGeneration() {
        return configurer.getBootstrapGeneration();
    }

    private long getComponentsGeneration() {
        return configurer.getComponentsGeneration();
    }

    // Creates the components graph and hands it the config set for later configuration.
    private ComponentGraph createAndConfigureComponentsGraph(Map<ConfigKey<? extends ConfigInstance>, ConfigInstance> componentsConfigs, Injector fallbackInjector) {
        ComponentGraph componentGraph = createComponentsGraph(componentsConfigs, getComponentsGeneration(), fallbackInjector);
        componentGraph.setAvailableConfigs(componentsConfigs);
        return componentGraph;
    }

    // Wires explicit 'inject' declarations from the components config into the graph.
    private void injectNodes(ComponentsConfig config, ComponentGraph graph) {
        for (ComponentsConfig.Components component : config.components()) {
            Node componentNode = ComponentGraph.getNode(graph, component.id());
            for (ComponentsConfig.Components.Inject inject : component.inject()) {
                componentNode.inject(ComponentGraph.getNode(graph, inject.id()));
            }
        }
    }

    // Installs the bundles named in the bundles config through OSGi.
    public void installBundles(Map<ConfigKey<? extends ConfigInstance>, ConfigInstance> configsIncludingBootstrapConfigs) {
        BundlesConfig bundlesConfig = getConfig(bundlesConfigKey, configsIncludingBootstrapConfigs);
        osgi.useBundles(bundlesConfig.bundle());
    }

    // Builds a new (not yet constructed) component graph for the given generation.
    private ComponentGraph createComponentsGraph(Map<ConfigKey<? extends ConfigInstance>, ConfigInstance> configsIncludingBootstrapConfigs, long generation, Injector fallbackInjector) {
        previousConfigGeneration = generation;
        ComponentGraph graph = new ComponentGraph(generation);
        ComponentsConfig componentsConfig = getConfig(componentsConfigKey, configsIncludingBootstrapConfigs);
        if (componentsConfig == null) {
            throw new ConfigurationRuntimeException("The set of all configs does not include a valid 'components' config. Config set: " + configsIncludingBootstrapConfigs.keySet());
        }
        addNodes(componentsConfig, graph);
        injectNodes(componentsConfig, graph);
        graph.complete(fallbackInjector);
        return graph;
    }

    // Adds one node per configured component; REST API contexts get a JerseyNode.
    private void addNodes(ComponentsConfig componentsConfig, ComponentGraph graph) {
        for (ComponentsConfig.Components config : componentsConfig.components()) {
            BundleInstantiationSpecification specification = bundleInstatiationSpecification(config);
            Class<?> componentClass = osgi.resolveClass(specification);
            Node componentNode;
            if (RestApiContext.class.isAssignableFrom(componentClass)) {
                Class<? extends RestApiContext> nodeClass = componentClass.asSubclass(RestApiContext.class);
                componentNode = new JerseyNode(specification.id, config.configId(), nodeClass, osgi);
            } else {
                componentNode = new ComponentNode(specification.id, config.configId(), componentClass, null);
            }
            graph.add(componentNode);
        }
    }

    // Instantiates (or reuses cached instances of) every node in the graph.
    private void constructComponents(ComponentGraph graph) {
        graph.nodes().forEach(Node::newOrCachedInstance);
    }

    public void shutdown(ComponentGraph graph, ComponentDeconstructor deconstructor) {
        shutdownConfigurer();
        if (graph != null) {
            deconstructAllComponents(graph, deconstructor);
        }
    }

    public void shutdownConfigurer() {
        configurer.shutdown();
    }

    public void reloadConfig(long generation) {
        subscriberFactory.reloadActiveSubscribers(generation);
    }

    private void deconstructAllComponents(ComponentGraph graph, ComponentDeconstructor deconstructor) {
        graph.allComponentsAndProviders().forEach(deconstructor::deconstruct);
    }

    // Looks up and casts a config instance from the given set; fails if absent.
    public static <T extends ConfigInstance> T getConfig(ConfigKey<T> key, Map<ConfigKey<? extends ConfigInstance>, ConfigInstance> configs) {
        ConfigInstance inst = configs.get(key);
        if (inst == null || key.getConfigClass() == null) {
            throw new RuntimeException("Missing config " + key);
        }
        return key.getConfigClass().cast(inst);
    }

    // NOTE(review): "Instatiation" is a typo, but renaming would break external callers.
    public static BundleInstantiationSpecification bundleInstatiationSpecification(ComponentsConfig.Components config) {
        return BundleInstantiationSpecification.getFromStrings(config.id(), config.classId(), config.bundle());
    }
}
class Container { private static final Logger log = Logger.getLogger(Container.class.getName()); private final SubscriberFactory subscriberFactory; private ConfigKey<BundlesConfig> bundlesConfigKey; private ConfigKey<ComponentsConfig> componentsConfigKey; private final ComponentDeconstructor componentDeconstructor; private final Osgi osgi; private ConfigRetriever configurer; private long previousConfigGeneration = -1L; private long leastGeneration = -1L; public Container(SubscriberFactory subscriberFactory, String configId, ComponentDeconstructor componentDeconstructor, Osgi osgi) { this.subscriberFactory = subscriberFactory; this.bundlesConfigKey = new ConfigKey<>(BundlesConfig.class, configId); this.componentsConfigKey = new ConfigKey<>(ComponentsConfig.class, configId); this.componentDeconstructor = componentDeconstructor; this.osgi = osgi; Set<ConfigKey<? extends ConfigInstance>> keySet = new HashSet<>(); keySet.add(bundlesConfigKey); keySet.add(componentsConfigKey); this.configurer = new ConfigRetriever(keySet, subscriberFactory::getSubscriber); } public Container(SubscriberFactory subscriberFactory, String configId, ComponentDeconstructor componentDeconstructor) { this(subscriberFactory, configId, componentDeconstructor, new Osgi() { }); } private void deconstructObsoleteComponents(ComponentGraph oldGraph, ComponentGraph newGraph) { IdentityHashMap<Object, Object> oldComponents = new IdentityHashMap<>(); oldGraph.allComponentsAndProviders().forEach(c -> oldComponents.put(c, null)); newGraph.allComponentsAndProviders().forEach(oldComponents::remove); oldComponents.keySet().forEach(componentDeconstructor::deconstruct); } public ComponentGraph getNewComponentGraph(ComponentGraph oldGraph, Injector fallbackInjector, boolean restartOnRedeploy) { try { ComponentGraph newGraph = getConfigAndCreateGraph(oldGraph, fallbackInjector, restartOnRedeploy); newGraph.reuseNodes(oldGraph); constructComponents(newGraph); deconstructObsoleteComponents(oldGraph, newGraph); 
return newGraph; } catch (Throwable t) { invalidateGeneration(oldGraph.generation(), t); throw t; } } ComponentGraph getNewComponentGraph(ComponentGraph oldGraph) { return getNewComponentGraph(oldGraph, Guice.createInjector(), false); } ComponentGraph getNewComponentGraph() { return getNewComponentGraph(new ComponentGraph(), Guice.createInjector(), false); } private static String newGraphErrorMessage(long generation, Throwable cause, Duration maxWaitToExit) { String failedFirstMessage = "Failed to set up first component graph"; String failedNewMessage = "Failed to set up new component graph"; String constructMessage = " due to error when constructing one of the components"; String exitMessage = ". Exiting within " + maxWaitToExit.toString(); String retainMessage = ". Retaining previous component generation."; if (generation == 0) { if (cause instanceof ComponentNode.ComponentConstructorException) { return failedFirstMessage + constructMessage + exitMessage; } else { return failedFirstMessage + exitMessage; } } else { if (cause instanceof ComponentNode.ComponentConstructorException) { return failedNewMessage + constructMessage + retainMessage; } else { return failedNewMessage + retainMessage; } } } private ComponentGraph getConfigAndCreateGraph(ComponentGraph graph, Injector fallbackInjector, boolean restartOnRedeploy) { ConfigSnapshot snapshot; while (true) { snapshot = configurer.getConfigs(graph.configKeys(), leastGeneration, restartOnRedeploy); log.log(DEBUG, String.format("createNewGraph:\n" + "graph.configKeys = %s\n" + "graph.generation = %s\n" + "snapshot = %s\n", graph.configKeys(), graph.generation(), snapshot)); if (snapshot instanceof BootstrapConfigs) { if (getBootstrapGeneration() <= previousConfigGeneration) { throw new IllegalStateException(String.format( "Got bootstrap configs out of sequence for old config generation %d.\n" + "Previous config generation is %d", getBootstrapGeneration(), previousConfigGeneration)); } log.log(DEBUG, String.format( 
"Got new bootstrap generation\n" + "bootstrap generation = %d\n" + "components generation: %d\n" + "previous generation: %d\n", getBootstrapGeneration(), getComponentsGeneration(), previousConfigGeneration)); installBundles(snapshot.configs()); graph = createComponentsGraph(snapshot.configs(), getBootstrapGeneration(), fallbackInjector); } else if (snapshot instanceof ConfigRetriever.ComponentsConfigs) { break; } } log.log(DEBUG, String.format( "Got components configs,\n" + "bootstrap generation = %d\n" + "components generation: %d\n" + "previous generation: %d", getBootstrapGeneration(), getComponentsGeneration(), previousConfigGeneration)); return createAndConfigureComponentsGraph(snapshot.configs(), fallbackInjector); } private long getBootstrapGeneration() { return configurer.getBootstrapGeneration(); } private long getComponentsGeneration() { return configurer.getComponentsGeneration(); } private ComponentGraph createAndConfigureComponentsGraph(Map<ConfigKey<? extends ConfigInstance>, ConfigInstance> componentsConfigs, Injector fallbackInjector) { ComponentGraph componentGraph = createComponentsGraph(componentsConfigs, getComponentsGeneration(), fallbackInjector); componentGraph.setAvailableConfigs(componentsConfigs); return componentGraph; } private void injectNodes(ComponentsConfig config, ComponentGraph graph) { for (ComponentsConfig.Components component : config.components()) { Node componentNode = ComponentGraph.getNode(graph, component.id()); for (ComponentsConfig.Components.Inject inject : component.inject()) { componentNode.inject(ComponentGraph.getNode(graph, inject.id())); } } } public void installBundles(Map<ConfigKey<? extends ConfigInstance>, ConfigInstance> configsIncludingBootstrapConfigs) { BundlesConfig bundlesConfig = getConfig(bundlesConfigKey, configsIncludingBootstrapConfigs); osgi.useBundles(bundlesConfig.bundle()); } private ComponentGraph createComponentsGraph(Map<ConfigKey<? 
extends ConfigInstance>, ConfigInstance> configsIncludingBootstrapConfigs, long generation, Injector fallbackInjector) { previousConfigGeneration = generation; ComponentGraph graph = new ComponentGraph(generation); ComponentsConfig componentsConfig = getConfig(componentsConfigKey, configsIncludingBootstrapConfigs); if (componentsConfig == null) { throw new ConfigurationRuntimeException("The set of all configs does not include a valid 'components' config. Config set: " + configsIncludingBootstrapConfigs.keySet()); } addNodes(componentsConfig, graph); injectNodes(componentsConfig, graph); graph.complete(fallbackInjector); return graph; } private void addNodes(ComponentsConfig componentsConfig, ComponentGraph graph) { for (ComponentsConfig.Components config : componentsConfig.components()) { BundleInstantiationSpecification specification = bundleInstatiationSpecification(config); Class<?> componentClass = osgi.resolveClass(specification); Node componentNode; if (RestApiContext.class.isAssignableFrom(componentClass)) { Class<? 
extends RestApiContext> nodeClass = componentClass.asSubclass(RestApiContext.class); componentNode = new JerseyNode(specification.id, config.configId(), nodeClass, osgi); } else { componentNode = new ComponentNode(specification.id, config.configId(), componentClass, null); } graph.add(componentNode); } } private void constructComponents(ComponentGraph graph) { graph.nodes().forEach(Node::newOrCachedInstance); } public void shutdown(ComponentGraph graph, ComponentDeconstructor deconstructor) { shutdownConfigurer(); if (graph != null) { deconstructAllComponents(graph, deconstructor); } } public void shutdownConfigurer() { configurer.shutdown(); } public void reloadConfig(long generation) { subscriberFactory.reloadActiveSubscribers(generation); } private void deconstructAllComponents(ComponentGraph graph, ComponentDeconstructor deconstructor) { graph.allComponentsAndProviders().forEach(deconstructor::deconstruct); } public static <T extends ConfigInstance> T getConfig(ConfigKey<T> key, Map<ConfigKey<? extends ConfigInstance>, ConfigInstance> configs) { ConfigInstance inst = configs.get(key); if (inst == null || key.getConfigClass() == null) { throw new RuntimeException("Missing config " + key); } return key.getConfigClass().cast(inst); } public static BundleInstantiationSpecification bundleInstatiationSpecification(ComponentsConfig.Components config) { return BundleInstantiationSpecification.getFromStrings(config.id(), config.classId(), config.bundle()); } }
I guess, it doesn't matter that much what exact value we use as long as it is large imho.
private void invalidateGeneration(long generation, Throwable cause) { Duration maxWaitToExit = Duration.ofDays(1); leastGeneration = Math.max(configurer.getComponentsGeneration(), configurer.getBootstrapGeneration()) + 1; if (!(cause instanceof InterruptedException) && !(cause instanceof ConfigInterruptedException)) { log.log(Level.WARNING, newGraphErrorMessage(generation, cause, maxWaitToExit), cause); } }
Duration maxWaitToExit = Duration.ofDays(1);
private void invalidateGeneration(long generation, Throwable cause) { Duration maxWaitToExit = Duration.ofDays(1); leastGeneration = Math.max(configurer.getComponentsGeneration(), configurer.getBootstrapGeneration()) + 1; if (!(cause instanceof InterruptedException) && !(cause instanceof ConfigInterruptedException)) { log.log(Level.WARNING, newGraphErrorMessage(generation, cause, maxWaitToExit), cause); } }
class Container { private static final Logger log = Logger.getLogger(Container.class.getName()); private final SubscriberFactory subscriberFactory; private ConfigKey<BundlesConfig> bundlesConfigKey; private ConfigKey<ComponentsConfig> componentsConfigKey; private final ComponentDeconstructor componentDeconstructor; private final Osgi osgi; private ConfigRetriever configurer; private long previousConfigGeneration = -1L; private long leastGeneration = -1L; public Container(SubscriberFactory subscriberFactory, String configId, ComponentDeconstructor componentDeconstructor, Osgi osgi) { this.subscriberFactory = subscriberFactory; this.bundlesConfigKey = new ConfigKey<>(BundlesConfig.class, configId); this.componentsConfigKey = new ConfigKey<>(ComponentsConfig.class, configId); this.componentDeconstructor = componentDeconstructor; this.osgi = osgi; Set<ConfigKey<? extends ConfigInstance>> keySet = new HashSet<>(); keySet.add(bundlesConfigKey); keySet.add(componentsConfigKey); this.configurer = new ConfigRetriever(keySet, subscriberFactory::getSubscriber); } public Container(SubscriberFactory subscriberFactory, String configId, ComponentDeconstructor componentDeconstructor) { this(subscriberFactory, configId, componentDeconstructor, new Osgi() { }); } private void deconstructObsoleteComponents(ComponentGraph oldGraph, ComponentGraph newGraph) { IdentityHashMap<Object, Object> oldComponents = new IdentityHashMap<>(); oldGraph.allComponentsAndProviders().forEach(c -> oldComponents.put(c, null)); newGraph.allComponentsAndProviders().forEach(oldComponents::remove); oldComponents.keySet().forEach(componentDeconstructor::deconstruct); } public ComponentGraph getNewComponentGraph(ComponentGraph oldGraph, Injector fallbackInjector, boolean restartOnRedeploy) { try { ComponentGraph newGraph = getConfigAndCreateGraph(oldGraph, fallbackInjector, restartOnRedeploy); newGraph.reuseNodes(oldGraph); constructComponents(newGraph); deconstructObsoleteComponents(oldGraph, newGraph); 
return newGraph; } catch (Throwable t) { invalidateGeneration(oldGraph.generation(), t); throw t; } } ComponentGraph getNewComponentGraph(ComponentGraph oldGraph) { return getNewComponentGraph(oldGraph, Guice.createInjector(), false); } ComponentGraph getNewComponentGraph() { return getNewComponentGraph(new ComponentGraph(), Guice.createInjector(), false); } private static String newGraphErrorMessage(long generation, Throwable cause, Duration maxWaitToExit) { String failedFirstMessage = "Failed to set up first component graph"; String failedNewMessage = "Failed to set up new component graph"; String constructMessage = " due to error when constructing one of the components"; String exitMessage = ". Exiting within " + maxWaitToExit.toString(); String retainMessage = ". Retaining previous component generation."; if (generation == 0) { if (cause instanceof ComponentNode.ComponentConstructorException) { return failedFirstMessage + constructMessage + exitMessage; } else { return failedFirstMessage + exitMessage; } } else { if (cause instanceof ComponentNode.ComponentConstructorException) { return failedNewMessage + constructMessage + retainMessage; } else { return failedNewMessage + retainMessage; } } } private ComponentGraph getConfigAndCreateGraph(ComponentGraph graph, Injector fallbackInjector, boolean restartOnRedeploy) { ConfigSnapshot snapshot; while (true) { snapshot = configurer.getConfigs(graph.configKeys(), leastGeneration, restartOnRedeploy); log.log(DEBUG, String.format("createNewGraph:\n" + "graph.configKeys = %s\n" + "graph.generation = %s\n" + "snapshot = %s\n", graph.configKeys(), graph.generation(), snapshot)); if (snapshot instanceof BootstrapConfigs) { if (getBootstrapGeneration() <= previousConfigGeneration) { throw new IllegalStateException(String.format( "Got bootstrap configs out of sequence for old config generation %d.\n" + "Previous config generation is %d", getBootstrapGeneration(), previousConfigGeneration)); } log.log(DEBUG, String.format( 
"Got new bootstrap generation\n" + "bootstrap generation = %d\n" + "components generation: %d\n" + "previous generation: %d\n", getBootstrapGeneration(), getComponentsGeneration(), previousConfigGeneration)); installBundles(snapshot.configs()); graph = createComponentsGraph(snapshot.configs(), getBootstrapGeneration(), fallbackInjector); } else if (snapshot instanceof ConfigRetriever.ComponentsConfigs) { break; } } log.log(DEBUG, String.format( "Got components configs,\n" + "bootstrap generation = %d\n" + "components generation: %d\n" + "previous generation: %d", getBootstrapGeneration(), getComponentsGeneration(), previousConfigGeneration)); return createAndConfigureComponentsGraph(snapshot.configs(), fallbackInjector); } private long getBootstrapGeneration() { return configurer.getBootstrapGeneration(); } private long getComponentsGeneration() { return configurer.getComponentsGeneration(); } private ComponentGraph createAndConfigureComponentsGraph(Map<ConfigKey<? extends ConfigInstance>, ConfigInstance> componentsConfigs, Injector fallbackInjector) { ComponentGraph componentGraph = createComponentsGraph(componentsConfigs, getComponentsGeneration(), fallbackInjector); componentGraph.setAvailableConfigs(componentsConfigs); return componentGraph; } private void injectNodes(ComponentsConfig config, ComponentGraph graph) { for (ComponentsConfig.Components component : config.components()) { Node componentNode = ComponentGraph.getNode(graph, component.id()); for (ComponentsConfig.Components.Inject inject : component.inject()) { componentNode.inject(ComponentGraph.getNode(graph, inject.id())); } } } public void installBundles(Map<ConfigKey<? extends ConfigInstance>, ConfigInstance> configsIncludingBootstrapConfigs) { BundlesConfig bundlesConfig = getConfig(bundlesConfigKey, configsIncludingBootstrapConfigs); osgi.useBundles(bundlesConfig.bundle()); } private ComponentGraph createComponentsGraph(Map<ConfigKey<? 
extends ConfigInstance>, ConfigInstance> configsIncludingBootstrapConfigs, long generation, Injector fallbackInjector) { previousConfigGeneration = generation; ComponentGraph graph = new ComponentGraph(generation); ComponentsConfig componentsConfig = getConfig(componentsConfigKey, configsIncludingBootstrapConfigs); if (componentsConfig == null) { throw new ConfigurationRuntimeException("The set of all configs does not include a valid 'components' config. Config set: " + configsIncludingBootstrapConfigs.keySet()); } addNodes(componentsConfig, graph); injectNodes(componentsConfig, graph); graph.complete(fallbackInjector); return graph; } private void addNodes(ComponentsConfig componentsConfig, ComponentGraph graph) { for (ComponentsConfig.Components config : componentsConfig.components()) { BundleInstantiationSpecification specification = bundleInstatiationSpecification(config); Class<?> componentClass = osgi.resolveClass(specification); Node componentNode; if (RestApiContext.class.isAssignableFrom(componentClass)) { Class<? 
extends RestApiContext> nodeClass = componentClass.asSubclass(RestApiContext.class); componentNode = new JerseyNode(specification.id, config.configId(), nodeClass, osgi); } else { componentNode = new ComponentNode(specification.id, config.configId(), componentClass, null); } graph.add(componentNode); } } private void constructComponents(ComponentGraph graph) { graph.nodes().forEach(Node::newOrCachedInstance); } public void shutdown(ComponentGraph graph, ComponentDeconstructor deconstructor) { shutdownConfigurer(); if (graph != null) { deconstructAllComponents(graph, deconstructor); } } public void shutdownConfigurer() { configurer.shutdown(); } public void reloadConfig(long generation) { subscriberFactory.reloadActiveSubscribers(generation); } private void deconstructAllComponents(ComponentGraph graph, ComponentDeconstructor deconstructor) { graph.allComponentsAndProviders().forEach(deconstructor::deconstruct); } public static <T extends ConfigInstance> T getConfig(ConfigKey<T> key, Map<ConfigKey<? extends ConfigInstance>, ConfigInstance> configs) { ConfigInstance inst = configs.get(key); if (inst == null || key.getConfigClass() == null) { throw new RuntimeException("Missing config " + key); } return key.getConfigClass().cast(inst); } public static BundleInstantiationSpecification bundleInstatiationSpecification(ComponentsConfig.Components config) { return BundleInstantiationSpecification.getFromStrings(config.id(), config.classId(), config.bundle()); } }
class Container { private static final Logger log = Logger.getLogger(Container.class.getName()); private final SubscriberFactory subscriberFactory; private ConfigKey<BundlesConfig> bundlesConfigKey; private ConfigKey<ComponentsConfig> componentsConfigKey; private final ComponentDeconstructor componentDeconstructor; private final Osgi osgi; private ConfigRetriever configurer; private long previousConfigGeneration = -1L; private long leastGeneration = -1L; public Container(SubscriberFactory subscriberFactory, String configId, ComponentDeconstructor componentDeconstructor, Osgi osgi) { this.subscriberFactory = subscriberFactory; this.bundlesConfigKey = new ConfigKey<>(BundlesConfig.class, configId); this.componentsConfigKey = new ConfigKey<>(ComponentsConfig.class, configId); this.componentDeconstructor = componentDeconstructor; this.osgi = osgi; Set<ConfigKey<? extends ConfigInstance>> keySet = new HashSet<>(); keySet.add(bundlesConfigKey); keySet.add(componentsConfigKey); this.configurer = new ConfigRetriever(keySet, subscriberFactory::getSubscriber); } public Container(SubscriberFactory subscriberFactory, String configId, ComponentDeconstructor componentDeconstructor) { this(subscriberFactory, configId, componentDeconstructor, new Osgi() { }); } private void deconstructObsoleteComponents(ComponentGraph oldGraph, ComponentGraph newGraph) { IdentityHashMap<Object, Object> oldComponents = new IdentityHashMap<>(); oldGraph.allComponentsAndProviders().forEach(c -> oldComponents.put(c, null)); newGraph.allComponentsAndProviders().forEach(oldComponents::remove); oldComponents.keySet().forEach(componentDeconstructor::deconstruct); } public ComponentGraph getNewComponentGraph(ComponentGraph oldGraph, Injector fallbackInjector, boolean restartOnRedeploy) { try { ComponentGraph newGraph = getConfigAndCreateGraph(oldGraph, fallbackInjector, restartOnRedeploy); newGraph.reuseNodes(oldGraph); constructComponents(newGraph); deconstructObsoleteComponents(oldGraph, newGraph); 
return newGraph; } catch (Throwable t) { invalidateGeneration(oldGraph.generation(), t); throw t; } } ComponentGraph getNewComponentGraph(ComponentGraph oldGraph) { return getNewComponentGraph(oldGraph, Guice.createInjector(), false); } ComponentGraph getNewComponentGraph() { return getNewComponentGraph(new ComponentGraph(), Guice.createInjector(), false); } private static String newGraphErrorMessage(long generation, Throwable cause, Duration maxWaitToExit) { String failedFirstMessage = "Failed to set up first component graph"; String failedNewMessage = "Failed to set up new component graph"; String constructMessage = " due to error when constructing one of the components"; String exitMessage = ". Exiting within " + maxWaitToExit.toString(); String retainMessage = ". Retaining previous component generation."; if (generation == 0) { if (cause instanceof ComponentNode.ComponentConstructorException) { return failedFirstMessage + constructMessage + exitMessage; } else { return failedFirstMessage + exitMessage; } } else { if (cause instanceof ComponentNode.ComponentConstructorException) { return failedNewMessage + constructMessage + retainMessage; } else { return failedNewMessage + retainMessage; } } } private ComponentGraph getConfigAndCreateGraph(ComponentGraph graph, Injector fallbackInjector, boolean restartOnRedeploy) { ConfigSnapshot snapshot; while (true) { snapshot = configurer.getConfigs(graph.configKeys(), leastGeneration, restartOnRedeploy); log.log(DEBUG, String.format("createNewGraph:\n" + "graph.configKeys = %s\n" + "graph.generation = %s\n" + "snapshot = %s\n", graph.configKeys(), graph.generation(), snapshot)); if (snapshot instanceof BootstrapConfigs) { if (getBootstrapGeneration() <= previousConfigGeneration) { throw new IllegalStateException(String.format( "Got bootstrap configs out of sequence for old config generation %d.\n" + "Previous config generation is %d", getBootstrapGeneration(), previousConfigGeneration)); } log.log(DEBUG, String.format( 
"Got new bootstrap generation\n" + "bootstrap generation = %d\n" + "components generation: %d\n" + "previous generation: %d\n", getBootstrapGeneration(), getComponentsGeneration(), previousConfigGeneration)); installBundles(snapshot.configs()); graph = createComponentsGraph(snapshot.configs(), getBootstrapGeneration(), fallbackInjector); } else if (snapshot instanceof ConfigRetriever.ComponentsConfigs) { break; } } log.log(DEBUG, String.format( "Got components configs,\n" + "bootstrap generation = %d\n" + "components generation: %d\n" + "previous generation: %d", getBootstrapGeneration(), getComponentsGeneration(), previousConfigGeneration)); return createAndConfigureComponentsGraph(snapshot.configs(), fallbackInjector); } private long getBootstrapGeneration() { return configurer.getBootstrapGeneration(); } private long getComponentsGeneration() { return configurer.getComponentsGeneration(); } private ComponentGraph createAndConfigureComponentsGraph(Map<ConfigKey<? extends ConfigInstance>, ConfigInstance> componentsConfigs, Injector fallbackInjector) { ComponentGraph componentGraph = createComponentsGraph(componentsConfigs, getComponentsGeneration(), fallbackInjector); componentGraph.setAvailableConfigs(componentsConfigs); return componentGraph; } private void injectNodes(ComponentsConfig config, ComponentGraph graph) { for (ComponentsConfig.Components component : config.components()) { Node componentNode = ComponentGraph.getNode(graph, component.id()); for (ComponentsConfig.Components.Inject inject : component.inject()) { componentNode.inject(ComponentGraph.getNode(graph, inject.id())); } } } public void installBundles(Map<ConfigKey<? extends ConfigInstance>, ConfigInstance> configsIncludingBootstrapConfigs) { BundlesConfig bundlesConfig = getConfig(bundlesConfigKey, configsIncludingBootstrapConfigs); osgi.useBundles(bundlesConfig.bundle()); } private ComponentGraph createComponentsGraph(Map<ConfigKey<? 
extends ConfigInstance>, ConfigInstance> configsIncludingBootstrapConfigs, long generation, Injector fallbackInjector) { previousConfigGeneration = generation; ComponentGraph graph = new ComponentGraph(generation); ComponentsConfig componentsConfig = getConfig(componentsConfigKey, configsIncludingBootstrapConfigs); if (componentsConfig == null) { throw new ConfigurationRuntimeException("The set of all configs does not include a valid 'components' config. Config set: " + configsIncludingBootstrapConfigs.keySet()); } addNodes(componentsConfig, graph); injectNodes(componentsConfig, graph); graph.complete(fallbackInjector); return graph; } private void addNodes(ComponentsConfig componentsConfig, ComponentGraph graph) { for (ComponentsConfig.Components config : componentsConfig.components()) { BundleInstantiationSpecification specification = bundleInstatiationSpecification(config); Class<?> componentClass = osgi.resolveClass(specification); Node componentNode; if (RestApiContext.class.isAssignableFrom(componentClass)) { Class<? 
extends RestApiContext> nodeClass = componentClass.asSubclass(RestApiContext.class); componentNode = new JerseyNode(specification.id, config.configId(), nodeClass, osgi); } else { componentNode = new ComponentNode(specification.id, config.configId(), componentClass, null); } graph.add(componentNode); } } private void constructComponents(ComponentGraph graph) { graph.nodes().forEach(Node::newOrCachedInstance); } public void shutdown(ComponentGraph graph, ComponentDeconstructor deconstructor) { shutdownConfigurer(); if (graph != null) { deconstructAllComponents(graph, deconstructor); } } public void shutdownConfigurer() { configurer.shutdown(); } public void reloadConfig(long generation) { subscriberFactory.reloadActiveSubscribers(generation); } private void deconstructAllComponents(ComponentGraph graph, ComponentDeconstructor deconstructor) { graph.allComponentsAndProviders().forEach(deconstructor::deconstruct); } public static <T extends ConfigInstance> T getConfig(ConfigKey<T> key, Map<ConfigKey<? extends ConfigInstance>, ConfigInstance> configs) { ConfigInstance inst = configs.get(key); if (inst == null || key.getConfigClass() == null) { throw new RuntimeException("Missing config " + key); } return key.getConfigClass().cast(inst); } public static BundleInstantiationSpecification bundleInstatiationSpecification(ComponentsConfig.Components config) { return BundleInstantiationSpecification.getFromStrings(config.id(), config.classId(), config.bundle()); } }
Move this outside the if statement as well?=
private VespaModel(ConfigModelRegistry configModelRegistry, DeployState deployState, boolean complete, FileDistributor fileDistributor) throws IOException, SAXException { super("vespamodel"); this.validationOverrides = deployState.validationOverrides(); configModelRegistry = new VespaConfigModelRegistry(configModelRegistry); VespaModelBuilder builder = new VespaDomBuilder(); this.applicationPackage = deployState.getApplicationPackage(); this.deployState = deployState; this.deployLogger = deployState.getDeployLogger(); root = builder.getRoot(VespaModel.ROOT_CONFIGID, deployState, this); HostSystem hostSystem = root.getHostSystem(); createGlobalRankProfiles(deployState.getDeployLogger(), deployState.getImportedModels(), deployState.rankProfileRegistry(), deployState.getQueryProfiles()); this.rankProfileList = new RankProfileList(null, rankingConstants, AttributeFields.empty, deployState.rankProfileRegistry(), deployState.getQueryProfiles().getRegistry(), deployState.getImportedModels()); if (complete) { configModelRepo.readConfigModels(deployState, this, builder, root, configModelRegistry); addServiceClusters(deployState.getApplicationPackage(), builder); this.allocatedHosts = AllocatedHosts.withHosts(hostSystem.getHostSpecs()); setupRouting(); this.fileDistributor = root.getFileDistributionConfigProducer().getFileDistributor(); getAdmin().addPerHostServices(hostSystem.getHosts(), deployState); freezeModelTopology(); root.prepare(configModelRepo); configModelRepo.prepareConfigModels(); this.deployState = null; validateWrapExceptions(); this.deployLogger = null; } else { this.allocatedHosts = AllocatedHosts.withHosts(hostSystem.getHostSpecs()); this.fileDistributor = fileDistributor; } this.deployState = null; }
this.deployLogger = null;
private VespaModel(ConfigModelRegistry configModelRegistry, DeployState deployState, boolean complete, FileDistributor fileDistributor) throws IOException, SAXException { super("vespamodel"); this.validationOverrides = deployState.validationOverrides(); configModelRegistry = new VespaConfigModelRegistry(configModelRegistry); VespaModelBuilder builder = new VespaDomBuilder(); this.applicationPackage = deployState.getApplicationPackage(); root = builder.getRoot(VespaModel.ROOT_CONFIGID, deployState, this); HostSystem hostSystem = root.getHostSystem(); createGlobalRankProfiles(deployState.getDeployLogger(), deployState.getImportedModels(), deployState.rankProfileRegistry(), deployState.getQueryProfiles()); this.rankProfileList = new RankProfileList(null, rankingConstants, AttributeFields.empty, deployState.rankProfileRegistry(), deployState.getQueryProfiles().getRegistry(), deployState.getImportedModels()); if (complete) { this.deployLogger = deployState.getDeployLogger(); this.deployState = deployState; configModelRepo.readConfigModels(deployState, this, builder, root, configModelRegistry); this.deployState = null; addServiceClusters(deployState.getApplicationPackage(), builder); this.allocatedHosts = AllocatedHosts.withHosts(hostSystem.getHostSpecs()); setupRouting(deployState); this.fileDistributor = root.getFileDistributionConfigProducer().getFileDistributor(); getAdmin().addPerHostServices(hostSystem.getHosts(), deployState); this.deployLogger = null; freezeModelTopology(); root.prepare(configModelRepo); configModelRepo.prepareConfigModels(deployState); validateWrapExceptions(); } else { this.allocatedHosts = AllocatedHosts.withHosts(hostSystem.getHostSpecs()); this.fileDistributor = fileDistributor; this.deployState = deployState; this.deployLogger = deployState.getDeployLogger(); } }
class VespaModel extends AbstractConfigProducerRoot implements Serializable, Model { private static final long serialVersionUID = 1L; public static final Logger log = Logger.getLogger(VespaModel.class.getPackage().toString()); private final ConfigModelRepo configModelRepo = new ConfigModelRepo(); private final AllocatedHosts allocatedHosts; /** The config id for the root config producer */ public static final String ROOT_CONFIGID = ""; private final ApplicationConfigProducerRoot root; private final ApplicationPackage applicationPackage; /** Generic service instances - service clusters which have no specific model */ private final List<ServiceCluster> serviceClusters = new ArrayList<>(); /** The global rank profiles of this model */ private final RankProfileList rankProfileList; /** The global ranking constants of this model */ private final RankingConstants rankingConstants = new RankingConstants(); private DeployState deployState; private DeployLogger deployLogger; /** The validation overrides of this. This is never null. 
*/ private final ValidationOverrides validationOverrides; private final FileDistributor fileDistributor; /** Creates a Vespa Model from internal model types only */ public VespaModel(ApplicationPackage app) throws IOException, SAXException { this(app, new NullConfigModelRegistry()); } /** Creates a Vespa Model from internal model types only */ public VespaModel(DeployState deployState) throws IOException, SAXException { this(new NullConfigModelRegistry(), deployState); } /** * Constructs vespa model using config given in app * * @param app the application to create a model from * @param configModelRegistry a registry of config model "main" classes which may be used * to instantiate config models */ public VespaModel(ApplicationPackage app, ConfigModelRegistry configModelRegistry) throws IOException, SAXException { this(configModelRegistry, new DeployState.Builder().applicationPackage(app).build()); } /** * Constructs vespa model using config given in app * * @param configModelRegistry a registry of config model "main" classes which may be used * to instantiate config models * @param deployState the global deploy state to use for this model. 
*/ public VespaModel(ConfigModelRegistry configModelRegistry, DeployState deployState) throws IOException, SAXException { this(configModelRegistry, deployState, true, null); } /** Returns the application package owning this */ public ApplicationPackage applicationPackage() { return applicationPackage; } /** Returns the global ranking constants of this */ public RankingConstants rankingConstants() { return rankingConstants; } /** Creates a mutable model with no services instantiated */ public static VespaModel createIncomplete(DeployState deployState) throws IOException, SAXException { return new VespaModel(new NullConfigModelRegistry(), deployState, false, new FileDistributor(deployState.getFileRegistry(), null)); } private void validateWrapExceptions() { try { validate(); } catch (RuntimeException e) { throw e; } catch (Exception e) { throw new RuntimeException("Error while validating model:", e); } } /** Adds generic application specific clusters of services */ private void addServiceClusters(ApplicationPackage app, VespaModelBuilder builder) { serviceClusters.addAll(builder.getClusters(app, this)); } /** * Creates a rank profile not attached to any search definition, for each imported model in the application package, * and adds it to the given rank profile registry. */ private void createGlobalRankProfiles(DeployLogger deployLogger, ImportedModels importedModels, RankProfileRegistry rankProfileRegistry, QueryProfiles queryProfiles) { if ( ! 
importedModels.all().isEmpty()) { for (ImportedModel model : importedModels.all()) { RankProfile profile = new RankProfile(model.name(), this, rankProfileRegistry); rankProfileRegistry.add(profile); ConvertedModel convertedModel = ConvertedModel.fromSource(new ModelName(model.name()), model.name(), profile, queryProfiles.getRegistry(), model); convertedModel.expressions().values().forEach(f -> profile.addFunction(f, false)); } } else { ApplicationFile generatedModelsDir = applicationPackage.getFile(ApplicationPackage.MODELS_GENERATED_REPLICATED_DIR); for (ApplicationFile generatedModelDir : generatedModelsDir.listFiles()) { String modelName = generatedModelDir.getPath().last(); if (modelName.contains(".")) continue; RankProfile profile = new RankProfile(modelName, this, rankProfileRegistry); rankProfileRegistry.add(profile); ConvertedModel convertedModel = ConvertedModel.fromStore(new ModelName(modelName), modelName, profile); convertedModel.expressions().values().forEach(f -> profile.addFunction(f, false)); } } new Processing().processRankProfiles(deployLogger, rankProfileRegistry, queryProfiles, true, false); } /** Returns the global rank profiles as a rank profile list */ public RankProfileList rankProfileList() { return rankProfileList; } private void setupRouting() { root.setupRouting(this, configModelRepo); } /** Returns the one and only HostSystem of this VespaModel */ public HostSystem getHostSystem() { return root.getHostSystem(); } /** Return a collection of all hostnames used in this application */ @Override public Set<HostInfo> getHosts() { return getHostSystem().getHosts().stream() .map(HostResource::getHostInfo) .collect(Collectors.toCollection(LinkedHashSet::new)); } public FileDistributor getFileDistributor() { return fileDistributor; } @Override public Set<FileReference> fileReferences() { return fileDistributor.allFilesToSend(); } /** Returns this models Vespa instance */ public ApplicationConfigProducerRoot getVespa() { return root; } @Override 
public boolean allowModelVersionMismatch(Instant now) { return validationOverrides.allows(ValidationId.configModelVersionMismatch, now) || validationOverrides.allows(ValidationId.skipOldConfigModels, now); } @Override public boolean skipOldConfigModels(Instant now) { return validationOverrides.allows(ValidationId.skipOldConfigModels, now); } /** * Resolves config of the given type and config id, by first instantiating the correct {@link com.yahoo.config.ConfigInstance.Builder}, * calling {@link * types in the model. * * @param clazz The type of config * @param configId The config id * @return A config instance of the given type */ public <CONFIGTYPE extends ConfigInstance> CONFIGTYPE getConfig(Class<CONFIGTYPE> clazz, String configId) { try { ConfigInstance.Builder builder = newBuilder(clazz); getConfig(builder, configId); return newConfigInstance(clazz, builder); } catch (Exception e) { throw new RuntimeException(e); } } /** * Populates an instance of configClass with config produced by configProducer. */ public static <CONFIGTYPE extends ConfigInstance> CONFIGTYPE getConfig(Class<CONFIGTYPE> configClass, ConfigProducer configProducer) { try { Builder builder = newBuilder(configClass); populateConfigBuilder(builder, configProducer); return newConfigInstance(configClass, builder); } catch (Exception e) { throw new RuntimeException("Failed getting config for class " + configClass.getName(), e); } } private static <CONFIGTYPE extends ConfigInstance> CONFIGTYPE newConfigInstance(Class<CONFIGTYPE> configClass, Builder builder) throws NoSuchMethodException, InstantiationException, IllegalAccessException, java.lang.reflect.InvocationTargetException { Constructor<CONFIGTYPE> constructor = configClass.getConstructor(builder.getClass()); return constructor.newInstance(builder); } private static Builder newBuilder(Class<? 
extends ConfigInstance> configClass) throws ReflectiveOperationException { Class builderClazz = configClass.getClassLoader().loadClass(configClass.getName() + "$Builder"); return (Builder)builderClazz.getDeclaredConstructor().newInstance(); } /** * Throw if the config id does not exist in the model. * * @param configId a config id */ protected void checkId(String configId) { if ( ! id2producer.containsKey(configId)) { log.log(LogLevel.DEBUG, "Invalid config id: " + configId); } } /** * Resolves config for a given config id and populates the given builder with the config. * * @param builder a configinstance builder * @param configId the config id for the config client * @return the builder if a producer was found, and it did apply config, null otherwise */ @SuppressWarnings("unchecked") @Override public ConfigInstance.Builder getConfig(ConfigInstance.Builder builder, String configId) { checkId(configId); Optional<ConfigProducer> configProducer = getConfigProducer(configId); if ( ! configProducer.isPresent()) return null; populateConfigBuilder(builder, configProducer.get()); return builder; } private static void populateConfigBuilder(Builder builder, ConfigProducer configProducer) { boolean found = configProducer.cascadeConfig(builder); boolean foundOverride = configProducer.addUserConfig(builder); if (logDebug()) { log.log(LogLevel.DEBUG, "Trying to get config for " + builder.getClass().getDeclaringClass().getName() + " for config id " + quote(configProducer.getConfigId()) + ", found=" + found + ", foundOverride=" + foundOverride); } } /** * Resolve config for a given key and config definition * * @param configKey The key to resolve. 
* @param targetDef The config definition to use for the schema * @return The payload as a list of strings */ @Override public ConfigPayload getConfig(ConfigKey configKey, com.yahoo.vespa.config.buildergen.ConfigDefinition targetDef) { ConfigBuilder builder = InstanceResolver.resolveToBuilder(configKey, this, targetDef); if (builder != null) { log.log(LogLevel.DEBUG, () -> "Found builder for " + configKey); ConfigPayload payload; InnerCNode innerCNode = targetDef != null ? targetDef.getCNode() : null; if (builder instanceof GenericConfig.GenericConfigBuilder) { payload = getConfigFromGenericBuilder(builder); } else { payload = getConfigFromBuilder(configKey, builder, innerCNode); } return (innerCNode != null) ? payload.applyDefaultsFromDef(innerCNode) : payload; } return null; } private ConfigPayload getConfigFromBuilder(ConfigKey configKey, ConfigBuilder builder, InnerCNode targetDef) { try { ConfigInstance instance = InstanceResolver.resolveToInstance(configKey, builder, targetDef); log.log(LogLevel.DEBUG, () -> "getConfigFromBuilder for " + configKey + ",instance=" + instance); return ConfigPayload.fromInstance(instance); } catch (ConfigurationRuntimeException e) { log.log(LogLevel.INFO, "Error resolving instance for key '" + configKey + "', returning empty config: " + Exceptions.toMessageString(e)); return ConfigPayload.fromBuilder(new ConfigPayloadBuilder()); } } private ConfigPayload getConfigFromGenericBuilder(ConfigBuilder builder) { return ((GenericConfig.GenericConfigBuilder) builder).getPayload(); } @Override public Set<ConfigKey<?>> allConfigsProduced() { Set<ConfigKey<?>> keySet = new LinkedHashSet<>(); for (ConfigProducer producer : id2producer().values()) { keySet.addAll(configsProduced(producer)); } return keySet; } public ConfigInstance.Builder createBuilder(ConfigDefinitionKey key, ConfigDefinition targetDef) { String className = createClassName(key.getName()); Class<?> clazz; final String fullClassName = InstanceResolver.packageName(key) + "." 
+ className; final String builderName = fullClassName + "$Builder"; final String producerName = fullClassName + "$Producer"; ClassLoader classLoader = getConfigClassLoader(producerName); if (classLoader == null) { classLoader = getClass().getClassLoader(); if (logDebug()) { log.log(LogLevel.DEBUG, "No producer found to get classloader from for " + fullClassName + ". Using default"); } } try { clazz = classLoader.loadClass(builderName); } catch (ClassNotFoundException e) { if (logDebug()) { log.log(LogLevel.DEBUG, "Tried to load " + builderName + ", not found, trying with generic builder"); } return new GenericConfig.GenericConfigBuilder(key, new ConfigPayloadBuilder()); } Object i; try { i = clazz.getDeclaredConstructor().newInstance(); } catch (ReflectiveOperationException e) { throw new ConfigurationRuntimeException(e); } if (!(i instanceof ConfigInstance.Builder)) { throw new ConfigurationRuntimeException(fullClassName + " is not a ConfigInstance.Builder, can not produce config for the name '" + key.getName() + "'."); } return (ConfigInstance.Builder) i; } private static boolean logDebug() { return log.isLoggable(LogLevel.DEBUG); } /** * The set of all config ids present * @return set of config ids */ public Set<String> allConfigIds() { return id2producer.keySet(); } @Override public void distributeFiles(FileDistribution fileDistribution) { getFileDistributor().sendDeployedFiles(fileDistribution); } @Override public AllocatedHosts allocatedHosts() { return allocatedHosts; } private static Set<ConfigKey<?>> configsProduced(ConfigProducer cp) { Set<ConfigKey<?>> ret = ReflectionUtil.configsProducedByInterface(cp.getClass(), cp.getConfigId()); UserConfigRepo userConfigs = cp.getUserConfigs(); for (ConfigDefinitionKey userKey : userConfigs.configsProduced()) { ret.add(new ConfigKey<>(userKey.getName(), cp.getConfigId(), userKey.getNamespace())); } return ret; } @Override public DeployState getDeployState() { if (deployState == null) throw new 
IllegalStateException("Cannot call getDeployState() once model has been built"); return deployState; } /** * @return an unmodifiable copy of the set of configIds in this VespaModel. */ public Set<String> getConfigIds() { return Collections.unmodifiableSet(id2producer.keySet()); } /** * Returns the admin component of the vespamodel. * * @return Admin */ public Admin getAdmin() { return root.getAdmin(); } /** * Adds the descendant (at any depth level), so it can be looked up * on configId in the Map. * * @param configId the id to register with, not necessarily equal to descendant.getConfigId(). * @param descendant The configProducer descendant to add */ public void addDescendant(String configId, AbstractConfigProducer descendant) { if (id2producer.containsKey(configId)) { throw new RuntimeException ("Config ID '" + configId + "' cannot be reserved by an instance of class '" + descendant.getClass().getName() + "' since it is already used by an instance of class '" + id2producer.get(configId).getClass().getName() + "'. (This is commonly caused by service/node index " + "collisions in the config.)"); } id2producer.put(configId, descendant); } /** * Writes MODEL.cfg files for all config producers. 
* * @param baseDirectory dir to write files to */ public void writeFiles(File baseDirectory) throws IOException { super.writeFiles(baseDirectory); for (ConfigProducer cp : id2producer.values()) { try { File destination = new File(baseDirectory, cp.getConfigId().replace("/", File.separator)); cp.writeFiles(destination); } catch (IOException e) { throw new IOException(cp.getConfigId() + ": " + e.getMessage()); } } } public Clients getClients() { return configModelRepo.getClients(); } /** Returns all search clusters, both in Search and Content */ public List<AbstractSearchCluster> getSearchClusters() { return Content.getSearchClusters(configModelRepo()); } /** Returns a map of content clusters by ID */ public Map<String, ContentCluster> getContentClusters() { Map<String, ContentCluster> clusters = new LinkedHashMap<>(); for (Content model : configModelRepo.getModels(Content.class)) { clusters.put(model.getId(), model.getCluster()); } return Collections.unmodifiableMap(clusters); } /** Returns a map of container clusters by ID */ public Map<String, ContainerCluster> getContainerClusters() { Map<String, ContainerCluster> clusters = new LinkedHashMap<>(); for (ContainerModel model : configModelRepo.getModels(ContainerModel.class)) { clusters.put(model.getId(), model.getCluster()); } return Collections.unmodifiableMap(clusters); } /** Returns the routing config model. This might be null. 
*/ public Routing getRouting() { return configModelRepo.getRouting(); } public FileDistributionConfigProducer getFileDistributionConfigProducer() { return root.getFileDistributionConfigProducer(); } /** The clusters of application specific generic services */ public List<ServiceCluster> serviceClusters() { return serviceClusters; } /** Returns an unmodifiable view of the mapping of config id to {@link ConfigProducer} */ public Map<String, ConfigProducer> id2producer() { return Collections.unmodifiableMap(id2producer); } /** * @return this root's model repository */ public ConfigModelRepo configModelRepo() { return configModelRepo; } @Override public DeployLogger deployLogger() { return deployLogger; } }
class VespaModel extends AbstractConfigProducerRoot implements Serializable, Model { private static final long serialVersionUID = 1L; public static final Logger log = Logger.getLogger(VespaModel.class.getPackage().toString()); private final ConfigModelRepo configModelRepo = new ConfigModelRepo(); private final AllocatedHosts allocatedHosts; /** The config id for the root config producer */ public static final String ROOT_CONFIGID = ""; private final ApplicationConfigProducerRoot root; private final ApplicationPackage applicationPackage; /** Generic service instances - service clusters which have no specific model */ private final List<ServiceCluster> serviceClusters = new ArrayList<>(); /** The global rank profiles of this model */ private final RankProfileList rankProfileList; /** The global ranking constants of this model */ private final RankingConstants rankingConstants = new RankingConstants(); private DeployState deployState; private DeployLogger deployLogger; /** The validation overrides of this. This is never null. 
*/ private final ValidationOverrides validationOverrides; private final FileDistributor fileDistributor; /** Creates a Vespa Model from internal model types only */ public VespaModel(ApplicationPackage app) throws IOException, SAXException { this(app, new NullConfigModelRegistry()); } /** Creates a Vespa Model from internal model types only */ public VespaModel(DeployState deployState) throws IOException, SAXException { this(new NullConfigModelRegistry(), deployState); } /** * Constructs vespa model using config given in app * * @param app the application to create a model from * @param configModelRegistry a registry of config model "main" classes which may be used * to instantiate config models */ public VespaModel(ApplicationPackage app, ConfigModelRegistry configModelRegistry) throws IOException, SAXException { this(configModelRegistry, new DeployState.Builder().applicationPackage(app).build()); } /** * Constructs vespa model using config given in app * * @param configModelRegistry a registry of config model "main" classes which may be used * to instantiate config models * @param deployState the global deploy state to use for this model. 
*/ public VespaModel(ConfigModelRegistry configModelRegistry, DeployState deployState) throws IOException, SAXException { this(configModelRegistry, deployState, true, null); } /** Returns the application package owning this */ public ApplicationPackage applicationPackage() { return applicationPackage; } /** Returns the global ranking constants of this */ public RankingConstants rankingConstants() { return rankingConstants; } /** Creates a mutable model with no services instantiated */ public static VespaModel createIncomplete(DeployState deployState) throws IOException, SAXException { return new VespaModel(new NullConfigModelRegistry(), deployState, false, new FileDistributor(deployState.getFileRegistry(), null)); } private void validateWrapExceptions() { try { validate(); } catch (RuntimeException e) { throw e; } catch (Exception e) { throw new RuntimeException("Error while validating model:", e); } } /** Adds generic application specific clusters of services */ private void addServiceClusters(ApplicationPackage app, VespaModelBuilder builder) { serviceClusters.addAll(builder.getClusters(app, this)); } /** * Creates a rank profile not attached to any search definition, for each imported model in the application package, * and adds it to the given rank profile registry. */ private void createGlobalRankProfiles(DeployLogger deployLogger, ImportedModels importedModels, RankProfileRegistry rankProfileRegistry, QueryProfiles queryProfiles) { if ( ! 
importedModels.all().isEmpty()) { for (ImportedModel model : importedModels.all()) { RankProfile profile = new RankProfile(model.name(), this, rankProfileRegistry); rankProfileRegistry.add(profile); ConvertedModel convertedModel = ConvertedModel.fromSource(new ModelName(model.name()), model.name(), profile, queryProfiles.getRegistry(), model); convertedModel.expressions().values().forEach(f -> profile.addFunction(f, false)); } } else { ApplicationFile generatedModelsDir = applicationPackage.getFile(ApplicationPackage.MODELS_GENERATED_REPLICATED_DIR); for (ApplicationFile generatedModelDir : generatedModelsDir.listFiles()) { String modelName = generatedModelDir.getPath().last(); if (modelName.contains(".")) continue; RankProfile profile = new RankProfile(modelName, this, rankProfileRegistry); rankProfileRegistry.add(profile); ConvertedModel convertedModel = ConvertedModel.fromStore(new ModelName(modelName), modelName, profile); convertedModel.expressions().values().forEach(f -> profile.addFunction(f, false)); } } new Processing().processRankProfiles(deployLogger, rankProfileRegistry, queryProfiles, true, false); } /** Returns the global rank profiles as a rank profile list */ public RankProfileList rankProfileList() { return rankProfileList; } private void setupRouting(DeployState deployState) { root.setupRouting(deployState, this, configModelRepo); } /** Returns the one and only HostSystem of this VespaModel */ public HostSystem getHostSystem() { return root.getHostSystem(); } /** Return a collection of all hostnames used in this application */ @Override public Set<HostInfo> getHosts() { return getHostSystem().getHosts().stream() .map(HostResource::getHostInfo) .collect(Collectors.toCollection(LinkedHashSet::new)); } public FileDistributor getFileDistributor() { return fileDistributor; } @Override public Set<FileReference> fileReferences() { return fileDistributor.allFilesToSend(); } /** Returns this models Vespa instance */ public ApplicationConfigProducerRoot 
getVespa() { return root; } @Override public boolean allowModelVersionMismatch(Instant now) { return validationOverrides.allows(ValidationId.configModelVersionMismatch, now) || validationOverrides.allows(ValidationId.skipOldConfigModels, now); } @Override public boolean skipOldConfigModels(Instant now) { return validationOverrides.allows(ValidationId.skipOldConfigModels, now); } /** * Resolves config of the given type and config id, by first instantiating the correct {@link com.yahoo.config.ConfigInstance.Builder}, * calling {@link * types in the model. * * @param clazz The type of config * @param configId The config id * @return A config instance of the given type */ public <CONFIGTYPE extends ConfigInstance> CONFIGTYPE getConfig(Class<CONFIGTYPE> clazz, String configId) { try { ConfigInstance.Builder builder = newBuilder(clazz); getConfig(builder, configId); return newConfigInstance(clazz, builder); } catch (Exception e) { throw new RuntimeException(e); } } /** * Populates an instance of configClass with config produced by configProducer. */ public static <CONFIGTYPE extends ConfigInstance> CONFIGTYPE getConfig(Class<CONFIGTYPE> configClass, ConfigProducer configProducer) { try { Builder builder = newBuilder(configClass); populateConfigBuilder(builder, configProducer); return newConfigInstance(configClass, builder); } catch (Exception e) { throw new RuntimeException("Failed getting config for class " + configClass.getName(), e); } } private static <CONFIGTYPE extends ConfigInstance> CONFIGTYPE newConfigInstance(Class<CONFIGTYPE> configClass, Builder builder) throws NoSuchMethodException, InstantiationException, IllegalAccessException, java.lang.reflect.InvocationTargetException { Constructor<CONFIGTYPE> constructor = configClass.getConstructor(builder.getClass()); return constructor.newInstance(builder); } private static Builder newBuilder(Class<? 
extends ConfigInstance> configClass) throws ReflectiveOperationException { Class builderClazz = configClass.getClassLoader().loadClass(configClass.getName() + "$Builder"); return (Builder)builderClazz.getDeclaredConstructor().newInstance(); } /** * Throw if the config id does not exist in the model. * * @param configId a config id */ protected void checkId(String configId) { if ( ! id2producer.containsKey(configId)) { log.log(LogLevel.DEBUG, "Invalid config id: " + configId); } } /** * Resolves config for a given config id and populates the given builder with the config. * * @param builder a configinstance builder * @param configId the config id for the config client * @return the builder if a producer was found, and it did apply config, null otherwise */ @SuppressWarnings("unchecked") @Override public ConfigInstance.Builder getConfig(ConfigInstance.Builder builder, String configId) { checkId(configId); Optional<ConfigProducer> configProducer = getConfigProducer(configId); if ( ! configProducer.isPresent()) return null; populateConfigBuilder(builder, configProducer.get()); return builder; } private static void populateConfigBuilder(Builder builder, ConfigProducer configProducer) { boolean found = configProducer.cascadeConfig(builder); boolean foundOverride = configProducer.addUserConfig(builder); if (logDebug()) { log.log(LogLevel.DEBUG, "Trying to get config for " + builder.getClass().getDeclaringClass().getName() + " for config id " + quote(configProducer.getConfigId()) + ", found=" + found + ", foundOverride=" + foundOverride); } } /** * Resolve config for a given key and config definition * * @param configKey The key to resolve. 
* @param targetDef The config definition to use for the schema * @return The payload as a list of strings */ @Override public ConfigPayload getConfig(ConfigKey configKey, com.yahoo.vespa.config.buildergen.ConfigDefinition targetDef) { ConfigBuilder builder = InstanceResolver.resolveToBuilder(configKey, this, targetDef); if (builder != null) { log.log(LogLevel.DEBUG, () -> "Found builder for " + configKey); ConfigPayload payload; InnerCNode innerCNode = targetDef != null ? targetDef.getCNode() : null; if (builder instanceof GenericConfig.GenericConfigBuilder) { payload = getConfigFromGenericBuilder(builder); } else { payload = getConfigFromBuilder(configKey, builder, innerCNode); } return (innerCNode != null) ? payload.applyDefaultsFromDef(innerCNode) : payload; } return null; } private ConfigPayload getConfigFromBuilder(ConfigKey configKey, ConfigBuilder builder, InnerCNode targetDef) { try { ConfigInstance instance = InstanceResolver.resolveToInstance(configKey, builder, targetDef); log.log(LogLevel.DEBUG, () -> "getConfigFromBuilder for " + configKey + ",instance=" + instance); return ConfigPayload.fromInstance(instance); } catch (ConfigurationRuntimeException e) { log.log(LogLevel.INFO, "Error resolving instance for key '" + configKey + "', returning empty config: " + Exceptions.toMessageString(e)); return ConfigPayload.fromBuilder(new ConfigPayloadBuilder()); } } private ConfigPayload getConfigFromGenericBuilder(ConfigBuilder builder) { return ((GenericConfig.GenericConfigBuilder) builder).getPayload(); } @Override public Set<ConfigKey<?>> allConfigsProduced() { Set<ConfigKey<?>> keySet = new LinkedHashSet<>(); for (ConfigProducer producer : id2producer().values()) { keySet.addAll(configsProduced(producer)); } return keySet; } public ConfigInstance.Builder createBuilder(ConfigDefinitionKey key, ConfigDefinition targetDef) { String className = createClassName(key.getName()); Class<?> clazz; final String fullClassName = InstanceResolver.packageName(key) + "." 
+ className; final String builderName = fullClassName + "$Builder"; final String producerName = fullClassName + "$Producer"; ClassLoader classLoader = getConfigClassLoader(producerName); if (classLoader == null) { classLoader = getClass().getClassLoader(); if (logDebug()) { log.log(LogLevel.DEBUG, "No producer found to get classloader from for " + fullClassName + ". Using default"); } } try { clazz = classLoader.loadClass(builderName); } catch (ClassNotFoundException e) { if (logDebug()) { log.log(LogLevel.DEBUG, "Tried to load " + builderName + ", not found, trying with generic builder"); } return new GenericConfig.GenericConfigBuilder(key, new ConfigPayloadBuilder()); } Object i; try { i = clazz.getDeclaredConstructor().newInstance(); } catch (ReflectiveOperationException e) { throw new ConfigurationRuntimeException(e); } if (!(i instanceof ConfigInstance.Builder)) { throw new ConfigurationRuntimeException(fullClassName + " is not a ConfigInstance.Builder, can not produce config for the name '" + key.getName() + "'."); } return (ConfigInstance.Builder) i; } private static boolean logDebug() { return log.isLoggable(LogLevel.DEBUG); } /** * The set of all config ids present * @return set of config ids */ public Set<String> allConfigIds() { return id2producer.keySet(); } @Override public void distributeFiles(FileDistribution fileDistribution) { getFileDistributor().sendDeployedFiles(fileDistribution); } @Override public AllocatedHosts allocatedHosts() { return allocatedHosts; } private static Set<ConfigKey<?>> configsProduced(ConfigProducer cp) { Set<ConfigKey<?>> ret = ReflectionUtil.configsProducedByInterface(cp.getClass(), cp.getConfigId()); UserConfigRepo userConfigs = cp.getUserConfigs(); for (ConfigDefinitionKey userKey : userConfigs.configsProduced()) { ret.add(new ConfigKey<>(userKey.getName(), cp.getConfigId(), userKey.getNamespace())); } return ret; } @Override public DeployState getDeployState() { if (deployState == null) throw new 
IllegalStateException("Cannot call getDeployState() once model has been built"); return deployState; } /** * @return an unmodifiable copy of the set of configIds in this VespaModel. */ public Set<String> getConfigIds() { return Collections.unmodifiableSet(id2producer.keySet()); } /** * Returns the admin component of the vespamodel. * * @return Admin */ public Admin getAdmin() { return root.getAdmin(); } /** * Adds the descendant (at any depth level), so it can be looked up * on configId in the Map. * * @param configId the id to register with, not necessarily equal to descendant.getConfigId(). * @param descendant The configProducer descendant to add */ public void addDescendant(String configId, AbstractConfigProducer descendant) { if (id2producer.containsKey(configId)) { throw new RuntimeException ("Config ID '" + configId + "' cannot be reserved by an instance of class '" + descendant.getClass().getName() + "' since it is already used by an instance of class '" + id2producer.get(configId).getClass().getName() + "'. (This is commonly caused by service/node index " + "collisions in the config.)"); } id2producer.put(configId, descendant); } /** * Writes MODEL.cfg files for all config producers. 
* * @param baseDirectory dir to write files to */ public void writeFiles(File baseDirectory) throws IOException { super.writeFiles(baseDirectory); for (ConfigProducer cp : id2producer.values()) { try { File destination = new File(baseDirectory, cp.getConfigId().replace("/", File.separator)); cp.writeFiles(destination); } catch (IOException e) { throw new IOException(cp.getConfigId() + ": " + e.getMessage()); } } } public Clients getClients() { return configModelRepo.getClients(); } /** Returns all search clusters, both in Search and Content */ public List<AbstractSearchCluster> getSearchClusters() { return Content.getSearchClusters(configModelRepo()); } /** Returns a map of content clusters by ID */ public Map<String, ContentCluster> getContentClusters() { Map<String, ContentCluster> clusters = new LinkedHashMap<>(); for (Content model : configModelRepo.getModels(Content.class)) { clusters.put(model.getId(), model.getCluster()); } return Collections.unmodifiableMap(clusters); } /** Returns a map of container clusters by ID */ public Map<String, ContainerCluster> getContainerClusters() { Map<String, ContainerCluster> clusters = new LinkedHashMap<>(); for (ContainerModel model : configModelRepo.getModels(ContainerModel.class)) { clusters.put(model.getId(), model.getCluster()); } return Collections.unmodifiableMap(clusters); } /** Returns the routing config model. This might be null. 
*/ public Routing getRouting() { return configModelRepo.getRouting(); } public FileDistributionConfigProducer getFileDistributionConfigProducer() { return root.getFileDistributionConfigProducer(); } /** The clusters of application specific generic services */ public List<ServiceCluster> serviceClusters() { return serviceClusters; } /** Returns an unmodifiable view of the mapping of config id to {@link ConfigProducer} */ public Map<String, ConfigProducer> id2producer() { return Collections.unmodifiableMap(id2producer); } /** * @return this root's model repository */ public ConfigModelRepo configModelRepo() { return configModelRepo; } @Override public DeployLogger deployLogger() { return deployLogger; } }
Is there a valid use case for `selection` to be `null` and for the generated selection to _not_ be constrained by the `documentType`?
private static String createSelectionString(String documentType, String selection) { if (selection == null) return ""; if (selection.isEmpty()) return documentType; StringBuilder sb = new StringBuilder(documentType); sb.append(" and ( ").append(selection).append(" )"); return sb.toString(); }
if (selection.isEmpty()) return documentType;
private static String createSelectionString(String documentType, String selection) { if ((selection == null) || selection.isEmpty()) return documentType; StringBuilder sb = new StringBuilder(documentType); sb.append(" and ( ").append(selection).append(" )"); return sb.toString(); }
class MessageBusVisitorSessionFactory implements VisitorSessionFactory { private static final LoadTypeSet loadTypes = new LoadTypeSet("client"); private static final DocumentAccess access = new MessageBusDocumentAccess(new MessageBusParams(loadTypes)); @Override public VisitorSession createVisitorSession(VisitorParameters params) throws ParseException { return access.createVisitorSession(params); } @Override public LoadTypeSet getLoadTypeSet() { return loadTypes; } }
class MessageBusVisitorSessionFactory implements VisitorSessionFactory { private static final LoadTypeSet loadTypes = new LoadTypeSet("client"); private static final DocumentAccess access = new MessageBusDocumentAccess(new MessageBusParams(loadTypes)); @Override public VisitorSession createVisitorSession(VisitorParameters params) throws ParseException { return access.createVisitorSession(params); } @Override public LoadTypeSet getLoadTypeSet() { return loadTypes; } }
Probably not.
private static String createSelectionString(String documentType, String selection) { if (selection == null) return ""; if (selection.isEmpty()) return documentType; StringBuilder sb = new StringBuilder(documentType); sb.append(" and ( ").append(selection).append(" )"); return sb.toString(); }
if (selection.isEmpty()) return documentType;
private static String createSelectionString(String documentType, String selection) { if ((selection == null) || selection.isEmpty()) return documentType; StringBuilder sb = new StringBuilder(documentType); sb.append(" and ( ").append(selection).append(" )"); return sb.toString(); }
class MessageBusVisitorSessionFactory implements VisitorSessionFactory { private static final LoadTypeSet loadTypes = new LoadTypeSet("client"); private static final DocumentAccess access = new MessageBusDocumentAccess(new MessageBusParams(loadTypes)); @Override public VisitorSession createVisitorSession(VisitorParameters params) throws ParseException { return access.createVisitorSession(params); } @Override public LoadTypeSet getLoadTypeSet() { return loadTypes; } }
class MessageBusVisitorSessionFactory implements VisitorSessionFactory { private static final LoadTypeSet loadTypes = new LoadTypeSet("client"); private static final DocumentAccess access = new MessageBusDocumentAccess(new MessageBusParams(loadTypes)); @Override public VisitorSession createVisitorSession(VisitorParameters params) throws ParseException { return access.createVisitorSession(params); } @Override public LoadTypeSet getLoadTypeSet() { return loadTypes; } }
To avoid unintended changes of the API, it's better to use explicit strings here and not cheat by reusing the serialization logic. That way changes to `ContactInfoHandler.contactToSlime` that causes the API to change (e.g. renaming a field in the JSON), will cause this test to fail.
public void testGettingAndFeedingContactInfo() throws Exception { tester.createApplication(); String notFoundMessage = "{\"error-code\":\"NOT_FOUND\",\"message\":\"Could not find contact info for tenant1\"}"; assertResponse(new Request("https: Contact contact = new Contact(URI.create("https: Slime contactSlime = ContactInfoHandler.contactToSlime(contact); byte[] body = SlimeUtils.toJsonBytes(contactSlime); String expectedResponseMessage = "Added contact info for tenant1 - Contact{url=https: assertResponse(new Request("https: Response response = container.handleRequest(new Request("https: Contact actualContact = ContactInfoHandler.contactFromSlime(SlimeUtils.jsonToSlime(response.getBody())); assertEquals(contact, actualContact); }
byte[] body = SlimeUtils.toJsonBytes(contactSlime);
public void testGettingAndFeedingContactInfo() throws Exception { tester.createApplication(); String notFoundMessage = "{\"error-code\":\"NOT_FOUND\",\"message\":\"Could not find contact info for tenant1\"}"; assertResponse(new Request("http: String contactInfo = "{\"url\":\"https: String expectedResponseMessage = "Added contact info for tenant1 - Contact{url=https: assertResponse(new Request("http: Response response = container.handleRequest(new Request("http: String actualContactInfo = new String(response.getBody()); assertEquals(contactInfo, actualContactInfo); }
class ContactInfoHandlerTest extends ControllerContainerTest { private ContainerControllerTester tester; @Before public void before() { tester = new ContainerControllerTester(container, null); } @Test }
class ContactInfoHandlerTest extends ControllerContainerTest { private ContainerControllerTester tester; @Before public void before() { tester = new ContainerControllerTester(container, null); } @Test }
I prefer that small sd files like these are tested with SearchBuilder.createFromString("...") and the sd file inline in the test. Makes it a lot easier to understand the test. Goes for all 3 tests.
public void testMemorySummary() throws IOException, ParseException { DeployLoggerStub logger = new DeployLoggerStub(); SearchBuilder.createFromFile("src/test/examples/memorysummary.sd", logger); assertTrue(logger.entries.isEmpty()); }
SearchBuilder.createFromFile("src/test/examples/memorysummary.sd", logger);
public void testMemorySummary() throws ParseException { String sd = "search memorysummary {\n" + "\n" + " document memorysummary {\n" + "\n" + " field inmemory type string {\n" + " indexing: attribute | summary\n" + " }\n" + " field ondisk type string {\n" + " indexing: index " }\n" + "\n" + " }\n" + "\n" + "}"; DeployLoggerStub logger = new DeployLoggerStub(); SearchBuilder.createFromString(sd, logger); assertTrue(logger.entries.isEmpty()); }
class SummaryTestCase { @Test @Test public void testDiskSummary() throws IOException, ParseException { DeployLoggerStub logger = new DeployLoggerStub(); SearchBuilder.createFromFile("src/test/examples/disksummary.sd", logger); assertEquals(1, logger.entries.size()); assertEquals(Level.WARNING, logger.entries.get(0).level); assertEquals("summary field 'ondisk' in document summary 'default' references source field 'ondisk', " + "which is not an attribute: Using this summary will cause disk accesses. " + "Set 'from-disk' on this summary class to silence this warning.", logger.entries.get(0).message); } @Test public void testDiskSummaryExplicit() throws IOException, ParseException { DeployLoggerStub logger = new DeployLoggerStub(); SearchBuilder.createFromFile("src/test/examples/disksummaryexplicit.sd", logger); assertTrue(logger.entries.isEmpty()); } }
class SummaryTestCase { @Test @Test public void testDiskSummary() throws ParseException { String sd = "search disksummary {\n" + "\n" + " document disksummary {\n" + "\n" + " field inmemory type string {\n" + " indexing: attribute | summary\n" + " }\n" + " field ondisk type string {\n" + " indexing: index | summary\n" + " }\n" + "\n" + " }\n" + "\n" + "}"; DeployLoggerStub logger = new DeployLoggerStub(); SearchBuilder.createFromString(sd, logger); assertEquals(1, logger.entries.size()); assertEquals(Level.WARNING, logger.entries.get(0).level); assertEquals("summary field 'ondisk' in document summary 'default' references source field 'ondisk', " + "which is not an attribute: Using this summary will cause disk accesses. " + "Set 'from-disk' on this summary class to silence this warning.", logger.entries.get(0).message); } @Test public void testDiskSummaryExplicit() throws ParseException { String sd = "search disksummary {\n" + "\n" + " document disksummary {\n" + "\n" + " field inmemory type string {\n" + " indexing: attribute | summary\n" + " }\n" + " field ondisk type string {\n" + " indexing: index | summary\n" + " }\n" + "\n" + " }\n" + "\n" + " document-summary default {\n" + " from-disk\n" + " }\n" + "\n" + "}"; DeployLoggerStub logger = new DeployLoggerStub(); SearchBuilder.createFromString(sd, logger); assertTrue(logger.entries.isEmpty()); } }
How about we keep this sorted in SearchCluster instead?
private Group selectGroup(SearchCluster cluster) { ImmutableMap<Integer, SearchCluster.Group> byId = cluster.groups(); List<Integer> sortedKeys = new ArrayList<>(byId.keySet()); Collections.sort(sortedKeys); if (row != null && row < sortedKeys.size()) { return byId.get(sortedKeys.get(row)); } for (Integer id : sortedKeys) { SearchCluster.Group g = byId.get(id); if (g.hasSufficientCoverage()) { return g; } } return byId.get(sortedKeys.get(0)); }
private Group selectGroup(SearchCluster cluster) { if (group != null) { Optional<Group> specificGroup = cluster.group(group); if (specificGroup.isPresent()) { return specificGroup.get(); } else { throw new InvalidSearchPathException("Invalid searchPath, cluster does not have " + (group + 1) + " groups"); } } ImmutableCollection<Group> groups = cluster.groups().values(); for (Group g : groups) { if (g.hasSufficientCoverage()) { return g; } } return groups.iterator().next(); }
class SearchPath { /** * Parse the search path and select nodes from the given cluster based on it. * * @param searchPath * unparsed search path expression (see: model.searchPath in Search * API reference) * @param cluster * the search cluster from which nodes are selected * @throws InvalidSearchPathException * if the searchPath is malformed * @return list of nodes chosen with the search path, or an empty list in which * case some other node selection logic should be used */ public static List<SearchCluster.Node> selectNodes(String searchPath, SearchCluster cluster) { Optional<SearchPath> sp = SearchPath.fromString(searchPath); if (sp.isPresent()) { return sp.get().mapToNodes(cluster); } else { return Collections.emptyList(); } } public static Optional<SearchPath> fromString(String path) { if (path == null || path.isEmpty()) { return Optional.empty(); } if (path.indexOf(';') >= 0) { return Optional.empty(); } try { SearchPath sp = parseElement(path); if (sp.isEmpty()) { return Optional.empty(); } else { return Optional.of(sp); } } catch (NumberFormatException | InvalidSearchPathException e) { throw new InvalidSearchPathException("Invalid search path: " + path); } } private final List<Part> parts; private final Integer row; private SearchPath(List<Part> parts, Integer row) { this.parts = parts; this.row = row; } private List<SearchCluster.Node> mapToNodes(SearchCluster cluster) { if (cluster.groups().isEmpty()) { return Collections.emptyList(); } SearchCluster.Group group = selectGroup(cluster); if (parts.isEmpty()) { return group.nodes(); } Set<Integer> wanted = new HashSet<>(); int max = group.nodes().size(); for (Part part : parts) { wanted.addAll(part.matches(max)); } List<SearchCluster.Node> sortedByDistKey = new ArrayList<>(group.nodes()); sortedByDistKey.sort(Comparator.comparingInt(SearchCluster.Node::key)); List<SearchCluster.Node> ret = new ArrayList<>(); for (int idx : wanted) { ret.add(sortedByDistKey.get(idx)); } return ret; } private boolean isEmpty() 
{ return parts.isEmpty() && row == null; } private static SearchPath parseElement(String element) { Pair<String, String> partAndRow = halveAt('/', element); List<Part> parts = parseParts(partAndRow.getFirst()); Integer row = parseRow(partAndRow.getSecond()); return new SearchPath(parts, row); } private static List<Part> parseParts(String parts) { List<Part> ret = new ArrayList<>(); while (parts.length() > 0) { if (parts.startsWith("[")) { parts = parsePartRange(parts, ret); } else { if (isWildcard(parts)) { return Collections.emptyList(); } parts = parsePartNum(parts, ret); } } return ret; } private static final Pattern WILDCARD_PART = Pattern.compile("^\\*?(?:,|$)"); private static boolean isWildcard(String part) { return WILDCARD_PART.matcher(part).lookingAt(); } private static final Pattern PART_RANGE = Pattern.compile("^\\[(\\d+),(\\d+)>(?:,|$)"); private static String parsePartRange(String parts, List<Part> into) { Matcher m = PART_RANGE.matcher(parts); if (m.find()) { String ret = parts.substring(m.end()); Integer start = Integer.parseInt(m.group(1)); Integer end = Integer.parseInt(m.group(2)); if (start > end) { throw new InvalidSearchPathException("Invalid range"); } into.add(new Part(start, end)); return ret; } else { throw new InvalidSearchPathException("Invalid range expression"); } } private static String parsePartNum(String parts, List<Part> into) { Pair<String, String> numAndRest = halveAt(',', parts); int partNum = Integer.parseInt(numAndRest.getFirst()); into.add(new Part(partNum, partNum + 1)); return numAndRest.getSecond(); } private static Integer parseRow(String row) { if (row.isEmpty()) { return null; } if ("/".equals(row) || "*".equals(row)) { return null; } return Integer.parseInt(row); } private static Pair<String, String> halveAt(char divider, String string) { int pos = string.indexOf(divider); if (pos >= 0) { return new Pair<>(string.substring(0, pos), string.substring(pos + 1, string.length())); } return new Pair<>(string, ""); } 
@Override public String toString() { StringBuilder sb = new StringBuilder(); boolean first = true; for (Part p : parts) { if (first) { first = false; } else { sb.append(','); } sb.append(p.toString()); } if (row != null) { sb.append('/').append(row); } return sb.toString(); } private static class Part { private final int from; private final int to; Part(int from, int to) { this.from = from; this.to = to; } public Collection<Integer> matches(int max) { if (from >= max) { return Collections.emptyList(); } int end = (to > max) ? max : to; return IntStream.range(from, end).boxed().collect(Collectors.toList()); } @Override public String toString() { if (from + 1 == to) { return Integer.toString(from); } else { return "[" + from + "," + to + ">"; } } } public static class InvalidSearchPathException extends RuntimeException { public InvalidSearchPathException(String message) { super(message); } } }
class SearchPath { /** * Parse the search path and select nodes from the given cluster based on it. * * @param searchPath * unparsed search path expression (see: model.searchPath in Search * API reference) * @param cluster * the search cluster from which nodes are selected * @throws InvalidSearchPathException * if the searchPath is malformed * @return list of nodes chosen with the search path, or an empty list in which * case some other node selection logic should be used */ public static List<SearchCluster.Node> selectNodes(String searchPath, SearchCluster cluster) { Optional<SearchPath> sp = SearchPath.fromString(searchPath); if (sp.isPresent()) { return sp.get().mapToNodes(cluster); } else { return Collections.emptyList(); } } public static Optional<SearchPath> fromString(String path) { if (path == null || path.isEmpty()) { return Optional.empty(); } if (path.indexOf(';') >= 0) { return Optional.empty(); } try { SearchPath sp = parseElement(path); if (sp.isEmpty()) { return Optional.empty(); } else { return Optional.of(sp); } } catch (NumberFormatException | InvalidSearchPathException e) { throw new InvalidSearchPathException("Invalid search path: " + path, e); } } private final List<NodeSelection> nodes; private final Integer group; private SearchPath(List<NodeSelection> nodes, Integer group) { this.nodes = nodes; this.group = group; } private List<SearchCluster.Node> mapToNodes(SearchCluster cluster) { if (cluster.groups().isEmpty()) { return Collections.emptyList(); } SearchCluster.Group selectedGroup = selectGroup(cluster); if (nodes.isEmpty()) { return selectedGroup.nodes(); } List<SearchCluster.Node> groupNodes = selectedGroup.nodes(); Set<Integer> wanted = new HashSet<>(); int max = groupNodes.size(); for (NodeSelection node : nodes) { wanted.addAll(node.matches(max)); } List<SearchCluster.Node> ret = new ArrayList<>(); for (int idx : wanted) { ret.add(groupNodes.get(idx)); } return ret; } private boolean isEmpty() { return nodes.isEmpty() && group == 
null; } private static SearchPath parseElement(String element) { Pair<String, String> nodesAndGroup = halveAt('/', element); List<NodeSelection> nodes = parseNodes(nodesAndGroup.getFirst()); Integer group = parseGroup(nodesAndGroup.getSecond()); return new SearchPath(nodes, group); } private static List<NodeSelection> parseNodes(String nodes) { List<NodeSelection> ret = new ArrayList<>(); while (nodes.length() > 0) { if (nodes.startsWith("[")) { nodes = parseNodeRange(nodes, ret); } else { if (isWildcard(nodes)) { return Collections.emptyList(); } nodes = parseNodeNum(nodes, ret); } } return ret; } private static final Pattern NODE_WILDCARD = Pattern.compile("^\\*?(?:,|$)"); private static boolean isWildcard(String node) { return NODE_WILDCARD.matcher(node).lookingAt(); } private static final Pattern NODE_RANGE = Pattern.compile("^\\[(\\d+),(\\d+)>(?:,|$)"); private static String parseNodeRange(String nodes, List<NodeSelection> into) { Matcher m = NODE_RANGE.matcher(nodes); if (m.find()) { String ret = nodes.substring(m.end()); Integer start = Integer.parseInt(m.group(1)); Integer end = Integer.parseInt(m.group(2)); if (start > end) { throw new InvalidSearchPathException("Invalid range"); } into.add(new NodeSelection(start, end)); return ret; } else { throw new InvalidSearchPathException("Invalid range expression"); } } private static String parseNodeNum(String nodes, List<NodeSelection> into) { Pair<String, String> numAndRest = halveAt(',', nodes); int nodeNum = Integer.parseInt(numAndRest.getFirst()); into.add(new NodeSelection(nodeNum, nodeNum + 1)); return numAndRest.getSecond(); } private static Integer parseGroup(String group) { if (group.isEmpty()) { return null; } if ("/".equals(group) || "*".equals(group)) { return null; } return Integer.parseInt(group); } private static Pair<String, String> halveAt(char divider, String string) { int pos = string.indexOf(divider); if (pos >= 0) { return new Pair<>(string.substring(0, pos), string.substring(pos + 1, 
string.length())); } return new Pair<>(string, ""); } @Override public String toString() { StringBuilder sb = new StringBuilder(); boolean first = true; for (NodeSelection p : nodes) { if (first) { first = false; } else { sb.append(','); } sb.append(p.toString()); } if (group != null) { sb.append('/').append(group); } return sb.toString(); } private static class NodeSelection { private final int from; private final int to; NodeSelection(int from, int to) { this.from = from; this.to = to; } public Collection<Integer> matches(int max) { if (from >= max) { return Collections.emptyList(); } int end = (to > max) ? max : to; return IntStream.range(from, end).boxed().collect(Collectors.toList()); } @Override public String toString() { if (from + 1 == to) { return Integer.toString(from); } else { return "[" + from + "," + to + ">"; } } } public static class InvalidSearchPathException extends RuntimeException { public InvalidSearchPathException(String message) { super(message); } public InvalidSearchPathException(String message, Throwable cause) { super(message, cause); } } }
Shouldn't we throw an IllegalArgumentException here?
private Group selectGroup(SearchCluster cluster) { ImmutableMap<Integer, SearchCluster.Group> byId = cluster.groups(); List<Integer> sortedKeys = new ArrayList<>(byId.keySet()); Collections.sort(sortedKeys); if (row != null && row < sortedKeys.size()) { return byId.get(sortedKeys.get(row)); } for (Integer id : sortedKeys) { SearchCluster.Group g = byId.get(id); if (g.hasSufficientCoverage()) { return g; } } return byId.get(sortedKeys.get(0)); }
private Group selectGroup(SearchCluster cluster) { if (group != null) { Optional<Group> specificGroup = cluster.group(group); if (specificGroup.isPresent()) { return specificGroup.get(); } else { throw new InvalidSearchPathException("Invalid searchPath, cluster does not have " + (group + 1) + " groups"); } } ImmutableCollection<Group> groups = cluster.groups().values(); for (Group g : groups) { if (g.hasSufficientCoverage()) { return g; } } return groups.iterator().next(); }
class SearchPath { /** * Parse the search path and select nodes from the given cluster based on it. * * @param searchPath * unparsed search path expression (see: model.searchPath in Search * API reference) * @param cluster * the search cluster from which nodes are selected * @throws InvalidSearchPathException * if the searchPath is malformed * @return list of nodes chosen with the search path, or an empty list in which * case some other node selection logic should be used */ public static List<SearchCluster.Node> selectNodes(String searchPath, SearchCluster cluster) { Optional<SearchPath> sp = SearchPath.fromString(searchPath); if (sp.isPresent()) { return sp.get().mapToNodes(cluster); } else { return Collections.emptyList(); } } public static Optional<SearchPath> fromString(String path) { if (path == null || path.isEmpty()) { return Optional.empty(); } if (path.indexOf(';') >= 0) { return Optional.empty(); } try { SearchPath sp = parseElement(path); if (sp.isEmpty()) { return Optional.empty(); } else { return Optional.of(sp); } } catch (NumberFormatException | InvalidSearchPathException e) { throw new InvalidSearchPathException("Invalid search path: " + path); } } private final List<Part> parts; private final Integer row; private SearchPath(List<Part> parts, Integer row) { this.parts = parts; this.row = row; } private List<SearchCluster.Node> mapToNodes(SearchCluster cluster) { if (cluster.groups().isEmpty()) { return Collections.emptyList(); } SearchCluster.Group group = selectGroup(cluster); if (parts.isEmpty()) { return group.nodes(); } Set<Integer> wanted = new HashSet<>(); int max = group.nodes().size(); for (Part part : parts) { wanted.addAll(part.matches(max)); } List<SearchCluster.Node> sortedByDistKey = new ArrayList<>(group.nodes()); sortedByDistKey.sort(Comparator.comparingInt(SearchCluster.Node::key)); List<SearchCluster.Node> ret = new ArrayList<>(); for (int idx : wanted) { ret.add(sortedByDistKey.get(idx)); } return ret; } private boolean isEmpty() 
{ return parts.isEmpty() && row == null; } private static SearchPath parseElement(String element) { Pair<String, String> partAndRow = halveAt('/', element); List<Part> parts = parseParts(partAndRow.getFirst()); Integer row = parseRow(partAndRow.getSecond()); return new SearchPath(parts, row); } private static List<Part> parseParts(String parts) { List<Part> ret = new ArrayList<>(); while (parts.length() > 0) { if (parts.startsWith("[")) { parts = parsePartRange(parts, ret); } else { if (isWildcard(parts)) { return Collections.emptyList(); } parts = parsePartNum(parts, ret); } } return ret; } private static final Pattern WILDCARD_PART = Pattern.compile("^\\*?(?:,|$)"); private static boolean isWildcard(String part) { return WILDCARD_PART.matcher(part).lookingAt(); } private static final Pattern PART_RANGE = Pattern.compile("^\\[(\\d+),(\\d+)>(?:,|$)"); private static String parsePartRange(String parts, List<Part> into) { Matcher m = PART_RANGE.matcher(parts); if (m.find()) { String ret = parts.substring(m.end()); Integer start = Integer.parseInt(m.group(1)); Integer end = Integer.parseInt(m.group(2)); if (start > end) { throw new InvalidSearchPathException("Invalid range"); } into.add(new Part(start, end)); return ret; } else { throw new InvalidSearchPathException("Invalid range expression"); } } private static String parsePartNum(String parts, List<Part> into) { Pair<String, String> numAndRest = halveAt(',', parts); int partNum = Integer.parseInt(numAndRest.getFirst()); into.add(new Part(partNum, partNum + 1)); return numAndRest.getSecond(); } private static Integer parseRow(String row) { if (row.isEmpty()) { return null; } if ("/".equals(row) || "*".equals(row)) { return null; } return Integer.parseInt(row); } private static Pair<String, String> halveAt(char divider, String string) { int pos = string.indexOf(divider); if (pos >= 0) { return new Pair<>(string.substring(0, pos), string.substring(pos + 1, string.length())); } return new Pair<>(string, ""); } 
@Override public String toString() { StringBuilder sb = new StringBuilder(); boolean first = true; for (Part p : parts) { if (first) { first = false; } else { sb.append(','); } sb.append(p.toString()); } if (row != null) { sb.append('/').append(row); } return sb.toString(); } private static class Part { private final int from; private final int to; Part(int from, int to) { this.from = from; this.to = to; } public Collection<Integer> matches(int max) { if (from >= max) { return Collections.emptyList(); } int end = (to > max) ? max : to; return IntStream.range(from, end).boxed().collect(Collectors.toList()); } @Override public String toString() { if (from + 1 == to) { return Integer.toString(from); } else { return "[" + from + "," + to + ">"; } } } public static class InvalidSearchPathException extends RuntimeException { public InvalidSearchPathException(String message) { super(message); } } }
class SearchPath { /** * Parse the search path and select nodes from the given cluster based on it. * * @param searchPath * unparsed search path expression (see: model.searchPath in Search * API reference) * @param cluster * the search cluster from which nodes are selected * @throws InvalidSearchPathException * if the searchPath is malformed * @return list of nodes chosen with the search path, or an empty list in which * case some other node selection logic should be used */ public static List<SearchCluster.Node> selectNodes(String searchPath, SearchCluster cluster) { Optional<SearchPath> sp = SearchPath.fromString(searchPath); if (sp.isPresent()) { return sp.get().mapToNodes(cluster); } else { return Collections.emptyList(); } } public static Optional<SearchPath> fromString(String path) { if (path == null || path.isEmpty()) { return Optional.empty(); } if (path.indexOf(';') >= 0) { return Optional.empty(); } try { SearchPath sp = parseElement(path); if (sp.isEmpty()) { return Optional.empty(); } else { return Optional.of(sp); } } catch (NumberFormatException | InvalidSearchPathException e) { throw new InvalidSearchPathException("Invalid search path: " + path, e); } } private final List<NodeSelection> nodes; private final Integer group; private SearchPath(List<NodeSelection> nodes, Integer group) { this.nodes = nodes; this.group = group; } private List<SearchCluster.Node> mapToNodes(SearchCluster cluster) { if (cluster.groups().isEmpty()) { return Collections.emptyList(); } SearchCluster.Group selectedGroup = selectGroup(cluster); if (nodes.isEmpty()) { return selectedGroup.nodes(); } List<SearchCluster.Node> groupNodes = selectedGroup.nodes(); Set<Integer> wanted = new HashSet<>(); int max = groupNodes.size(); for (NodeSelection node : nodes) { wanted.addAll(node.matches(max)); } List<SearchCluster.Node> ret = new ArrayList<>(); for (int idx : wanted) { ret.add(groupNodes.get(idx)); } return ret; } private boolean isEmpty() { return nodes.isEmpty() && group == 
null; } private static SearchPath parseElement(String element) { Pair<String, String> nodesAndGroup = halveAt('/', element); List<NodeSelection> nodes = parseNodes(nodesAndGroup.getFirst()); Integer group = parseGroup(nodesAndGroup.getSecond()); return new SearchPath(nodes, group); } private static List<NodeSelection> parseNodes(String nodes) { List<NodeSelection> ret = new ArrayList<>(); while (nodes.length() > 0) { if (nodes.startsWith("[")) { nodes = parseNodeRange(nodes, ret); } else { if (isWildcard(nodes)) { return Collections.emptyList(); } nodes = parseNodeNum(nodes, ret); } } return ret; } private static final Pattern NODE_WILDCARD = Pattern.compile("^\\*?(?:,|$)"); private static boolean isWildcard(String node) { return NODE_WILDCARD.matcher(node).lookingAt(); } private static final Pattern NODE_RANGE = Pattern.compile("^\\[(\\d+),(\\d+)>(?:,|$)"); private static String parseNodeRange(String nodes, List<NodeSelection> into) { Matcher m = NODE_RANGE.matcher(nodes); if (m.find()) { String ret = nodes.substring(m.end()); Integer start = Integer.parseInt(m.group(1)); Integer end = Integer.parseInt(m.group(2)); if (start > end) { throw new InvalidSearchPathException("Invalid range"); } into.add(new NodeSelection(start, end)); return ret; } else { throw new InvalidSearchPathException("Invalid range expression"); } } private static String parseNodeNum(String nodes, List<NodeSelection> into) { Pair<String, String> numAndRest = halveAt(',', nodes); int nodeNum = Integer.parseInt(numAndRest.getFirst()); into.add(new NodeSelection(nodeNum, nodeNum + 1)); return numAndRest.getSecond(); } private static Integer parseGroup(String group) { if (group.isEmpty()) { return null; } if ("/".equals(group) || "*".equals(group)) { return null; } return Integer.parseInt(group); } private static Pair<String, String> halveAt(char divider, String string) { int pos = string.indexOf(divider); if (pos >= 0) { return new Pair<>(string.substring(0, pos), string.substring(pos + 1, 
string.length())); } return new Pair<>(string, ""); } @Override public String toString() { StringBuilder sb = new StringBuilder(); boolean first = true; for (NodeSelection p : nodes) { if (first) { first = false; } else { sb.append(','); } sb.append(p.toString()); } if (group != null) { sb.append('/').append(group); } return sb.toString(); } private static class NodeSelection { private final int from; private final int to; NodeSelection(int from, int to) { this.from = from; this.to = to; } public Collection<Integer> matches(int max) { if (from >= max) { return Collections.emptyList(); } int end = (to > max) ? max : to; return IntStream.range(from, end).boxed().collect(Collectors.toList()); } @Override public String toString() { if (from + 1 == to) { return Integer.toString(from); } else { return "[" + from + "," + to + ">"; } } } public static class InvalidSearchPathException extends RuntimeException { public InvalidSearchPathException(String message) { super(message); } public InvalidSearchPathException(String message, Throwable cause) { super(message, cause); } } }
Pass the original exception as second argument to avoid losing information.
public static Optional<SearchPath> fromString(String path) { if (path == null || path.isEmpty()) { return Optional.empty(); } if (path.indexOf(';') >= 0) { return Optional.empty(); } try { SearchPath sp = parseElement(path); if (sp.isEmpty()) { return Optional.empty(); } else { return Optional.of(sp); } } catch (NumberFormatException | InvalidSearchPathException e) { throw new InvalidSearchPathException("Invalid search path: " + path); } }
throw new InvalidSearchPathException("Invalid search path: " + path);
public static Optional<SearchPath> fromString(String path) { if (path == null || path.isEmpty()) { return Optional.empty(); } if (path.indexOf(';') >= 0) { return Optional.empty(); } try { SearchPath sp = parseElement(path); if (sp.isEmpty()) { return Optional.empty(); } else { return Optional.of(sp); } } catch (NumberFormatException | InvalidSearchPathException e) { throw new InvalidSearchPathException("Invalid search path: " + path, e); } }
class SearchPath { /** * Parse the search path and select nodes from the given cluster based on it. * * @param searchPath * unparsed search path expression (see: model.searchPath in Search * API reference) * @param cluster * the search cluster from which nodes are selected * @throws InvalidSearchPathException * if the searchPath is malformed * @return list of nodes chosen with the search path, or an empty list in which * case some other node selection logic should be used */ public static List<SearchCluster.Node> selectNodes(String searchPath, SearchCluster cluster) { Optional<SearchPath> sp = SearchPath.fromString(searchPath); if (sp.isPresent()) { return sp.get().mapToNodes(cluster); } else { return Collections.emptyList(); } } private final List<Part> parts; private final Integer row; private SearchPath(List<Part> parts, Integer row) { this.parts = parts; this.row = row; } private List<SearchCluster.Node> mapToNodes(SearchCluster cluster) { if (cluster.groups().isEmpty()) { return Collections.emptyList(); } SearchCluster.Group group = selectGroup(cluster); if (parts.isEmpty()) { return group.nodes(); } Set<Integer> wanted = new HashSet<>(); int max = group.nodes().size(); for (Part part : parts) { wanted.addAll(part.matches(max)); } List<SearchCluster.Node> sortedByDistKey = new ArrayList<>(group.nodes()); sortedByDistKey.sort(Comparator.comparingInt(SearchCluster.Node::key)); List<SearchCluster.Node> ret = new ArrayList<>(); for (int idx : wanted) { ret.add(sortedByDistKey.get(idx)); } return ret; } private boolean isEmpty() { return parts.isEmpty() && row == null; } private Group selectGroup(SearchCluster cluster) { ImmutableMap<Integer, SearchCluster.Group> byId = cluster.groups(); List<Integer> sortedKeys = new ArrayList<>(byId.keySet()); Collections.sort(sortedKeys); if (row != null && row < sortedKeys.size()) { return byId.get(sortedKeys.get(row)); } for (Integer id : sortedKeys) { SearchCluster.Group g = byId.get(id); if (g.hasSufficientCoverage()) { 
return g; } } return byId.get(sortedKeys.get(0)); } private static SearchPath parseElement(String element) { Pair<String, String> partAndRow = halveAt('/', element); List<Part> parts = parseParts(partAndRow.getFirst()); Integer row = parseRow(partAndRow.getSecond()); return new SearchPath(parts, row); } private static List<Part> parseParts(String parts) { List<Part> ret = new ArrayList<>(); while (parts.length() > 0) { if (parts.startsWith("[")) { parts = parsePartRange(parts, ret); } else { if (isWildcard(parts)) { return Collections.emptyList(); } parts = parsePartNum(parts, ret); } } return ret; } private static final Pattern WILDCARD_PART = Pattern.compile("^\\*?(?:,|$)"); private static boolean isWildcard(String part) { return WILDCARD_PART.matcher(part).lookingAt(); } private static final Pattern PART_RANGE = Pattern.compile("^\\[(\\d+),(\\d+)>(?:,|$)"); private static String parsePartRange(String parts, List<Part> into) { Matcher m = PART_RANGE.matcher(parts); if (m.find()) { String ret = parts.substring(m.end()); Integer start = Integer.parseInt(m.group(1)); Integer end = Integer.parseInt(m.group(2)); if (start > end) { throw new InvalidSearchPathException("Invalid range"); } into.add(new Part(start, end)); return ret; } else { throw new InvalidSearchPathException("Invalid range expression"); } } private static String parsePartNum(String parts, List<Part> into) { Pair<String, String> numAndRest = halveAt(',', parts); int partNum = Integer.parseInt(numAndRest.getFirst()); into.add(new Part(partNum, partNum + 1)); return numAndRest.getSecond(); } private static Integer parseRow(String row) { if (row.isEmpty()) { return null; } if ("/".equals(row) || "*".equals(row)) { return null; } return Integer.parseInt(row); } private static Pair<String, String> halveAt(char divider, String string) { int pos = string.indexOf(divider); if (pos >= 0) { return new Pair<>(string.substring(0, pos), string.substring(pos + 1, string.length())); } return new Pair<>(string, ""); } 
@Override public String toString() { StringBuilder sb = new StringBuilder(); boolean first = true; for (Part p : parts) { if (first) { first = false; } else { sb.append(','); } sb.append(p.toString()); } if (row != null) { sb.append('/').append(row); } return sb.toString(); } private static class Part { private final int from; private final int to; Part(int from, int to) { this.from = from; this.to = to; } public Collection<Integer> matches(int max) { if (from >= max) { return Collections.emptyList(); } int end = (to > max) ? max : to; return IntStream.range(from, end).boxed().collect(Collectors.toList()); } @Override public String toString() { if (from + 1 == to) { return Integer.toString(from); } else { return "[" + from + "," + to + ">"; } } } public static class InvalidSearchPathException extends RuntimeException { public InvalidSearchPathException(String message) { super(message); } } }
class SearchPath { /** * Parse the search path and select nodes from the given cluster based on it. * * @param searchPath * unparsed search path expression (see: model.searchPath in Search * API reference) * @param cluster * the search cluster from which nodes are selected * @throws InvalidSearchPathException * if the searchPath is malformed * @return list of nodes chosen with the search path, or an empty list in which * case some other node selection logic should be used */ public static List<SearchCluster.Node> selectNodes(String searchPath, SearchCluster cluster) { Optional<SearchPath> sp = SearchPath.fromString(searchPath); if (sp.isPresent()) { return sp.get().mapToNodes(cluster); } else { return Collections.emptyList(); } } private final List<NodeSelection> nodes; private final Integer group; private SearchPath(List<NodeSelection> nodes, Integer group) { this.nodes = nodes; this.group = group; } private List<SearchCluster.Node> mapToNodes(SearchCluster cluster) { if (cluster.groups().isEmpty()) { return Collections.emptyList(); } SearchCluster.Group selectedGroup = selectGroup(cluster); if (nodes.isEmpty()) { return selectedGroup.nodes(); } List<SearchCluster.Node> groupNodes = selectedGroup.nodes(); Set<Integer> wanted = new HashSet<>(); int max = groupNodes.size(); for (NodeSelection node : nodes) { wanted.addAll(node.matches(max)); } List<SearchCluster.Node> ret = new ArrayList<>(); for (int idx : wanted) { ret.add(groupNodes.get(idx)); } return ret; } private boolean isEmpty() { return nodes.isEmpty() && group == null; } private Group selectGroup(SearchCluster cluster) { if (group != null) { Optional<Group> specificGroup = cluster.group(group); if (specificGroup.isPresent()) { return specificGroup.get(); } else { throw new InvalidSearchPathException("Invalid searchPath, cluster does not have " + (group + 1) + " groups"); } } ImmutableCollection<Group> groups = cluster.groups().values(); for (Group g : groups) { if (g.hasSufficientCoverage()) { return g; } 
} return groups.iterator().next(); } private static SearchPath parseElement(String element) { Pair<String, String> nodesAndGroup = halveAt('/', element); List<NodeSelection> nodes = parseNodes(nodesAndGroup.getFirst()); Integer group = parseGroup(nodesAndGroup.getSecond()); return new SearchPath(nodes, group); } private static List<NodeSelection> parseNodes(String nodes) { List<NodeSelection> ret = new ArrayList<>(); while (nodes.length() > 0) { if (nodes.startsWith("[")) { nodes = parseNodeRange(nodes, ret); } else { if (isWildcard(nodes)) { return Collections.emptyList(); } nodes = parseNodeNum(nodes, ret); } } return ret; } private static final Pattern NODE_WILDCARD = Pattern.compile("^\\*?(?:,|$)"); private static boolean isWildcard(String node) { return NODE_WILDCARD.matcher(node).lookingAt(); } private static final Pattern NODE_RANGE = Pattern.compile("^\\[(\\d+),(\\d+)>(?:,|$)"); private static String parseNodeRange(String nodes, List<NodeSelection> into) { Matcher m = NODE_RANGE.matcher(nodes); if (m.find()) { String ret = nodes.substring(m.end()); Integer start = Integer.parseInt(m.group(1)); Integer end = Integer.parseInt(m.group(2)); if (start > end) { throw new InvalidSearchPathException("Invalid range"); } into.add(new NodeSelection(start, end)); return ret; } else { throw new InvalidSearchPathException("Invalid range expression"); } } private static String parseNodeNum(String nodes, List<NodeSelection> into) { Pair<String, String> numAndRest = halveAt(',', nodes); int nodeNum = Integer.parseInt(numAndRest.getFirst()); into.add(new NodeSelection(nodeNum, nodeNum + 1)); return numAndRest.getSecond(); } private static Integer parseGroup(String group) { if (group.isEmpty()) { return null; } if ("/".equals(group) || "*".equals(group)) { return null; } return Integer.parseInt(group); } private static Pair<String, String> halveAt(char divider, String string) { int pos = string.indexOf(divider); if (pos >= 0) { return new Pair<>(string.substring(0, pos), 
string.substring(pos + 1, string.length())); } return new Pair<>(string, ""); } @Override public String toString() { StringBuilder sb = new StringBuilder(); boolean first = true; for (NodeSelection p : nodes) { if (first) { first = false; } else { sb.append(','); } sb.append(p.toString()); } if (group != null) { sb.append('/').append(group); } return sb.toString(); } private static class NodeSelection { private final int from; private final int to; NodeSelection(int from, int to) { this.from = from; this.to = to; } public Collection<Integer> matches(int max) { if (from >= max) { return Collections.emptyList(); } int end = (to > max) ? max : to; return IntStream.range(from, end).boxed().collect(Collectors.toList()); } @Override public String toString() { if (from + 1 == to) { return Integer.toString(from); } else { return "[" + from + "," + to + ">"; } } } public static class InvalidSearchPathException extends RuntimeException { public InvalidSearchPathException(String message) { super(message); } public InvalidSearchPathException(String message, Throwable cause) { super(message, cause); } } }
I initially thought that the use of searchPath is so limited that doing more processing in the parsing phase is preferable. At the same time the matrix of nodes in SearchCluster doesn't really take all that much space either and will take care of these issues, so I'll do that. (Applies to the node list ordering too)
private Group selectGroup(SearchCluster cluster) { ImmutableMap<Integer, SearchCluster.Group> byId = cluster.groups(); List<Integer> sortedKeys = new ArrayList<>(byId.keySet()); Collections.sort(sortedKeys); if (row != null && row < sortedKeys.size()) { return byId.get(sortedKeys.get(row)); } for (Integer id : sortedKeys) { SearchCluster.Group g = byId.get(id); if (g.hasSufficientCoverage()) { return g; } } return byId.get(sortedKeys.get(0)); }
private Group selectGroup(SearchCluster cluster) { if (group != null) { Optional<Group> specificGroup = cluster.group(group); if (specificGroup.isPresent()) { return specificGroup.get(); } else { throw new InvalidSearchPathException("Invalid searchPath, cluster does not have " + (group + 1) + " groups"); } } ImmutableCollection<Group> groups = cluster.groups().values(); for (Group g : groups) { if (g.hasSufficientCoverage()) { return g; } } return groups.iterator().next(); }
class SearchPath { /** * Parse the search path and select nodes from the given cluster based on it. * * @param searchPath * unparsed search path expression (see: model.searchPath in Search * API reference) * @param cluster * the search cluster from which nodes are selected * @throws InvalidSearchPathException * if the searchPath is malformed * @return list of nodes chosen with the search path, or an empty list in which * case some other node selection logic should be used */ public static List<SearchCluster.Node> selectNodes(String searchPath, SearchCluster cluster) { Optional<SearchPath> sp = SearchPath.fromString(searchPath); if (sp.isPresent()) { return sp.get().mapToNodes(cluster); } else { return Collections.emptyList(); } } public static Optional<SearchPath> fromString(String path) { if (path == null || path.isEmpty()) { return Optional.empty(); } if (path.indexOf(';') >= 0) { return Optional.empty(); } try { SearchPath sp = parseElement(path); if (sp.isEmpty()) { return Optional.empty(); } else { return Optional.of(sp); } } catch (NumberFormatException | InvalidSearchPathException e) { throw new InvalidSearchPathException("Invalid search path: " + path); } } private final List<Part> parts; private final Integer row; private SearchPath(List<Part> parts, Integer row) { this.parts = parts; this.row = row; } private List<SearchCluster.Node> mapToNodes(SearchCluster cluster) { if (cluster.groups().isEmpty()) { return Collections.emptyList(); } SearchCluster.Group group = selectGroup(cluster); if (parts.isEmpty()) { return group.nodes(); } Set<Integer> wanted = new HashSet<>(); int max = group.nodes().size(); for (Part part : parts) { wanted.addAll(part.matches(max)); } List<SearchCluster.Node> sortedByDistKey = new ArrayList<>(group.nodes()); sortedByDistKey.sort(Comparator.comparingInt(SearchCluster.Node::key)); List<SearchCluster.Node> ret = new ArrayList<>(); for (int idx : wanted) { ret.add(sortedByDistKey.get(idx)); } return ret; } private boolean isEmpty() 
{ return parts.isEmpty() && row == null; } private static SearchPath parseElement(String element) { Pair<String, String> partAndRow = halveAt('/', element); List<Part> parts = parseParts(partAndRow.getFirst()); Integer row = parseRow(partAndRow.getSecond()); return new SearchPath(parts, row); } private static List<Part> parseParts(String parts) { List<Part> ret = new ArrayList<>(); while (parts.length() > 0) { if (parts.startsWith("[")) { parts = parsePartRange(parts, ret); } else { if (isWildcard(parts)) { return Collections.emptyList(); } parts = parsePartNum(parts, ret); } } return ret; } private static final Pattern WILDCARD_PART = Pattern.compile("^\\*?(?:,|$)"); private static boolean isWildcard(String part) { return WILDCARD_PART.matcher(part).lookingAt(); } private static final Pattern PART_RANGE = Pattern.compile("^\\[(\\d+),(\\d+)>(?:,|$)"); private static String parsePartRange(String parts, List<Part> into) { Matcher m = PART_RANGE.matcher(parts); if (m.find()) { String ret = parts.substring(m.end()); Integer start = Integer.parseInt(m.group(1)); Integer end = Integer.parseInt(m.group(2)); if (start > end) { throw new InvalidSearchPathException("Invalid range"); } into.add(new Part(start, end)); return ret; } else { throw new InvalidSearchPathException("Invalid range expression"); } } private static String parsePartNum(String parts, List<Part> into) { Pair<String, String> numAndRest = halveAt(',', parts); int partNum = Integer.parseInt(numAndRest.getFirst()); into.add(new Part(partNum, partNum + 1)); return numAndRest.getSecond(); } private static Integer parseRow(String row) { if (row.isEmpty()) { return null; } if ("/".equals(row) || "*".equals(row)) { return null; } return Integer.parseInt(row); } private static Pair<String, String> halveAt(char divider, String string) { int pos = string.indexOf(divider); if (pos >= 0) { return new Pair<>(string.substring(0, pos), string.substring(pos + 1, string.length())); } return new Pair<>(string, ""); } 
@Override public String toString() { StringBuilder sb = new StringBuilder(); boolean first = true; for (Part p : parts) { if (first) { first = false; } else { sb.append(','); } sb.append(p.toString()); } if (row != null) { sb.append('/').append(row); } return sb.toString(); } private static class Part { private final int from; private final int to; Part(int from, int to) { this.from = from; this.to = to; } public Collection<Integer> matches(int max) { if (from >= max) { return Collections.emptyList(); } int end = (to > max) ? max : to; return IntStream.range(from, end).boxed().collect(Collectors.toList()); } @Override public String toString() { if (from + 1 == to) { return Integer.toString(from); } else { return "[" + from + "," + to + ">"; } } } public static class InvalidSearchPathException extends RuntimeException { public InvalidSearchPathException(String message) { super(message); } } }
class SearchPath { /** * Parse the search path and select nodes from the given cluster based on it. * * @param searchPath * unparsed search path expression (see: model.searchPath in Search * API reference) * @param cluster * the search cluster from which nodes are selected * @throws InvalidSearchPathException * if the searchPath is malformed * @return list of nodes chosen with the search path, or an empty list in which * case some other node selection logic should be used */ public static List<SearchCluster.Node> selectNodes(String searchPath, SearchCluster cluster) { Optional<SearchPath> sp = SearchPath.fromString(searchPath); if (sp.isPresent()) { return sp.get().mapToNodes(cluster); } else { return Collections.emptyList(); } } public static Optional<SearchPath> fromString(String path) { if (path == null || path.isEmpty()) { return Optional.empty(); } if (path.indexOf(';') >= 0) { return Optional.empty(); } try { SearchPath sp = parseElement(path); if (sp.isEmpty()) { return Optional.empty(); } else { return Optional.of(sp); } } catch (NumberFormatException | InvalidSearchPathException e) { throw new InvalidSearchPathException("Invalid search path: " + path, e); } } private final List<NodeSelection> nodes; private final Integer group; private SearchPath(List<NodeSelection> nodes, Integer group) { this.nodes = nodes; this.group = group; } private List<SearchCluster.Node> mapToNodes(SearchCluster cluster) { if (cluster.groups().isEmpty()) { return Collections.emptyList(); } SearchCluster.Group selectedGroup = selectGroup(cluster); if (nodes.isEmpty()) { return selectedGroup.nodes(); } List<SearchCluster.Node> groupNodes = selectedGroup.nodes(); Set<Integer> wanted = new HashSet<>(); int max = groupNodes.size(); for (NodeSelection node : nodes) { wanted.addAll(node.matches(max)); } List<SearchCluster.Node> ret = new ArrayList<>(); for (int idx : wanted) { ret.add(groupNodes.get(idx)); } return ret; } private boolean isEmpty() { return nodes.isEmpty() && group == 
null; } private static SearchPath parseElement(String element) { Pair<String, String> nodesAndGroup = halveAt('/', element); List<NodeSelection> nodes = parseNodes(nodesAndGroup.getFirst()); Integer group = parseGroup(nodesAndGroup.getSecond()); return new SearchPath(nodes, group); } private static List<NodeSelection> parseNodes(String nodes) { List<NodeSelection> ret = new ArrayList<>(); while (nodes.length() > 0) { if (nodes.startsWith("[")) { nodes = parseNodeRange(nodes, ret); } else { if (isWildcard(nodes)) { return Collections.emptyList(); } nodes = parseNodeNum(nodes, ret); } } return ret; } private static final Pattern NODE_WILDCARD = Pattern.compile("^\\*?(?:,|$)"); private static boolean isWildcard(String node) { return NODE_WILDCARD.matcher(node).lookingAt(); } private static final Pattern NODE_RANGE = Pattern.compile("^\\[(\\d+),(\\d+)>(?:,|$)"); private static String parseNodeRange(String nodes, List<NodeSelection> into) { Matcher m = NODE_RANGE.matcher(nodes); if (m.find()) { String ret = nodes.substring(m.end()); Integer start = Integer.parseInt(m.group(1)); Integer end = Integer.parseInt(m.group(2)); if (start > end) { throw new InvalidSearchPathException("Invalid range"); } into.add(new NodeSelection(start, end)); return ret; } else { throw new InvalidSearchPathException("Invalid range expression"); } } private static String parseNodeNum(String nodes, List<NodeSelection> into) { Pair<String, String> numAndRest = halveAt(',', nodes); int nodeNum = Integer.parseInt(numAndRest.getFirst()); into.add(new NodeSelection(nodeNum, nodeNum + 1)); return numAndRest.getSecond(); } private static Integer parseGroup(String group) { if (group.isEmpty()) { return null; } if ("/".equals(group) || "*".equals(group)) { return null; } return Integer.parseInt(group); } private static Pair<String, String> halveAt(char divider, String string) { int pos = string.indexOf(divider); if (pos >= 0) { return new Pair<>(string.substring(0, pos), string.substring(pos + 1, 
string.length())); } return new Pair<>(string, ""); } @Override public String toString() { StringBuilder sb = new StringBuilder(); boolean first = true; for (NodeSelection p : nodes) { if (first) { first = false; } else { sb.append(','); } sb.append(p.toString()); } if (group != null) { sb.append('/').append(group); } return sb.toString(); } private static class NodeSelection { private final int from; private final int to; NodeSelection(int from, int to) { this.from = from; this.to = to; } public Collection<Integer> matches(int max) { if (from >= max) { return Collections.emptyList(); } int end = (to > max) ? max : to; return IntStream.range(from, end).boxed().collect(Collectors.toList()); } @Override public String toString() { if (from + 1 == to) { return Integer.toString(from); } else { return "[" + from + "," + to + ">"; } } } public static class InvalidSearchPathException extends RuntimeException { public InvalidSearchPathException(String message) { super(message); } public InvalidSearchPathException(String message, Throwable cause) { super(message, cause); } } }
The Search API doc doesn't mention overflows and the system test only tests against node overflows (which are just dropped). I'll switch over to an error since it's probably the path of least surprise.
private Group selectGroup(SearchCluster cluster) { ImmutableMap<Integer, SearchCluster.Group> byId = cluster.groups(); List<Integer> sortedKeys = new ArrayList<>(byId.keySet()); Collections.sort(sortedKeys); if (row != null && row < sortedKeys.size()) { return byId.get(sortedKeys.get(row)); } for (Integer id : sortedKeys) { SearchCluster.Group g = byId.get(id); if (g.hasSufficientCoverage()) { return g; } } return byId.get(sortedKeys.get(0)); }
private Group selectGroup(SearchCluster cluster) { if (group != null) { Optional<Group> specificGroup = cluster.group(group); if (specificGroup.isPresent()) { return specificGroup.get(); } else { throw new InvalidSearchPathException("Invalid searchPath, cluster does not have " + (group + 1) + " groups"); } } ImmutableCollection<Group> groups = cluster.groups().values(); for (Group g : groups) { if (g.hasSufficientCoverage()) { return g; } } return groups.iterator().next(); }
class SearchPath { /** * Parse the search path and select nodes from the given cluster based on it. * * @param searchPath * unparsed search path expression (see: model.searchPath in Search * API reference) * @param cluster * the search cluster from which nodes are selected * @throws InvalidSearchPathException * if the searchPath is malformed * @return list of nodes chosen with the search path, or an empty list in which * case some other node selection logic should be used */ public static List<SearchCluster.Node> selectNodes(String searchPath, SearchCluster cluster) { Optional<SearchPath> sp = SearchPath.fromString(searchPath); if (sp.isPresent()) { return sp.get().mapToNodes(cluster); } else { return Collections.emptyList(); } } public static Optional<SearchPath> fromString(String path) { if (path == null || path.isEmpty()) { return Optional.empty(); } if (path.indexOf(';') >= 0) { return Optional.empty(); } try { SearchPath sp = parseElement(path); if (sp.isEmpty()) { return Optional.empty(); } else { return Optional.of(sp); } } catch (NumberFormatException | InvalidSearchPathException e) { throw new InvalidSearchPathException("Invalid search path: " + path); } } private final List<Part> parts; private final Integer row; private SearchPath(List<Part> parts, Integer row) { this.parts = parts; this.row = row; } private List<SearchCluster.Node> mapToNodes(SearchCluster cluster) { if (cluster.groups().isEmpty()) { return Collections.emptyList(); } SearchCluster.Group group = selectGroup(cluster); if (parts.isEmpty()) { return group.nodes(); } Set<Integer> wanted = new HashSet<>(); int max = group.nodes().size(); for (Part part : parts) { wanted.addAll(part.matches(max)); } List<SearchCluster.Node> sortedByDistKey = new ArrayList<>(group.nodes()); sortedByDistKey.sort(Comparator.comparingInt(SearchCluster.Node::key)); List<SearchCluster.Node> ret = new ArrayList<>(); for (int idx : wanted) { ret.add(sortedByDistKey.get(idx)); } return ret; } private boolean isEmpty() 
{ return parts.isEmpty() && row == null; } private static SearchPath parseElement(String element) { Pair<String, String> partAndRow = halveAt('/', element); List<Part> parts = parseParts(partAndRow.getFirst()); Integer row = parseRow(partAndRow.getSecond()); return new SearchPath(parts, row); } private static List<Part> parseParts(String parts) { List<Part> ret = new ArrayList<>(); while (parts.length() > 0) { if (parts.startsWith("[")) { parts = parsePartRange(parts, ret); } else { if (isWildcard(parts)) { return Collections.emptyList(); } parts = parsePartNum(parts, ret); } } return ret; } private static final Pattern WILDCARD_PART = Pattern.compile("^\\*?(?:,|$)"); private static boolean isWildcard(String part) { return WILDCARD_PART.matcher(part).lookingAt(); } private static final Pattern PART_RANGE = Pattern.compile("^\\[(\\d+),(\\d+)>(?:,|$)"); private static String parsePartRange(String parts, List<Part> into) { Matcher m = PART_RANGE.matcher(parts); if (m.find()) { String ret = parts.substring(m.end()); Integer start = Integer.parseInt(m.group(1)); Integer end = Integer.parseInt(m.group(2)); if (start > end) { throw new InvalidSearchPathException("Invalid range"); } into.add(new Part(start, end)); return ret; } else { throw new InvalidSearchPathException("Invalid range expression"); } } private static String parsePartNum(String parts, List<Part> into) { Pair<String, String> numAndRest = halveAt(',', parts); int partNum = Integer.parseInt(numAndRest.getFirst()); into.add(new Part(partNum, partNum + 1)); return numAndRest.getSecond(); } private static Integer parseRow(String row) { if (row.isEmpty()) { return null; } if ("/".equals(row) || "*".equals(row)) { return null; } return Integer.parseInt(row); } private static Pair<String, String> halveAt(char divider, String string) { int pos = string.indexOf(divider); if (pos >= 0) { return new Pair<>(string.substring(0, pos), string.substring(pos + 1, string.length())); } return new Pair<>(string, ""); } 
@Override public String toString() { StringBuilder sb = new StringBuilder(); boolean first = true; for (Part p : parts) { if (first) { first = false; } else { sb.append(','); } sb.append(p.toString()); } if (row != null) { sb.append('/').append(row); } return sb.toString(); } private static class Part { private final int from; private final int to; Part(int from, int to) { this.from = from; this.to = to; } public Collection<Integer> matches(int max) { if (from >= max) { return Collections.emptyList(); } int end = (to > max) ? max : to; return IntStream.range(from, end).boxed().collect(Collectors.toList()); } @Override public String toString() { if (from + 1 == to) { return Integer.toString(from); } else { return "[" + from + "," + to + ">"; } } } public static class InvalidSearchPathException extends RuntimeException { public InvalidSearchPathException(String message) { super(message); } } }
class SearchPath { /** * Parse the search path and select nodes from the given cluster based on it. * * @param searchPath * unparsed search path expression (see: model.searchPath in Search * API reference) * @param cluster * the search cluster from which nodes are selected * @throws InvalidSearchPathException * if the searchPath is malformed * @return list of nodes chosen with the search path, or an empty list in which * case some other node selection logic should be used */ public static List<SearchCluster.Node> selectNodes(String searchPath, SearchCluster cluster) { Optional<SearchPath> sp = SearchPath.fromString(searchPath); if (sp.isPresent()) { return sp.get().mapToNodes(cluster); } else { return Collections.emptyList(); } } public static Optional<SearchPath> fromString(String path) { if (path == null || path.isEmpty()) { return Optional.empty(); } if (path.indexOf(';') >= 0) { return Optional.empty(); } try { SearchPath sp = parseElement(path); if (sp.isEmpty()) { return Optional.empty(); } else { return Optional.of(sp); } } catch (NumberFormatException | InvalidSearchPathException e) { throw new InvalidSearchPathException("Invalid search path: " + path, e); } } private final List<NodeSelection> nodes; private final Integer group; private SearchPath(List<NodeSelection> nodes, Integer group) { this.nodes = nodes; this.group = group; } private List<SearchCluster.Node> mapToNodes(SearchCluster cluster) { if (cluster.groups().isEmpty()) { return Collections.emptyList(); } SearchCluster.Group selectedGroup = selectGroup(cluster); if (nodes.isEmpty()) { return selectedGroup.nodes(); } List<SearchCluster.Node> groupNodes = selectedGroup.nodes(); Set<Integer> wanted = new HashSet<>(); int max = groupNodes.size(); for (NodeSelection node : nodes) { wanted.addAll(node.matches(max)); } List<SearchCluster.Node> ret = new ArrayList<>(); for (int idx : wanted) { ret.add(groupNodes.get(idx)); } return ret; } private boolean isEmpty() { return nodes.isEmpty() && group == 
null; } private static SearchPath parseElement(String element) { Pair<String, String> nodesAndGroup = halveAt('/', element); List<NodeSelection> nodes = parseNodes(nodesAndGroup.getFirst()); Integer group = parseGroup(nodesAndGroup.getSecond()); return new SearchPath(nodes, group); } private static List<NodeSelection> parseNodes(String nodes) { List<NodeSelection> ret = new ArrayList<>(); while (nodes.length() > 0) { if (nodes.startsWith("[")) { nodes = parseNodeRange(nodes, ret); } else { if (isWildcard(nodes)) { return Collections.emptyList(); } nodes = parseNodeNum(nodes, ret); } } return ret; } private static final Pattern NODE_WILDCARD = Pattern.compile("^\\*?(?:,|$)"); private static boolean isWildcard(String node) { return NODE_WILDCARD.matcher(node).lookingAt(); } private static final Pattern NODE_RANGE = Pattern.compile("^\\[(\\d+),(\\d+)>(?:,|$)"); private static String parseNodeRange(String nodes, List<NodeSelection> into) { Matcher m = NODE_RANGE.matcher(nodes); if (m.find()) { String ret = nodes.substring(m.end()); Integer start = Integer.parseInt(m.group(1)); Integer end = Integer.parseInt(m.group(2)); if (start > end) { throw new InvalidSearchPathException("Invalid range"); } into.add(new NodeSelection(start, end)); return ret; } else { throw new InvalidSearchPathException("Invalid range expression"); } } private static String parseNodeNum(String nodes, List<NodeSelection> into) { Pair<String, String> numAndRest = halveAt(',', nodes); int nodeNum = Integer.parseInt(numAndRest.getFirst()); into.add(new NodeSelection(nodeNum, nodeNum + 1)); return numAndRest.getSecond(); } private static Integer parseGroup(String group) { if (group.isEmpty()) { return null; } if ("/".equals(group) || "*".equals(group)) { return null; } return Integer.parseInt(group); } private static Pair<String, String> halveAt(char divider, String string) { int pos = string.indexOf(divider); if (pos >= 0) { return new Pair<>(string.substring(0, pos), string.substring(pos + 1, 
string.length())); } return new Pair<>(string, ""); } @Override public String toString() { StringBuilder sb = new StringBuilder(); boolean first = true; for (NodeSelection p : nodes) { if (first) { first = false; } else { sb.append(','); } sb.append(p.toString()); } if (group != null) { sb.append('/').append(group); } return sb.toString(); } private static class NodeSelection { private final int from; private final int to; NodeSelection(int from, int to) { this.from = from; this.to = to; } public Collection<Integer> matches(int max) { if (from >= max) { return Collections.emptyList(); } int end = (to > max) ? max : to; return IntStream.range(from, end).boxed().collect(Collectors.toList()); } @Override public String toString() { if (from + 1 == to) { return Integer.toString(from); } else { return "[" + from + "," + to + ">"; } } } public static class InvalidSearchPathException extends RuntimeException { public InvalidSearchPathException(String message) { super(message); } public InvalidSearchPathException(String message, Throwable cause) { super(message, cause); } } }
Feel free to remove debug logging statements
/**
 * Records the requested frozen state and, if it changed, wakes the converge loop.
 *
 * @param frozen the desired frozen state
 * @return whether the agent's actual frozen state already equals the requested one
 */
public boolean setFrozen(boolean frozen) {
    synchronized (monitor) {
        boolean requestChanged = (wantFrozen != frozen);
        if (requestChanged) {
            wantFrozen = frozen;
            context.log(logger, LogLevel.DEBUG, frozen ? "Freezing" : "Unfreezing");
            signalWorkToBeDone();
        }
        return isFrozen == frozen;
    }
}
context.log(logger, LogLevel.DEBUG, wantFrozen ? "Freezing" : "Unfreezing");
/**
 * Sets the wanted frozen state and signals the worker thread if the request changed.
 *
 * @param frozen the desired frozen state
 * @return true iff the agent's actual frozen state already matches the requested state
 */
public boolean setFrozen(boolean frozen) {
    synchronized (monitor) {
        if (wantFrozen != frozen) {
            wantFrozen = frozen;
            // wantFrozen was just assigned above, so this logs the requested transition
            context.log(logger, LogLevel.DEBUG, wantFrozen ? "Freezing" : "Unfreezing");
            // Wake the converge loop so the change takes effect without waiting a full interval
            signalWorkToBeDone();
        }
        return isFrozen == frozen;
    }
}
/**
 * Agent that drives one node's Docker container towards the state wanted by the node repository.
 * A dedicated tick thread repeatedly converges the container (create/start/stop/remove, service
 * management, metrics) and publishes resulting attributes back to the node repo and Orchestrator.
 *
 * Fixes in this revision:
 * - removed duplicated {@code @Override} on debugInfo() (repeating a non-repeatable annotation
 *   is a compile error),
 * - debugInfo() no longer NPEs when no node spec has been loaded yet (lastNode starts as null),
 * - shouldRestartServices() no longer calls Optional.get() on a possibly-absent current restart
 *   generation while building the reason message.
 */
class NodeAgentImpl implements NodeAgent {
    private static final long BYTES_IN_GB = 1_000_000_000L;
    private static final Logger logger = Logger.getLogger(NodeAgentImpl.class.getName());

    private final AtomicBoolean terminated = new AtomicBoolean(false);

    // All of these are guarded by 'monitor'
    private boolean isFrozen = true;
    private boolean wantFrozen = false;
    private boolean workToDoNow = true;
    private boolean expectNodeNotInNodeRepo = false;
    private final Object monitor = new Object();

    private DockerImage imageBeingDownloaded = null;

    private final NodeAgentContext context;
    private final NodeRepository nodeRepository;
    private final Orchestrator orchestrator;
    private final DockerOperations dockerOperations;
    private final StorageMaintainer storageMaintainer;
    private final Runnable aclMaintainer;
    private final Environment environment;
    private final Clock clock;
    private final Duration timeBetweenEachConverge;
    private final AthenzCredentialsMaintainer athenzCredentialsMaintainer;

    private int numberOfUnhandledException = 0;
    private Instant lastConverge;

    private final Thread loopThread;
    private final ScheduledExecutorService filebeatRestarter =
            Executors.newScheduledThreadPool(1, ThreadFactoryFactory.getDaemonThreadFactory("filebeatrestarter"));
    private Consumer<String> serviceRestarter;
    private Optional<Future<?>> currentFilebeatRestarter = Optional.empty();

    private boolean hasResumedNode = false;
    private boolean hasStartedServices = true;

    /**
     * ABSENT means container is definitely absent - A container that was absent will not suddenly appear without
     * NodeAgent explicitly starting it.
     * STARTING state is set just before we attempt to start a container, if successful we move to the next state.
     * Otherwise we can't be certain. A container that was running a minute ago may no longer be running without
     * NodeAgent doing anything (container could have crashed). Therefore we always have to ask docker daemon
     * to get updated state of the container.
     */
    enum ContainerState {
        ABSENT,
        STARTING,
        UNKNOWN
    }

    private ContainerState containerState = UNKNOWN;

    private NodeSpec lastNode = null;
    private CpuUsageReporter lastCpuMetric = new CpuUsageReporter();

    public NodeAgentImpl(
            final NodeAgentContext context,
            final NodeRepository nodeRepository,
            final Orchestrator orchestrator,
            final DockerOperations dockerOperations,
            final StorageMaintainer storageMaintainer,
            final Runnable aclMaintainer,
            final Environment environment,
            final Clock clock,
            final Duration timeBetweenEachConverge,
            final AthenzCredentialsMaintainer athenzCredentialsMaintainer) {
        this.context = context;
        this.nodeRepository = nodeRepository;
        this.orchestrator = orchestrator;
        this.dockerOperations = dockerOperations;
        this.storageMaintainer = storageMaintainer;
        this.aclMaintainer = aclMaintainer;
        this.environment = environment;
        this.clock = clock;
        this.timeBetweenEachConverge = timeBetweenEachConverge;
        this.lastConverge = clock.instant();
        this.athenzCredentialsMaintainer = athenzCredentialsMaintainer;

        this.loopThread = new Thread(() -> {
            try {
                while (!terminated.get()) tick();
            } catch (Throwable t) {
                context.log(logger, LogLevel.ERROR, "Unhandled throwable, taking down system.", t);
                System.exit(234);
            }
        });
        this.loopThread.setName("tick-" + context.hostname());
    }

    @Override
    public Map<String, Object> debugInfo() {
        Map<String, Object> debug = new LinkedHashMap<>();
        debug.put("hostname", context.hostname());
        debug.put("isFrozen", isFrozen);
        debug.put("wantFrozen", wantFrozen);
        debug.put("terminated", terminated);
        debug.put("workToDoNow", workToDoNow);
        // lastNode is null until the first converge has loaded a node spec
        debug.put("nodeRepoState", lastNode == null ? "[unknown]" : lastNode.getState().name());
        return debug;
    }

    @Override
    public void start() {
        context.log(logger, "Starting with interval " + timeBetweenEachConverge.toMillis() + " ms");
        loopThread.start();

        serviceRestarter = service -> {
            try {
                ProcessResult processResult = dockerOperations.executeCommandInContainerAsRoot(
                        context.containerName(), "service", service, "restart");
                if (!processResult.isSuccess()) {
                    context.log(logger, LogLevel.ERROR,
                            "Failed to restart service " + service + ": " + processResult);
                }
            } catch (Exception e) {
                context.log(logger, LogLevel.ERROR, "Failed to restart service " + service, e);
            }
        };
    }

    @Override
    public void stop() {
        filebeatRestarter.shutdown();
        if (!terminated.compareAndSet(false, true)) {
            throw new RuntimeException("Can not re-stop a node agent.");
        }
        signalWorkToBeDone();
        // Wait until both the converge loop and the filebeat restarter have actually stopped
        do {
            try {
                loopThread.join();
                filebeatRestarter.awaitTermination(Long.MAX_VALUE, TimeUnit.NANOSECONDS);
            } catch (InterruptedException e) {
                context.log(logger, LogLevel.ERROR,
                        "Interrupted while waiting for converge thread and filebeatRestarter scheduler to shutdown");
            }
        } while (loopThread.isAlive() || !filebeatRestarter.isTerminated());
        context.log(logger, "Stopped");
    }

    /**
     * Verifies that service is healthy, otherwise throws an exception. The default implementation does
     * nothing, override if it's necessary to verify that a service is healthy before resuming.
     */
    protected void verifyHealth(NodeSpec node) {
    }

    void startServicesIfNeeded() {
        if (!hasStartedServices) {
            context.log(logger, "Starting services");
            dockerOperations.startServices(context.containerName());
            hasStartedServices = true;
        }
    }

    void resumeNodeIfNeeded(NodeSpec node) {
        if (!hasResumedNode) {
            if (!currentFilebeatRestarter.isPresent()) {
                storageMaintainer.writeMetricsConfig(context.containerName(), node);
                storageMaintainer.writeFilebeatConfig(context.containerName(), node);
                // Restart filebeat daily as a workaround for it holding on to deleted log files
                currentFilebeatRestarter = Optional.of(filebeatRestarter.scheduleWithFixedDelay(
                        () -> serviceRestarter.accept("filebeat"), 1, 1, TimeUnit.DAYS));
            }
            context.log(logger, LogLevel.DEBUG, "Starting optional node program resume command");
            dockerOperations.resumeNode(context.containerName());
            hasResumedNode = true;
        }
    }

    private void updateNodeRepoWithCurrentAttributes(final NodeSpec node) {
        final NodeAttributes currentNodeAttributes = new NodeAttributes()
                .withRestartGeneration(node.getCurrentRestartGeneration())
                .withRebootGeneration(node.getCurrentRebootGeneration())
                .withDockerImage(node.getCurrentDockerImage().orElse(new DockerImage("")));
        final NodeAttributes wantedNodeAttributes = new NodeAttributes()
                .withRestartGeneration(node.getWantedRestartGeneration())
                .withRebootGeneration(node.getWantedRebootGeneration())
                // Only claim the wanted image once the container is (believed to be) running it
                .withDockerImage(node.getWantedDockerImage().filter(n -> containerState == UNKNOWN).orElse(new DockerImage("")));

        publishStateToNodeRepoIfChanged(currentNodeAttributes, wantedNodeAttributes);
    }

    private void publishStateToNodeRepoIfChanged(NodeAttributes currentAttributes, NodeAttributes wantedAttributes) {
        if (!currentAttributes.equals(wantedAttributes)) {
            context.log(logger, "Publishing new set of attributes to node repo: %s -> %s",
                    currentAttributes, wantedAttributes);
            nodeRepository.updateNodeAttributes(context.hostname().value(), wantedAttributes);
        }
    }

    private void startContainer(NodeSpec node) {
        ContainerData containerData = createContainerData(context, node);
        dockerOperations.createContainer(context.containerName(), node, containerData);
        dockerOperations.startContainer(context.containerName());
        lastCpuMetric = new CpuUsageReporter();

        hasStartedServices = true; // Automatically started with the container
        hasResumedNode = false;
        context.log(logger, "Container successfully started, new containerState is " + containerState);
    }

    private Optional<Container> removeContainerIfNeededUpdateContainerState(NodeSpec node, Optional<Container> existingContainer) {
        return existingContainer
                .flatMap(container -> removeContainerIfNeeded(node, container))
                .map(container -> {
                    shouldRestartServices(node).ifPresent(restartReason -> {
                        context.log(logger, "Will restart services: " + restartReason);
                        restartServices(node, container);
                    });
                    return container;
                });
    }

    /** Returns a reason to restart services, or empty if no restart is needed. */
    private Optional<String> shouldRestartServices(NodeSpec node) {
        if (!node.getWantedRestartGeneration().isPresent()) return Optional.empty();

        if (!node.getCurrentRestartGeneration().isPresent() ||
                node.getCurrentRestartGeneration().get() < node.getWantedRestartGeneration().get()) {
            // Use orElse(null) for the message: current generation may be absent here
            return Optional.of("Restart requested - wanted restart generation has been bumped: "
                    + node.getCurrentRestartGeneration().orElse(null) + " -> "
                    + node.getWantedRestartGeneration().get());
        }
        return Optional.empty();
    }

    private void restartServices(NodeSpec node, Container existingContainer) {
        if (existingContainer.state.isRunning() && node.getState() == Node.State.active) {
            context.log(logger, "Restarting services");
            // Since we are restarting the services we need to suspend the node first
            orchestratorSuspendNode();
            dockerOperations.restartVespa(context.containerName());
        }
    }

    @Override
    public void stopServices() {
        context.log(logger, "Stopping services");
        if (containerState == ABSENT) return;
        try {
            hasStartedServices = hasResumedNode = false;
            dockerOperations.stopServices(context.containerName());
        } catch (ContainerNotFoundException e) {
            containerState = ABSENT;
        }
    }

    @Override
    public void suspend() {
        context.log(logger, "Suspending services on node");
        if (containerState == ABSENT) return;
        try {
            hasResumedNode = false;
            dockerOperations.suspendNode(context.containerName());
        } catch (ContainerNotFoundException e) {
            containerState = ABSENT;
        } catch (RuntimeException e) {
            // It's bad to continue as-if nothing happened, but on the other hand if
            // the container is already gone there is nothing left to suspend
            context.log(logger, LogLevel.WARNING, "Failed trying to suspend container", e);
        }
    }

    /** Returns a reason to remove the container, or empty if it should be kept. */
    private Optional<String> shouldRemoveContainer(NodeSpec node, Container existingContainer) {
        final Node.State nodeState = node.getState();
        if (nodeState == Node.State.dirty || nodeState == Node.State.provisioned) {
            return Optional.of("Node in state " + nodeState + ", container should no longer be running");
        }
        if (node.getWantedDockerImage().isPresent() &&
                !node.getWantedDockerImage().get().equals(existingContainer.image)) {
            return Optional.of("The node is supposed to run a new Docker image: "
                    + existingContainer + " -> " + node.getWantedDockerImage().get());
        }
        if (!existingContainer.state.isRunning()) {
            return Optional.of("Container no longer running");
        }

        ContainerResources wantedContainerResources = ContainerResources.from(
                node.getMinCpuCores(), node.getMinMainMemoryAvailableGb());
        if (!wantedContainerResources.equals(existingContainer.resources)) {
            return Optional.of("Container should be running with different resource allocation, wanted: "
                    + wantedContainerResources + ", actual: " + existingContainer.resources);
        }

        if (containerState == STARTING) return Optional.of("Container failed to start");
        return Optional.empty();
    }

    private Optional<Container> removeContainerIfNeeded(NodeSpec node, Container existingContainer) {
        Optional<String> removeReason = shouldRemoveContainer(node, existingContainer);
        if (removeReason.isPresent()) {
            context.log(logger, "Will remove container: " + removeReason.get());

            if (existingContainer.state.isRunning()) {
                if (node.getState() == Node.State.active) {
                    orchestratorSuspendNode();
                }
                try {
                    if (node.getState() != Node.State.dirty) {
                        suspend();
                    }
                    stopServices();
                } catch (Exception e) {
                    context.log(logger, LogLevel.WARNING, "Failed stopping services, ignoring", e);
                }
            }
            stopFilebeatSchedulerIfNeeded();
            dockerOperations.removeContainer(existingContainer);
            containerState = ABSENT;
            context.log(logger, "Container successfully removed, new containerState is " + containerState);
            return Optional.empty();
        }
        return Optional.of(existingContainer);
    }

    private void scheduleDownLoadIfNeeded(NodeSpec node) {
        if (node.getCurrentDockerImage().equals(node.getWantedDockerImage())) return;

        if (dockerOperations.pullImageAsyncIfNeeded(node.getWantedDockerImage().get())) {
            imageBeingDownloaded = node.getWantedDockerImage().get();
        } else if (imageBeingDownloaded != null) { // Image was downloading, but now it's ready
            imageBeingDownloaded = null;
        }
    }

    private void signalWorkToBeDone() {
        synchronized (monitor) {
            if (!workToDoNow) {
                workToDoNow = true;
                context.log(logger, LogLevel.DEBUG, "Signaling work to be done");
                monitor.notifyAll();
            }
        }
    }

    void tick() {
        boolean isFrozenCopy;
        synchronized (monitor) {
            // Sleep until the next scheduled converge or until work is explicitly signaled
            while (!workToDoNow) {
                long remainder = timeBetweenEachConverge
                        .minus(Duration.between(lastConverge, clock.instant()))
                        .toMillis();
                if (remainder > 0) {
                    try {
                        monitor.wait(remainder);
                    } catch (InterruptedException e) {
                        context.log(logger, LogLevel.ERROR, "Interrupted while sleeping before tick, ignoring");
                    }
                } else break;
            }
            lastConverge = clock.instant();
            workToDoNow = false;

            if (isFrozen != wantFrozen) {
                isFrozen = wantFrozen;
                context.log(logger, "Updated NodeAgent's frozen state, new value: isFrozen: " + isFrozen);
            }
            isFrozenCopy = isFrozen;
        }

        doAtTickStart(isFrozen);
        boolean converged = false;

        if (isFrozenCopy) {
            context.log(logger, LogLevel.DEBUG, "tick: isFrozen");
        } else {
            try {
                converge();
                converged = true;
            } catch (OrchestratorException e) {
                context.log(logger, e.getMessage());
            } catch (ContainerNotFoundException e) {
                containerState = ABSENT;
                context.log(logger, LogLevel.WARNING,
                        "Container unexpectedly gone, resetting containerState to " + containerState);
            } catch (DockerException e) {
                numberOfUnhandledException++;
                context.log(logger, LogLevel.ERROR, "Caught a DockerException", e);
            } catch (Exception e) {
                numberOfUnhandledException++;
                context.log(logger, LogLevel.ERROR, "Unhandled exception, ignoring.", e);
            }
        }

        doAtTickEnd(converged);
    }

    // Converge the container towards the state wanted by the node repository
    void converge() {
        final Optional<NodeSpec> optionalNode = nodeRepository.getOptionalNode(context.hostname().value());

        // We just removed the node from node repo, so this is expected until NodeAdmin stops this NodeAgent
        if (!optionalNode.isPresent() && expectNodeNotInNodeRepo) return;

        final NodeSpec node = optionalNode.orElseThrow(() ->
                new IllegalStateException(String.format("Node '%s' missing from node repository", context.hostname())));
        expectNodeNotInNodeRepo = false;

        Optional<Container> container = getContainer();
        if (!node.equals(lastNode)) {
            // Every time the node spec changes, we should clear the metrics for this container as the dimensions
            // will change and we will be reporting duplicate metrics.
            if (container.map(c -> c.state.isRunning()).orElse(false)) {
                storageMaintainer.writeMetricsConfig(context.containerName(), node);
            }
            context.log(logger, LogLevel.DEBUG, "Loading new node spec: " + node.toString());
            lastNode = node;
        }

        switch (node.getState()) {
            case ready:
            case reserved:
            case parked:
            case failed:
                removeContainerIfNeededUpdateContainerState(node, container);
                updateNodeRepoWithCurrentAttributes(node);
                break;
            case active:
                storageMaintainer.handleCoreDumpsForContainer(context.containerName(), node);

                // Remove old files if disk is more than 80% utilized
                storageMaintainer.getDiskUsageFor(context.containerName())
                        .map(diskUsage -> (double) diskUsage / BYTES_IN_GB / node.getMinDiskAvailableGb())
                        .filter(diskUtil -> diskUtil >= 0.8)
                        .ifPresent(diskUtil -> storageMaintainer.removeOldFilesFromNode(context.containerName()));

                scheduleDownLoadIfNeeded(node);
                if (isDownloadingImage()) {
                    context.log(logger, LogLevel.DEBUG,
                            "Waiting for image to download " + imageBeingDownloaded.asString());
                    return;
                }
                container = removeContainerIfNeededUpdateContainerState(node, container);
                if (!container.isPresent()) {
                    containerState = STARTING;
                    startContainer(node);
                    containerState = UNKNOWN;
                    aclMaintainer.run();
                }
                verifyHealth(node);
                startServicesIfNeeded();
                resumeNodeIfNeeded(node);
                athenzCredentialsMaintainer.converge();

                doBeforeConverge(node);

                // Because it's possible to override the exit status in docker containers, we need to check that the
                // node is actually running by asking the Orchestrator to resume it.
                updateNodeRepoWithCurrentAttributes(node);
                context.log(logger, "Call resume against Orchestrator");
                orchestrator.resume(context.hostname().value());
                break;
            case inactive:
                removeContainerIfNeededUpdateContainerState(node, container);
                updateNodeRepoWithCurrentAttributes(node);
                break;
            case provisioned:
                nodeRepository.setNodeState(context.hostname().value(), Node.State.dirty);
                break;
            case dirty:
                removeContainerIfNeededUpdateContainerState(node, container);
                context.log(logger, "State is " + node.getState()
                        + ", will delete application storage and mark node as ready");
                athenzCredentialsMaintainer.clearCredentials();
                storageMaintainer.cleanupNodeStorage(context.containerName(), node);
                updateNodeRepoWithCurrentAttributes(node);
                nodeRepository.setNodeState(context.hostname().value(), Node.State.ready);
                expectNodeNotInNodeRepo = true;
                break;
            default:
                throw new RuntimeException("UNKNOWN STATE " + node.getState().name());
        }
    }

    /**
     * Execute at start of tick
     *
     * WARNING: MUST NOT throw an exception
     *
     * @param frozen whether the agent is frozen
     */
    protected void doAtTickStart(boolean frozen) {}

    /**
     * Execute at end of tick
     *
     * WARNING: MUST NOT throw an exception
     *
     * @param converged Whether the tick converged: converge() was called without exception
     */
    protected void doAtTickEnd(boolean converged) {}

    /**
     * Execute at end of a (so far) successful converge of an active node
     *
     * Method a subclass can override to execute code:
     * - Called right before the node repo is updated with converged attributes, and
     *   Orchestrator resume() is called
     * - The only way to avoid a successful converge and the update to the node repo
     *   and Orchestrator is to throw an exception
     * - The method is only called in a tick if the node is active, not frozen, and
     *   there are no prior phases of the converge that fails
     *
     * @throws RuntimeException to fail the convergence
     */
    protected void doBeforeConverge(NodeSpec node) {}

    private void stopFilebeatSchedulerIfNeeded() {
        if (currentFilebeatRestarter.isPresent()) {
            currentFilebeatRestarter.get().cancel(true);
            currentFilebeatRestarter = Optional.empty();
        }
    }

    @SuppressWarnings("unchecked")
    public void updateContainerNodeMetrics() {
        final NodeSpec node = lastNode;
        if (node == null || containerState != UNKNOWN) return;

        Optional<ContainerStats> containerStats = dockerOperations.getContainerStats(context.containerName());
        if (!containerStats.isPresent()) return;

        Dimensions.Builder dimensionsBuilder = new Dimensions.Builder()
                .add("host", context.hostname().value())
                .add("role", "tenants")
                .add("state", node.getState().toString())
                .add("parentHostname", environment.getParentHostHostname());
        node.getAllowedToBeDown().ifPresent(allowed ->
                dimensionsBuilder.add("orchestratorState", allowed ? "ALLOWED_TO_BE_DOWN" : "NO_REMARKS"));
        Dimensions dimensions = dimensionsBuilder.build();

        ContainerStats stats = containerStats.get();
        final String APP = MetricReceiverWrapper.APPLICATION_NODE;
        final int totalNumCpuCores = ((List<Number>) ((Map) stats.getCpuStats().get("cpu_usage")).get("percpu_usage")).size();
        final long cpuContainerKernelTime = ((Number) ((Map) stats.getCpuStats().get("cpu_usage")).get("usage_in_kernelmode")).longValue();
        final long cpuContainerTotalTime = ((Number) ((Map) stats.getCpuStats().get("cpu_usage")).get("total_usage")).longValue();
        final long cpuSystemTotalTime = ((Number) stats.getCpuStats().get("system_cpu_usage")).longValue();
        final long memoryTotalBytes = ((Number) stats.getMemoryStats().get("limit")).longValue();
        final long memoryTotalBytesUsage = ((Number) stats.getMemoryStats().get("usage")).longValue();
        final long memoryTotalBytesCache = ((Number) ((Map) stats.getMemoryStats().get("stats")).get("cache")).longValue();
        final long diskTotalBytes = (long) (node.getMinDiskAvailableGb() * BYTES_IN_GB);
        final Optional<Long> diskTotalBytesUsed = storageMaintainer.getDiskUsageFor(context.containerName());

        lastCpuMetric.updateCpuDeltas(cpuSystemTotalTime, cpuContainerTotalTime, cpuContainerKernelTime);

        // CPU usage scaled to the fraction of host cores allocated to this node
        final double allocatedCpuRatio = node.getMinCpuCores() / totalNumCpuCores;
        double cpuUsageRatioOfAllocated = lastCpuMetric.getCpuUsageRatio() / allocatedCpuRatio;
        double cpuKernelUsageRatioOfAllocated = lastCpuMetric.getCpuKernelUsageRatio() / allocatedCpuRatio;

        long memoryTotalBytesUsed = memoryTotalBytesUsage - memoryTotalBytesCache;
        double memoryUsageRatio = (double) memoryTotalBytesUsed / memoryTotalBytes;
        Optional<Double> diskUsageRatio = diskTotalBytesUsed.map(used -> (double) used / diskTotalBytes);

        List<DimensionMetrics> metrics = new ArrayList<>();
        DimensionMetrics.Builder systemMetricsBuilder = new DimensionMetrics.Builder(APP, dimensions)
                .withMetric("mem.limit", memoryTotalBytes)
                .withMetric("mem.used", memoryTotalBytesUsed)
                .withMetric("mem.util", 100 * memoryUsageRatio)
                .withMetric("cpu.util", 100 * cpuUsageRatioOfAllocated)
                .withMetric("cpu.sys.util", 100 * cpuKernelUsageRatioOfAllocated)
                .withMetric("disk.limit", diskTotalBytes);

        diskTotalBytesUsed.ifPresent(diskUsed -> systemMetricsBuilder.withMetric("disk.used", diskUsed));
        diskUsageRatio.ifPresent(diskRatio -> systemMetricsBuilder.withMetric("disk.util", 100 * diskRatio));
        metrics.add(systemMetricsBuilder.build());

        stats.getNetworks().forEach((interfaceName, interfaceStats) -> {
            Dimensions netDims = dimensionsBuilder.add("interface", interfaceName).build();
            Map<String, Number> infStats = (Map<String, Number>) interfaceStats;

            DimensionMetrics networkMetrics = new DimensionMetrics.Builder(APP, netDims)
                    .withMetric("net.in.bytes", infStats.get("rx_bytes").longValue())
                    .withMetric("net.in.errors", infStats.get("rx_errors").longValue())
                    .withMetric("net.in.dropped", infStats.get("rx_dropped").longValue())
                    .withMetric("net.out.bytes", infStats.get("tx_bytes").longValue())
                    .withMetric("net.out.errors", infStats.get("tx_errors").longValue())
                    .withMetric("net.out.dropped", infStats.get("tx_dropped").longValue())
                    .build();
            metrics.add(networkMetrics);
        });

        pushMetricsToContainer(metrics);
    }

    private void pushMetricsToContainer(List<DimensionMetrics> metrics) {
        StringBuilder params = new StringBuilder();
        try {
            for (DimensionMetrics dimensionMetrics : metrics) {
                params.append(dimensionMetrics.toSecretAgentReport());
            }
            String wrappedMetrics = "s:" + params.toString();

            // Push metrics to the metrics proxy in each container
            String[] command = {"vespa-rpc-invoke", "-t", "2", "tcp/localhost:19091", "setExtraMetrics", wrappedMetrics};
            dockerOperations.executeCommandInContainerAsRoot(context.containerName(), 5L, command);
        } catch (DockerExecTimeoutException | JsonProcessingException e) {
            context.log(logger, LogLevel.WARNING, "Failed to push metrics to container", e);
        }
    }

    private Optional<Container> getContainer() {
        if (containerState == ABSENT) return Optional.empty();
        Optional<Container> container = dockerOperations.getContainer(context.containerName());
        if (!container.isPresent()) containerState = ABSENT;
        return container;
    }

    @Override
    public boolean isDownloadingImage() {
        return imageBeingDownloaded != null;
    }

    @Override
    public int getAndResetNumberOfUnhandledExceptions() {
        int temp = numberOfUnhandledException;
        numberOfUnhandledException = 0;
        return temp;
    }

    /** Tracks CPU usage deltas between consecutive calls to updateCpuDeltas(). */
    class CpuUsageReporter {
        private long containerKernelUsage = 0;
        private long totalContainerUsage = 0;
        private long totalSystemUsage = 0;

        private long deltaContainerKernelUsage;
        private long deltaContainerUsage;
        private long deltaSystemUsage;

        private void updateCpuDeltas(long totalSystemUsage, long totalContainerUsage, long containerKernelUsage) {
            // First call only establishes a baseline (delta of 0)
            deltaSystemUsage = this.totalSystemUsage == 0 ? 0 : (totalSystemUsage - this.totalSystemUsage);
            deltaContainerUsage = totalContainerUsage - this.totalContainerUsage;
            deltaContainerKernelUsage = containerKernelUsage - this.containerKernelUsage;

            this.totalSystemUsage = totalSystemUsage;
            this.totalContainerUsage = totalContainerUsage;
            this.containerKernelUsage = containerKernelUsage;
        }

        /**
         * Returns the CPU usage ratio for the docker container that this NodeAgent is managing
         * in the time between the last two times updateCpuDeltas() was called. This is calculated
         * by dividing the CPU time used by the container with the CPU time used by the entire system.
         */
        double getCpuUsageRatio() {
            return deltaSystemUsage == 0 ? Double.NaN : (double) deltaContainerUsage / deltaSystemUsage;
        }

        double getCpuKernelUsageRatio() {
            return deltaSystemUsage == 0 ? Double.NaN : (double) deltaContainerKernelUsage / deltaSystemUsage;
        }
    }

    // TODO: Also skip orchestration if we're downgrading in test/staging
    private void orchestratorSuspendNode() {
        context.log(logger, "Ask Orchestrator for permission to suspend node");
        orchestrator.suspend(context.hostname().value());
    }

    protected ContainerData createContainerData(NodeAgentContext context, NodeSpec node) {
        return (pathInContainer, data) -> {
            throw new UnsupportedOperationException("addFile not implemented");
        };
    }
}
/**
 * Converges the Docker container of a single node towards the wanted state fetched from the
 * node repository. Runs its own "tick" loop thread; work can also be triggered immediately
 * via {@code signalWorkToBeDone()}.
 *
 * <p>Review fixes applied: removed a duplicated {@code @Override} annotation before
 * {@code debugInfo()} ({@code @Override} is not repeatable, so the duplicate is a compile error);
 * fixed a potential {@code NoSuchElementException} in {@code shouldRestartServices()}; and the
 * {@code StorageMaintainer} calls now pass the {@code NodeAgentContext} to match the signatures
 * declared by {@code StorageMaintainer} in this file.
 */
class NodeAgentImpl implements NodeAgent {

    // 1 GB in bytes; converts the node spec's GB figures to byte counts.
    private static final long BYTES_IN_GB = 1_000_000_000L;

    private static final Logger logger = Logger.getLogger(NodeAgentImpl.class.getName());

    private final AtomicBoolean terminated = new AtomicBoolean(false);

    private boolean isFrozen = true;
    private boolean wantFrozen = false;
    private boolean workToDoNow = true;
    private boolean expectNodeNotInNodeRepo = false;

    // Guards the wait/notify handshake between tick() and signalWorkToBeDone(), and the frozen flags.
    private final Object monitor = new Object();

    private DockerImage imageBeingDownloaded = null;

    private final NodeAgentContext context;
    private final NodeRepository nodeRepository;
    private final Orchestrator orchestrator;
    private final DockerOperations dockerOperations;
    private final StorageMaintainer storageMaintainer;
    private final Runnable aclMaintainer;
    private final Environment environment;
    private final Clock clock;
    private final Duration timeBetweenEachConverge;
    private final AthenzCredentialsMaintainer athenzCredentialsMaintainer;

    private int numberOfUnhandledException = 0;
    private Instant lastConverge;

    private final Thread loopThread;
    private final ScheduledExecutorService filebeatRestarter =
            Executors.newScheduledThreadPool(1, ThreadFactoryFactory.getDaemonThreadFactory("filebeatrestarter"));
    private Consumer<String> serviceRestarter;
    private Optional<Future<?>> currentFilebeatRestarter = Optional.empty();

    private boolean hasResumedNode = false;
    private boolean hasStartedServices = true;

    /**
     * ABSENT means container is definitely absent - A container that was absent will not suddenly appear without
     * NodeAgent explicitly starting it.
     * STARTING state is set just before we attempt to start a container, if successful we move to the next state.
     * Otherwise we can't be certain. A container that was running a minute ago may no longer be running without
     * NodeAgent doing anything (container could have crashed). Therefore we always have to ask docker daemon
     * to get updated state of the container.
     */
    enum ContainerState {
        ABSENT,
        STARTING,
        UNKNOWN
    }

    private ContainerState containerState = UNKNOWN;

    private NodeSpec lastNode = null;
    private CpuUsageReporter lastCpuMetric = new CpuUsageReporter();

    public NodeAgentImpl(
            final NodeAgentContext context,
            final NodeRepository nodeRepository,
            final Orchestrator orchestrator,
            final DockerOperations dockerOperations,
            final StorageMaintainer storageMaintainer,
            final Runnable aclMaintainer,
            final Environment environment,
            final Clock clock,
            final Duration timeBetweenEachConverge,
            final AthenzCredentialsMaintainer athenzCredentialsMaintainer) {
        this.context = context;
        this.nodeRepository = nodeRepository;
        this.orchestrator = orchestrator;
        this.dockerOperations = dockerOperations;
        this.storageMaintainer = storageMaintainer;
        this.aclMaintainer = aclMaintainer;
        this.environment = environment;
        this.clock = clock;
        this.timeBetweenEachConverge = timeBetweenEachConverge;
        this.lastConverge = clock.instant();
        this.athenzCredentialsMaintainer = athenzCredentialsMaintainer;

        this.loopThread = new Thread(() -> {
            try {
                while (!terminated.get()) tick();
            } catch (Throwable t) {
                // An escaped Throwable means the agent is broken beyond repair; take the process down.
                context.log(logger, LogLevel.ERROR, "Unhandled throwable, taking down system.", t);
                System.exit(234);
            }
        });
        this.loopThread.setName("tick-" + context.hostname());
    }

    /** Returns a snapshot of this agent's internal state for debugging/inspection. */
    @Override
    public Map<String, Object> debugInfo() {
        Map<String, Object> debug = new LinkedHashMap<>();
        debug.put("hostname", context.hostname());
        debug.put("isFrozen", isFrozen);
        debug.put("wantFrozen", wantFrozen);
        debug.put("terminated", terminated);
        debug.put("workToDoNow", workToDoNow);
        debug.put("nodeRepoState", lastNode.getState().name());
        return debug;
    }

    /** Starts the tick loop thread and initializes the service restarter used for filebeat. */
    @Override
    public void start() {
        context.log(logger, "Starting with interval " + timeBetweenEachConverge.toMillis() + " ms");
        loopThread.start();

        serviceRestarter = service -> {
            try {
                ProcessResult processResult = dockerOperations.executeCommandInContainerAsRoot(
                        context.containerName(), "service", service, "restart");

                if (!processResult.isSuccess()) {
                    context.log(logger, LogLevel.ERROR, "Failed to restart service " + service + ": " + processResult);
                }
            } catch (Exception e) {
                context.log(logger, LogLevel.ERROR, "Failed to restart service " + service, e);
            }
        };
    }

    /** Stops the tick loop and the filebeat restarter scheduler; blocks until both are done. */
    @Override
    public void stop() {
        filebeatRestarter.shutdown();
        if (!terminated.compareAndSet(false, true)) {
            throw new RuntimeException("Can not re-stop a node agent.");
        }
        signalWorkToBeDone();
        do {
            try {
                loopThread.join();
                filebeatRestarter.awaitTermination(Long.MAX_VALUE, TimeUnit.NANOSECONDS);
            } catch (InterruptedException e) {
                context.log(logger, LogLevel.ERROR,
                        "Interrupted while waiting for converge thread and filebeatRestarter scheduler to shutdown");
            }
        } while (loopThread.isAlive() || !filebeatRestarter.isTerminated());

        context.log(logger, "Stopped");
    }

    /**
     * Verifies that service is healthy, otherwise throws an exception. The default implementation does
     * nothing, override if it's necessary to verify that a service is healthy before resuming.
     */
    protected void verifyHealth(NodeSpec node) { }

    /** Starts services in the container unless they were already started. */
    void startServicesIfNeeded() {
        if (!hasStartedServices) {
            context.log(logger, "Starting services");
            dockerOperations.startServices(context.containerName());
            hasStartedServices = true;
        }
    }

    /** Writes metrics/filebeat config, schedules the daily filebeat restart, and resumes the node once. */
    void resumeNodeIfNeeded(NodeSpec node) {
        if (!hasResumedNode) {
            if (!currentFilebeatRestarter.isPresent()) {
                storageMaintainer.writeMetricsConfig(context, node);
                storageMaintainer.writeFilebeatConfig(context, node);
                currentFilebeatRestarter = Optional.of(filebeatRestarter.scheduleWithFixedDelay(
                        () -> serviceRestarter.accept("filebeat"), 1, 1, TimeUnit.DAYS));
            }
            context.log(logger, LogLevel.DEBUG, "Starting optional node program resume command");
            dockerOperations.resumeNode(context.containerName());
            hasResumedNode = true;
        }
    }

    /** Publishes this node's current generations and Docker image to the node repo if they changed. */
    private void updateNodeRepoWithCurrentAttributes(final NodeSpec node) {
        final NodeAttributes currentNodeAttributes = new NodeAttributes()
                .withRestartGeneration(node.getCurrentRestartGeneration())
                .withRebootGeneration(node.getCurrentRebootGeneration())
                .withDockerImage(node.getCurrentDockerImage().orElse(new DockerImage("")));
        final NodeAttributes wantedNodeAttributes = new NodeAttributes()
                .withRestartGeneration(node.getWantedRestartGeneration())
                .withRebootGeneration(node.getWantedRebootGeneration())
                // Only report the wanted image as current once the container state is settled (UNKNOWN).
                .withDockerImage(node.getWantedDockerImage().filter(n -> containerState == UNKNOWN).orElse(new DockerImage("")));

        publishStateToNodeRepoIfChanged(currentNodeAttributes, wantedNodeAttributes);
    }

    private void publishStateToNodeRepoIfChanged(NodeAttributes currentAttributes, NodeAttributes wantedAttributes) {
        if (!currentAttributes.equals(wantedAttributes)) {
            context.log(logger, "Publishing new set of attributes to node repo: %s -> %s",
                    currentAttributes, wantedAttributes);
            nodeRepository.updateNodeAttributes(context.hostname().value(), wantedAttributes);
        }
    }

    /** Creates and starts the container for the given node spec, resetting per-container state. */
    private void startContainer(NodeSpec node) {
        ContainerData containerData = createContainerData(context, node);
        dockerOperations.createContainer(context.containerName(), node, containerData);
        dockerOperations.startContainer(context.containerName());
        lastCpuMetric = new CpuUsageReporter();

        hasStartedServices = true; // Automatically started with the container
        hasResumedNode = false;
        context.log(logger, "Container successfully started, new containerState is " + containerState);
    }

    private Optional<Container> removeContainerIfNeededUpdateContainerState(NodeSpec node, Optional<Container> existingContainer) {
        return existingContainer
                .flatMap(container -> removeContainerIfNeeded(node, container))
                .map(container -> {
                    shouldRestartServices(node).ifPresent(restartReason -> {
                        context.log(logger, "Will restart services: " + restartReason);
                        restartServices(node, container);
                    });
                    return container;
                });
    }

    /** Returns a human-readable reason if the wanted restart generation is ahead of the current one. */
    private Optional<String> shouldRestartServices(NodeSpec node) {
        if (!node.getWantedRestartGeneration().isPresent()) return Optional.empty();

        if (!node.getCurrentRestartGeneration().isPresent() ||
                node.getCurrentRestartGeneration().get() < node.getWantedRestartGeneration().get()) {
            // Bug fix: the current restart generation may be absent on this branch, so it must not be
            // unconditionally get()'ed when building the message (would throw NoSuchElementException).
            return Optional.of("Restart requested - wanted restart generation has been bumped: "
                    + node.getCurrentRestartGeneration().map(String::valueOf).orElse("[absent]") + " -> "
                    + node.getWantedRestartGeneration().get());
        }
        return Optional.empty();
    }

    /** Restarts Vespa inside the container, suspending through the Orchestrator first. */
    private void restartServices(NodeSpec node, Container existingContainer) {
        if (existingContainer.state.isRunning() && node.getState() == Node.State.active) {
            context.log(logger, "Restarting services");
            // Since we are restarting the services we need to suspend the node.
            orchestratorSuspendNode();
            dockerOperations.restartVespa(context.containerName());
        }
    }

    @Override
    public void stopServices() {
        context.log(logger, "Stopping services");
        if (containerState == ABSENT) return;
        try {
            hasStartedServices = hasResumedNode = false;
            dockerOperations.stopServices(context.containerName());
        } catch (ContainerNotFoundException e) {
            containerState = ABSENT;
        }
    }

    @Override
    public void suspend() {
        context.log(logger, "Suspending services on node");
        if (containerState == ABSENT) return;
        try {
            hasResumedNode = false;
            dockerOperations.suspendNode(context.containerName());
        } catch (ContainerNotFoundException e) {
            containerState = ABSENT;
        } catch (RuntimeException e) {
            // It's bad to continue as-if nothing happened, but on the other hand if we do not proceed to
            // remove container, we will not be able to upgrade to fix any problems in the suspend logic!
            context.log(logger, LogLevel.WARNING, "Failed trying to suspend container", e);
        }
    }

    /** Returns a human-readable reason if the existing container should be removed, empty otherwise. */
    private Optional<String> shouldRemoveContainer(NodeSpec node, Container existingContainer) {
        final Node.State nodeState = node.getState();
        if (nodeState == Node.State.dirty || nodeState == Node.State.provisioned) {
            return Optional.of("Node in state " + nodeState + ", container should no longer be running");
        }
        if (node.getWantedDockerImage().isPresent() && !node.getWantedDockerImage().get().equals(existingContainer.image)) {
            return Optional.of("The node is supposed to run a new Docker image: "
                    + existingContainer + " -> " + node.getWantedDockerImage().get());
        }
        if (!existingContainer.state.isRunning()) {
            return Optional.of("Container no longer running");
        }

        ContainerResources wantedContainerResources = ContainerResources.from(
                node.getMinCpuCores(), node.getMinMainMemoryAvailableGb());
        if (!wantedContainerResources.equals(existingContainer.resources)) {
            return Optional.of("Container should be running with different resource allocation, wanted: "
                    + wantedContainerResources + ", actual: " + existingContainer.resources);
        }

        if (containerState == STARTING) return Optional.of("Container failed to start");
        return Optional.empty();
    }

    private Optional<Container> removeContainerIfNeeded(NodeSpec node, Container existingContainer) {
        Optional<String> removeReason = shouldRemoveContainer(node, existingContainer);
        if (removeReason.isPresent()) {
            context.log(logger, "Will remove container: " + removeReason.get());

            if (existingContainer.state.isRunning()) {
                if (node.getState() == Node.State.active) {
                    orchestratorSuspendNode();
                }

                try {
                    if (node.getState() != Node.State.dirty) {
                        suspend();
                    }
                    stopServices();
                } catch (Exception e) {
                    context.log(logger, LogLevel.WARNING, "Failed stopping services, ignoring", e);
                }
            }
            stopFilebeatSchedulerIfNeeded();
            dockerOperations.removeContainer(existingContainer);
            containerState = ABSENT;
            context.log(logger, "Container successfully removed, new containerState is " + containerState);
            return Optional.empty();
        }
        return Optional.of(existingContainer);
    }

    /** Triggers an async pull of the wanted image when it differs from the current one. */
    private void scheduleDownLoadIfNeeded(NodeSpec node) {
        if (node.getCurrentDockerImage().equals(node.getWantedDockerImage())) return;

        if (dockerOperations.pullImageAsyncIfNeeded(node.getWantedDockerImage().get())) {
            imageBeingDownloaded = node.getWantedDockerImage().get();
        } else if (imageBeingDownloaded != null) { // Image was downloading, but now it's ready
            imageBeingDownloaded = null;
        }
    }

    /** Wakes up the tick loop immediately instead of waiting out the converge interval. */
    private void signalWorkToBeDone() {
        synchronized (monitor) {
            if (!workToDoNow) {
                workToDoNow = true;
                context.log(logger, LogLevel.DEBUG, "Signaling work to be done");
                monitor.notifyAll();
            }
        }
    }

    /** One iteration of the agent loop: wait for interval or signal, then converge unless frozen. */
    void tick() {
        boolean isFrozenCopy;
        synchronized (monitor) {
            while (!workToDoNow) {
                long remainder = timeBetweenEachConverge
                        .minus(Duration.between(lastConverge, clock.instant()))
                        .toMillis();
                if (remainder > 0) {
                    try {
                        monitor.wait(remainder);
                    } catch (InterruptedException e) {
                        context.log(logger, LogLevel.ERROR, "Interrupted while sleeping before tick, ignoring");
                    }
                } else break;
            }
            lastConverge = clock.instant();
            workToDoNow = false;

            if (isFrozen != wantFrozen) {
                isFrozen = wantFrozen;
                context.log(logger, "Updated NodeAgent's frozen state, new value: isFrozen: " + isFrozen);
            }
            isFrozenCopy = isFrozen;
        }

        doAtTickStart(isFrozen);
        boolean converged = false;

        if (isFrozenCopy) {
            context.log(logger, LogLevel.DEBUG, "tick: isFrozen");
        } else {
            try {
                converge();
                converged = true;
            } catch (OrchestratorException e) {
                context.log(logger, e.getMessage());
            } catch (ContainerNotFoundException e) {
                containerState = ABSENT;
                context.log(logger, LogLevel.WARNING, "Container unexpectedly gone, resetting containerState to " + containerState);
            } catch (DockerException e) {
                numberOfUnhandledException++;
                context.log(logger, LogLevel.ERROR, "Caught a DockerException", e);
            } catch (Exception e) {
                numberOfUnhandledException++;
                context.log(logger, LogLevel.ERROR, "Unhandled exception, ignoring.", e);
            }
        }

        doAtTickEnd(converged);
    }

    /** Drives the container towards the wanted state as described by the node repository. */
    void converge() {
        final Optional<NodeSpec> optionalNode = nodeRepository.getOptionalNode(context.hostname().value());

        // The node may be absent from the node repo right after the dirty -> ready handoff below.
        if (!optionalNode.isPresent() && expectNodeNotInNodeRepo) return;

        final NodeSpec node = optionalNode.orElseThrow(() ->
                new IllegalStateException(String.format("Node '%s' missing from node repository", context.hostname())));
        expectNodeNotInNodeRepo = false;

        Optional<Container> container = getContainer();
        if (!node.equals(lastNode)) {
            // Every time the node spec changes, we should clear the metrics for this container as the dimensions
            // will change and we will be reporting duplicate metrics.
            if (container.map(c -> c.state.isRunning()).orElse(false)) {
                storageMaintainer.writeMetricsConfig(context, node);
            }

            context.log(logger, LogLevel.DEBUG, "Loading new node spec: " + node.toString());
            lastNode = node;
        }

        switch (node.getState()) {
            case ready:
            case reserved:
            case parked:
            case failed:
                removeContainerIfNeededUpdateContainerState(node, container);
                updateNodeRepoWithCurrentAttributes(node);
                break;
            case active:
                storageMaintainer.handleCoreDumpsForContainer(context, node);

                // Clean up old files when disk utilization reaches 80% of the allocation.
                storageMaintainer.getDiskUsageFor(context)
                        .map(diskUsage -> (double) diskUsage / BYTES_IN_GB / node.getMinDiskAvailableGb())
                        .filter(diskUtil -> diskUtil >= 0.8)
                        .ifPresent(diskUtil -> storageMaintainer.removeOldFilesFromNode(context));

                scheduleDownLoadIfNeeded(node);
                if (isDownloadingImage()) {
                    context.log(logger, LogLevel.DEBUG, "Waiting for image to download " + imageBeingDownloaded.asString());
                    return;
                }
                container = removeContainerIfNeededUpdateContainerState(node, container);
                if (!container.isPresent()) {
                    containerState = STARTING;
                    startContainer(node);
                    containerState = UNKNOWN;
                    aclMaintainer.run();
                }

                verifyHealth(node);
                startServicesIfNeeded();
                resumeNodeIfNeeded(node);
                athenzCredentialsMaintainer.converge();

                doBeforeConverge(node);

                updateNodeRepoWithCurrentAttributes(node);
                context.log(logger, "Call resume against Orchestrator");
                orchestrator.resume(context.hostname().value());
                break;
            case inactive:
                removeContainerIfNeededUpdateContainerState(node, container);
                updateNodeRepoWithCurrentAttributes(node);
                break;
            case provisioned:
                nodeRepository.setNodeState(context.hostname().value(), Node.State.dirty);
                break;
            case dirty:
                removeContainerIfNeededUpdateContainerState(node, container);
                context.log(logger, "State is " + node.getState() + ", will delete application storage and mark node as ready");
                athenzCredentialsMaintainer.clearCredentials();
                // NOTE(review): unlike the other StorageMaintainer methods in this file, the signature of
                // cleanupNodeStorage() is not visible here, so its argument is left unchanged — verify.
                storageMaintainer.cleanupNodeStorage(context.containerName(), node);
                updateNodeRepoWithCurrentAttributes(node);
                nodeRepository.setNodeState(context.hostname().value(), Node.State.ready);
                expectNodeNotInNodeRepo = true;
                break;
            default:
                throw new RuntimeException("UNKNOWN STATE " + node.getState().name());
        }
    }

    /**
     * Execute at start of tick
     *
     * WARNING: MUST NOT throw an exception
     *
     * @param frozen whether the agent is frozen
     */
    protected void doAtTickStart(boolean frozen) {}

    /**
     * Execute at end of tick
     *
     * WARNING: MUST NOT throw an exception
     *
     * @param converged Whether the tick converged: converge() was called without exception
     */
    protected void doAtTickEnd(boolean converged) {}

    /**
     * Execute at end of a (so far) successful converge of an active node
     *
     * Method a subclass can override to execute code:
     *  - Called right before the node repo is updated with converged attributes, and
     *    Orchestrator resume() is called
     *  - The only way to avoid a successful converge and the update to the node repo
     *    and Orchestrator is to throw an exception
     *  - The method is only called in a tick if the node is active, not frozen, and
     *    there are no prior phases of the converge that fails
     *
     * @throws RuntimeException to fail the convergence
     */
    protected void doBeforeConverge(NodeSpec node) {}

    private void stopFilebeatSchedulerIfNeeded() {
        if (currentFilebeatRestarter.isPresent()) {
            currentFilebeatRestarter.get().cancel(true);
            currentFilebeatRestarter = Optional.empty();
        }
    }

    /** Fetches container stats from the Docker daemon and pushes derived metrics into the container. */
    @SuppressWarnings("unchecked")
    public void updateContainerNodeMetrics() {
        final NodeSpec node = lastNode;
        if (node == null || containerState != UNKNOWN) return;

        Optional<ContainerStats> containerStats = dockerOperations.getContainerStats(context.containerName());
        if (!containerStats.isPresent()) return;

        Dimensions.Builder dimensionsBuilder = new Dimensions.Builder()
                .add("host", context.hostname().value())
                .add("role", "tenants")
                .add("state", node.getState().toString())
                .add("parentHostname", environment.getParentHostHostname());
        node.getAllowedToBeDown().ifPresent(allowed ->
                dimensionsBuilder.add("orchestratorState", allowed ? "ALLOWED_TO_BE_DOWN" : "NO_REMARKS"));
        Dimensions dimensions = dimensionsBuilder.build();

        ContainerStats stats = containerStats.get();
        final String APP = MetricReceiverWrapper.APPLICATION_NODE;
        final int totalNumCpuCores = ((List<Number>) ((Map) stats.getCpuStats().get("cpu_usage")).get("percpu_usage")).size();
        final long cpuContainerKernelTime = ((Number) ((Map) stats.getCpuStats().get("cpu_usage")).get("usage_in_kernelmode")).longValue();
        final long cpuContainerTotalTime = ((Number) ((Map) stats.getCpuStats().get("cpu_usage")).get("total_usage")).longValue();
        final long cpuSystemTotalTime = ((Number) stats.getCpuStats().get("system_cpu_usage")).longValue();
        final long memoryTotalBytes = ((Number) stats.getMemoryStats().get("limit")).longValue();
        final long memoryTotalBytesUsage = ((Number) stats.getMemoryStats().get("usage")).longValue();
        final long memoryTotalBytesCache = ((Number) ((Map) stats.getMemoryStats().get("stats")).get("cache")).longValue();
        final long diskTotalBytes = (long) (node.getMinDiskAvailableGb() * BYTES_IN_GB);
        final Optional<Long> diskTotalBytesUsed = storageMaintainer.getDiskUsageFor(context);

        lastCpuMetric.updateCpuDeltas(cpuSystemTotalTime, cpuContainerTotalTime, cpuContainerKernelTime);

        // CPU usage by the container is reported relative to its allocated share of the host's cores.
        final double allocatedCpuRatio = node.getMinCpuCores() / totalNumCpuCores;
        double cpuUsageRatioOfAllocated = lastCpuMetric.getCpuUsageRatio() / allocatedCpuRatio;
        double cpuKernelUsageRatioOfAllocated = lastCpuMetric.getCpuKernelUsageRatio() / allocatedCpuRatio;

        long memoryTotalBytesUsed = memoryTotalBytesUsage - memoryTotalBytesCache;
        double memoryUsageRatio = (double) memoryTotalBytesUsed / memoryTotalBytes;
        Optional<Double> diskUsageRatio = diskTotalBytesUsed.map(used -> (double) used / diskTotalBytes);

        List<DimensionMetrics> metrics = new ArrayList<>();
        DimensionMetrics.Builder systemMetricsBuilder = new DimensionMetrics.Builder(APP, dimensions)
                .withMetric("mem.limit", memoryTotalBytes)
                .withMetric("mem.used", memoryTotalBytesUsed)
                .withMetric("mem.util", 100 * memoryUsageRatio)
                .withMetric("cpu.util", 100 * cpuUsageRatioOfAllocated)
                .withMetric("cpu.sys.util", 100 * cpuKernelUsageRatioOfAllocated)
                .withMetric("disk.limit", diskTotalBytes);

        diskTotalBytesUsed.ifPresent(diskUsed -> systemMetricsBuilder.withMetric("disk.used", diskUsed));
        diskUsageRatio.ifPresent(diskRatio -> systemMetricsBuilder.withMetric("disk.util", 100 * diskRatio));
        metrics.add(systemMetricsBuilder.build());

        stats.getNetworks().forEach((interfaceName, interfaceStats) -> {
            Dimensions netDims = dimensionsBuilder.add("interface", interfaceName).build();
            Map<String, Number> infStats = (Map<String, Number>) interfaceStats;

            DimensionMetrics networkMetrics = new DimensionMetrics.Builder(APP, netDims)
                    .withMetric("net.in.bytes", infStats.get("rx_bytes").longValue())
                    .withMetric("net.in.errors", infStats.get("rx_errors").longValue())
                    .withMetric("net.in.dropped", infStats.get("rx_dropped").longValue())
                    .withMetric("net.out.bytes", infStats.get("tx_bytes").longValue())
                    .withMetric("net.out.errors", infStats.get("tx_errors").longValue())
                    .withMetric("net.out.dropped", infStats.get("tx_dropped").longValue())
                    .build();
            metrics.add(networkMetrics);
        });

        pushMetricsToContainer(metrics);
    }

    /** Pushes the given metrics into the container's secret-agent via vespa-rpc-invoke. */
    private void pushMetricsToContainer(List<DimensionMetrics> metrics) {
        StringBuilder params = new StringBuilder();
        try {
            for (DimensionMetrics dimensionMetrics : metrics) {
                params.append(dimensionMetrics.toSecretAgentReport());
            }
            String wrappedMetrics = "s:" + params.toString();

            // Push metrics to the metrics proxy in each container - give it maximum 5 seconds to complete.
            String[] command = {"vespa-rpc-invoke", "-t", "2", "tcp/localhost:19091", "setExtraMetrics", wrappedMetrics};
            dockerOperations.executeCommandInContainerAsRoot(context.containerName(), 5L, command);
        } catch (DockerExecTimeoutException | JsonProcessingException e) {
            context.log(logger, LogLevel.WARNING, "Failed to push metrics to container", e);
        }
    }

    /** Returns the container from the Docker daemon, updating containerState if it is gone. */
    private Optional<Container> getContainer() {
        if (containerState == ABSENT) return Optional.empty();
        Optional<Container> container = dockerOperations.getContainer(context.containerName());
        if (!container.isPresent()) containerState = ABSENT;
        return container;
    }

    @Override
    public boolean isDownloadingImage() {
        return imageBeingDownloaded != null;
    }

    @Override
    public int getAndResetNumberOfUnhandledExceptions() {
        int temp = numberOfUnhandledException;
        numberOfUnhandledException = 0;
        return temp;
    }

    /** Tracks CPU usage deltas between consecutive stats samples. */
    class CpuUsageReporter {
        private long containerKernelUsage = 0;
        private long totalContainerUsage = 0;
        private long totalSystemUsage = 0;

        private long deltaContainerKernelUsage;
        private long deltaContainerUsage;
        private long deltaSystemUsage;

        private void updateCpuDeltas(long totalSystemUsage, long totalContainerUsage, long containerKernelUsage) {
            // On the very first sample there is no previous system usage to diff against.
            deltaSystemUsage = this.totalSystemUsage == 0 ? 0 : (totalSystemUsage - this.totalSystemUsage);
            deltaContainerUsage = totalContainerUsage - this.totalContainerUsage;
            deltaContainerKernelUsage = containerKernelUsage - this.containerKernelUsage;

            this.totalSystemUsage = totalSystemUsage;
            this.totalContainerUsage = totalContainerUsage;
            this.containerKernelUsage = containerKernelUsage;
        }

        /**
         * Returns the CPU usage ratio for the docker container that this NodeAgent is managing
         * in the time between the last two times updateCpuDeltas() was called. This is calculated
         * by dividing the CPU time used by the container with the CPU time used by the entire system.
         */
        double getCpuUsageRatio() {
            return deltaSystemUsage == 0 ? Double.NaN : (double) deltaContainerUsage / deltaSystemUsage;
        }

        double getCpuKernelUsageRatio() {
            return deltaSystemUsage == 0 ? Double.NaN : (double) deltaContainerKernelUsage / deltaSystemUsage;
        }
    }

    // TODO: Also skip orchestration if we're downgrading in test/staging
    // How to implement:
    //  - test/staging: We need to figure out whether we're in test/staging and if the version is decreasing
    private void orchestratorSuspendNode() {
        context.log(logger, "Ask Orchestrator for permission to suspend node");
        orchestrator.suspend(context.hostname().value());
    }

    /** Hook for subclasses to supply files to be written into a newly created container. */
    protected ContainerData createContainerData(NodeAgentContext context, NodeSpec node) {
        return (pathInContainer, data) -> {
            throw new UnsupportedOperationException("addFile not implemented");
        };
    }
}
Consider extracting a helper method pathOnHostFromPathInNodeUnderVespaHome(String pathUnderVespaHome): the pattern context.pathOnHostFromPathInNode(context.pathInNodeUnderVespaHome(...)) is repeated for every directory cleaned up in removeOldFilesFromNode().
/**
 * Deletes old log files (for vespa, nginx, logstash, etc.) and expired file-distribution
 * data from this node's storage on the host.
 *
 * <p>Rotated log files ({@code *.log.*}) older than 3 days are removed from the top level of
 * each known log directory; qrs access logs older than 3 days, log archive entries older than
 * 31 days, and file-distribution directories older than 31 days are removed as well.
 *
 * @param context context of the node whose storage should be pruned
 */
public void removeOldFilesFromNode(NodeAgentContext context) {
    // Review note addressed: the repeated pathOnHostFromPathInNode(pathInNodeUnderVespaHome(...))
    // chain is factored out into pathOnHostUnderVespaHome().
    String[] logDirectories = {
            "logs/elasticsearch2",
            "logs/logstash2",
            "logs/daemontools_y",
            "logs/nginx",
            "logs/vespa"
    };
    for (String logDir : logDirectories) {
        FileFinder.files(pathOnHostUnderVespaHome(context, logDir))
                .match(olderThan(Duration.ofDays(3)).and(nameMatches(Pattern.compile(".*\\.log.+"))))
                .maxDepth(1)
                .deleteRecursively();
    }

    FileFinder.files(pathOnHostUnderVespaHome(context, "logs/vespa/qrs"))
            .match(olderThan(Duration.ofDays(3)))
            .deleteRecursively();

    FileFinder.files(pathOnHostUnderVespaHome(context, "logs/vespa/logarchive"))
            .match(olderThan(Duration.ofDays(31)))
            .deleteRecursively();

    FileFinder.directories(pathOnHostUnderVespaHome(context, "var/db/vespa/filedistribution"))
            .match(olderThan(Duration.ofDays(31)))
            .deleteRecursively();
}

/** Resolves a path given relative to VESPA_HOME inside the node to its corresponding path on the host. */
private Path pathOnHostUnderVespaHome(NodeAgentContext context, String pathUnderVespaHome) {
    return context.pathOnHostFromPathInNode(context.pathInNodeUnderVespaHome(pathUnderVespaHome));
}
FileFinder.files(context.pathOnHostFromPathInNode(context.pathInNodeUnderVespaHome("logs/vespa/qrs")))
/**
 * Deletes old log files for vespa, nginx, logstash, etc., and expired file-distribution data
 * from this node's storage on the host: rotated logs ({@code *.log.*}) older than 3 days at the
 * top level of each known log directory, qrs logs older than 3 days, log archive entries and
 * file-distribution directories older than 31 days.
 *
 * @param context context of the node whose storage should be pruned
 */
public void removeOldFilesFromNode(NodeAgentContext context) {
    String[] logDirsUnderVespaHome = {
            "logs/elasticsearch2",
            "logs/logstash2",
            "logs/daemontools_y",
            "logs/nginx",
            "logs/vespa"
    };
    for (String logDir : logDirsUnderVespaHome) {
        Path logDirOnHost = context.pathOnHostFromPathInNode(context.pathInNodeUnderVespaHome(logDir));
        FileFinder.files(logDirOnHost)
                .match(olderThan(Duration.ofDays(3)).and(nameMatches(Pattern.compile(".*\\.log.+"))))
                .maxDepth(1)
                .deleteRecursively();
    }

    Path qrsLogsOnHost = context.pathOnHostFromPathInNode(context.pathInNodeUnderVespaHome("logs/vespa/qrs"));
    FileFinder.files(qrsLogsOnHost)
            .match(olderThan(Duration.ofDays(3)))
            .deleteRecursively();

    Path logArchiveOnHost = context.pathOnHostFromPathInNode(context.pathInNodeUnderVespaHome("logs/vespa/logarchive"));
    FileFinder.files(logArchiveOnHost)
            .match(olderThan(Duration.ofDays(31)))
            .deleteRecursively();

    Path fileDistributionOnHost = context.pathOnHostFromPathInNode(context.pathInNodeUnderVespaHome("var/db/vespa/filedistribution"));
    FileFinder.directories(fileDistributionOnHost)
            .match(olderThan(Duration.ofDays(31)))
            .deleteRecursively();
}
class StorageMaintainer { private static final Logger logger = Logger.getLogger(StorageMaintainer.class.getName()); private static final DateTimeFormatter DATE_TIME_FORMATTER = DateTimeFormatter .ofPattern("yyyyMMddHHmmss").withZone(ZoneOffset.UTC); private final DockerOperations dockerOperations; private final ProcessExecuter processExecuter; private final Environment environment; private final CoredumpHandler coredumpHandler; private final Path archiveContainerStoragePath; public StorageMaintainer(DockerOperations dockerOperations, ProcessExecuter processExecuter, Environment environment, CoredumpHandler coredumpHandler, Path archiveContainerStoragePath) { this.dockerOperations = dockerOperations; this.processExecuter = processExecuter; this.environment = environment; this.coredumpHandler = coredumpHandler; this.archiveContainerStoragePath = archiveContainerStoragePath; } public void writeMetricsConfig(NodeAgentContext context, NodeSpec node) { List<SecretAgentCheckConfig> configs = new ArrayList<>(); Path hostLifeCheckPath = context.pathInNodeUnderVespaHome("libexec/yms/yms_check_host_life"); SecretAgentCheckConfig hostLifeSchedule = new SecretAgentCheckConfig("host-life", 60, hostLifeCheckPath); configs.add(annotatedCheck(node, hostLifeSchedule)); Path ntpCheckPath = context.pathInNodeUnderVespaHome("libexec/yms/yms_check_ntp"); SecretAgentCheckConfig ntpSchedule = new SecretAgentCheckConfig("ntp", 60, ntpCheckPath); configs.add(annotatedCheck(node, ntpSchedule)); Path coredumpCheckPath = context.pathInNodeUnderVespaHome("libexec/yms/yms_check_coredumps"); SecretAgentCheckConfig coredumpSchedule = new SecretAgentCheckConfig("system-coredumps-processing", 300, coredumpCheckPath, "--application", "system-coredumps-processing", "--lastmin", "129600", "--crit", "1", "--coredir", context.pathInNodeUnderVespaHome("var/crash/processing").toString()); configs.add(annotatedCheck(node, coredumpSchedule)); Path athenzCertExpiryCheckPath = 
context.pathInNodeUnderVespaHome("libexec64/yms/yms_check_athenz_certs"); SecretAgentCheckConfig athenzCertExpirySchedule = new SecretAgentCheckConfig("athenz-certificate-expiry", 60, athenzCertExpiryCheckPath, "--threshold", "20") .withRunAsUser("root"); configs.add(annotatedCheck(node, athenzCertExpirySchedule)); if (context.nodeType() != NodeType.config) { Path vespaHealthCheckPath = context.pathInNodeUnderVespaHome("libexec/yms/yms_check_vespa_health"); SecretAgentCheckConfig vespaHealthSchedule = new SecretAgentCheckConfig("vespa-health", 60, vespaHealthCheckPath, "all"); configs.add(annotatedCheck(node, vespaHealthSchedule)); Path vespaCheckPath = context.pathInNodeUnderVespaHome("libexec/yms/yms_check_vespa"); SecretAgentCheckConfig vespaSchedule = new SecretAgentCheckConfig("vespa", 60, vespaCheckPath, "all"); configs.add(annotatedCheck(node, vespaSchedule)); } if (context.nodeType() == NodeType.config) { Path configServerCheckPath = context.pathInNodeUnderVespaHome("libexec/yms/yms_check_ymonsb2"); SecretAgentCheckConfig configServerSchedule = new SecretAgentCheckConfig("configserver", 60, configServerCheckPath, "-zero", "configserver"); configs.add(annotatedCheck(node, configServerSchedule)); Path zkbackupCheckPath = context.pathInNodeUnderVespaHome("libexec/yamas2/yms_check_file_age.py"); SecretAgentCheckConfig zkbackupSchedule = new SecretAgentCheckConfig("zkbackupage", 300, zkbackupCheckPath, "-f", context.pathInNodeUnderVespaHome("var/vespa-hosted/zkbackup.stat").toString(), "-m", "150", "-a", "config-zkbackupage"); configs.add(annotatedCheck(node, zkbackupSchedule)); } if (context.nodeType() == NodeType.proxy) { Path routingAgeCheckPath = context.pathInNodeUnderVespaHome("libexec/yamas2/yms_check_file_age.py"); SecretAgentCheckConfig routingAgeSchedule = new SecretAgentCheckConfig("routing-configage", 60, routingAgeCheckPath, "-f", context.pathInNodeUnderVespaHome("var/vespa-hosted/routing/nginx.conf").toString(), "-m", "90", "-a", 
"routing-configage"); configs.add(annotatedCheck(node, routingAgeSchedule)); Path sslCheckPath = context.pathInNodeUnderVespaHome("libexec/yms/yms_check_ssl_status"); SecretAgentCheckConfig sslSchedule = new SecretAgentCheckConfig("ssl-status", 300, sslCheckPath, "-e", "localhost", "-p", "4443", "-t", "30"); configs.add(annotatedCheck(node, sslSchedule)); } Path yamasAgentFolder = context.pathOnHostFromPathInNode("/etc/yamas-agent"); configs.forEach(s -> uncheck(() -> s.writeTo(yamasAgentFolder))); dockerOperations.executeCommandInContainerAsRoot(context.containerName(), "service", "yamas-agent", "restart"); } private SecretAgentCheckConfig annotatedCheck(NodeSpec node, SecretAgentCheckConfig check) { check.withTag("namespace", "Vespa") .withTag("role", SecretAgentCheckConfig.nodeTypeToRole(node.getNodeType())) .withTag("flavor", node.getFlavor()) .withTag("canonicalFlavor", node.getCanonicalFlavor()) .withTag("state", node.getState().toString()) .withTag("zone", environment.getZone()) .withTag("parentHostname", environment.getParentHostHostname()); node.getOwner().ifPresent(owner -> check .withTag("tenantName", owner.getTenant()) .withTag("app", owner.getApplication() + "." + owner.getInstance()) .withTag("applicationName", owner.getApplication()) .withTag("instanceName", owner.getInstance()) .withTag("applicationId", owner.getTenant() + "." + owner.getApplication() + "." 
+ owner.getInstance())); node.getMembership().ifPresent(membership -> check .withTag("clustertype", membership.getClusterType()) .withTag("clusterid", membership.getClusterId())); node.getVespaVersion().ifPresent(version -> check.withTag("vespaVersion", version)); return check; } public void writeFilebeatConfig(NodeAgentContext context, NodeSpec node) { try { FilebeatConfigProvider filebeatConfigProvider = new FilebeatConfigProvider(environment); Optional<String> config = filebeatConfigProvider.getConfig(context, node); if (!config.isPresent()) return; Path filebeatPath = context.pathOnHostFromPathInNode("/etc/filebeat/filebeat.yml"); Files.write(filebeatPath, config.get().getBytes()); context.log(logger, "Wrote filebeat config"); } catch (Throwable t) { context.log(logger, LogLevel.ERROR, "Failed writing filebeat config", t); } } public Optional<Long> getDiskUsageFor(NodeAgentContext context) { Path containerDir = context.pathOnHostFromPathInNode("/"); try { return Optional.of(getDiskUsedInBytes(containerDir)); } catch (Throwable e) { context.log(logger, LogLevel.WARNING, "Problems during disk usage calculations in " + containerDir.toAbsolutePath(), e); return Optional.empty(); } } long getDiskUsedInBytes(Path path) throws IOException, InterruptedException { if (!Files.exists(path)) return 0; Process duCommand = new ProcessBuilder().command("du", "-xsk", path.toString()).start(); if (!duCommand.waitFor(60, TimeUnit.SECONDS)) { duCommand.destroy(); duCommand.waitFor(); throw new RuntimeException("Disk usage command timed out, aborting."); } String output = IOUtils.readAll(new InputStreamReader(duCommand.getInputStream())); String[] results = output.split("\t"); if (results.length != 2) { throw new RuntimeException("Result from disk usage command not as expected: " + output); } long diskUsageKB = Long.valueOf(results[0]); return diskUsageKB * 1024; } /** Deletes old log files for vespa, nginx, logstash, etc. 
*/ /** Checks if container has any new coredumps, reports and archives them if so */ public void handleCoreDumpsForContainer(NodeAgentContext context, NodeSpec node) { final Path coredumpsPath = context.pathOnHostFromPathInNode(context.pathInNodeUnderVespaHome("var/crash")); final Map<String, Object> nodeAttributes = getCoredumpNodeAttributes(node); try { coredumpHandler.processAll(coredumpsPath, nodeAttributes); } catch (IOException e) { throw new UncheckedIOException("Failed to process coredumps", e); } } private Map<String, Object> getCoredumpNodeAttributes(NodeSpec node) { Map<String, Object> attributes = new HashMap<>(); attributes.put("hostname", node.getHostname()); attributes.put("parent_hostname", environment.getParentHostHostname()); attributes.put("region", environment.getRegion()); attributes.put("environment", environment.getEnvironment()); attributes.put("flavor", node.getFlavor()); attributes.put("kernel_version", System.getProperty("os.version")); node.getCurrentDockerImage().ifPresent(image -> attributes.put("docker_image", image.asString())); node.getVespaVersion().ifPresent(version -> attributes.put("vespa_version", version)); node.getOwner().ifPresent(owner -> { attributes.put("tenant", owner.getTenant()); attributes.put("application", owner.getApplication()); attributes.put("instance", owner.getInstance()); }); return Collections.unmodifiableMap(attributes); } /** * Prepares the container-storage for the next container by deleting/archiving all the data of the current container. 
* Removes old files, reports coredumps and archives container data, runs when container enters state "dirty" */ public void archiveNodeStorage(NodeAgentContext context) { Path logsDirInContainer = context.pathInNodeUnderVespaHome("logs"); Path containerLogsOnHost = context.pathOnHostFromPathInNode(logsDirInContainer); Path containerLogsInArchiveDir = archiveContainerStoragePath .resolve(context.containerName().asString() + "_" + DATE_TIME_FORMATTER.format(Instant.now()) + logsDirInContainer); new UnixPath(containerLogsInArchiveDir).createParents(); new UnixPath(containerLogsOnHost).moveIfExists(containerLogsInArchiveDir); new UnixPath(context.pathOnHostFromPathInNode("/")).deleteRecursively(); } /** * Runs node-maintainer's SpecVerifier and returns its output * @param node Node specification containing the excepted values we want to verify against * @return new combined hardware divergence * @throws RuntimeException if exit code != 0 */ public String getHardwareDivergence(NodeSpec node) { List<String> arguments = new ArrayList<>(Arrays.asList("specification", "--disk", Double.toString(node.getMinDiskAvailableGb()), "--memory", Double.toString(node.getMinMainMemoryAvailableGb()), "--cpu_cores", Double.toString(node.getMinCpuCores()), "--is_ssd", Boolean.toString(node.isFastDisk()), "--bandwidth", Double.toString(node.getBandwidth()), "--ips", String.join(",", node.getIpAddresses()))); if (environment.getDockerNetworking() == DockerNetworking.HOST_NETWORK) { arguments.add("--skip-reverse-lookup"); } node.getHardwareDivergence().ifPresent(hardwareDivergence -> { arguments.add("--divergence"); arguments.add(hardwareDivergence); }); return executeMaintainer("com.yahoo.vespa.hosted.node.verification.Main", arguments.toArray(new String[0])); } private String executeMaintainer(String mainClass, String... 
args) { String[] command = Stream.concat( Stream.of("sudo", "VESPA_HOSTNAME=" + getDefaults().vespaHostname(), "VESPA_HOME=" + getDefaults().vespaHome(), getDefaults().underVespaHome("libexec/vespa/node-admin/maintenance.sh"), mainClass), Stream.of(args)) .toArray(String[]::new); try { Pair<Integer, String> result = processExecuter.exec(command); if (result.getFirst() != 0) { throw new RuntimeException( String.format("Maintainer failed to execute command: %s, Exit code: %d, Stdout/stderr: %s", Arrays.toString(command), result.getFirst(), result.getSecond())); } return result.getSecond().trim(); } catch (IOException e) { throw new RuntimeException("Failed to execute maintainer", e); } } }
class StorageMaintainer { private static final Logger logger = Logger.getLogger(StorageMaintainer.class.getName()); private static final DateTimeFormatter DATE_TIME_FORMATTER = DateTimeFormatter .ofPattern("yyyyMMddHHmmss").withZone(ZoneOffset.UTC); private final DockerOperations dockerOperations; private final ProcessExecuter processExecuter; private final Environment environment; private final CoredumpHandler coredumpHandler; private final Path archiveContainerStoragePath; public StorageMaintainer(DockerOperations dockerOperations, ProcessExecuter processExecuter, Environment environment, CoredumpHandler coredumpHandler, Path archiveContainerStoragePath) { this.dockerOperations = dockerOperations; this.processExecuter = processExecuter; this.environment = environment; this.coredumpHandler = coredumpHandler; this.archiveContainerStoragePath = archiveContainerStoragePath; } public void writeMetricsConfig(NodeAgentContext context, NodeSpec node) { List<SecretAgentCheckConfig> configs = new ArrayList<>(); Path hostLifeCheckPath = context.pathInNodeUnderVespaHome("libexec/yms/yms_check_host_life"); SecretAgentCheckConfig hostLifeSchedule = new SecretAgentCheckConfig("host-life", 60, hostLifeCheckPath); configs.add(annotatedCheck(node, hostLifeSchedule)); Path ntpCheckPath = context.pathInNodeUnderVespaHome("libexec/yms/yms_check_ntp"); SecretAgentCheckConfig ntpSchedule = new SecretAgentCheckConfig("ntp", 60, ntpCheckPath); configs.add(annotatedCheck(node, ntpSchedule)); Path coredumpCheckPath = context.pathInNodeUnderVespaHome("libexec/yms/yms_check_coredumps"); SecretAgentCheckConfig coredumpSchedule = new SecretAgentCheckConfig("system-coredumps-processing", 300, coredumpCheckPath, "--application", "system-coredumps-processing", "--lastmin", "129600", "--crit", "1", "--coredir", context.pathInNodeUnderVespaHome("var/crash/processing").toString()); configs.add(annotatedCheck(node, coredumpSchedule)); Path athenzCertExpiryCheckPath = 
context.pathInNodeUnderVespaHome("libexec64/yms/yms_check_athenz_certs"); SecretAgentCheckConfig athenzCertExpirySchedule = new SecretAgentCheckConfig("athenz-certificate-expiry", 60, athenzCertExpiryCheckPath, "--threshold", "20") .withRunAsUser("root"); configs.add(annotatedCheck(node, athenzCertExpirySchedule)); if (context.nodeType() != NodeType.config) { Path vespaHealthCheckPath = context.pathInNodeUnderVespaHome("libexec/yms/yms_check_vespa_health"); SecretAgentCheckConfig vespaHealthSchedule = new SecretAgentCheckConfig("vespa-health", 60, vespaHealthCheckPath, "all"); configs.add(annotatedCheck(node, vespaHealthSchedule)); Path vespaCheckPath = context.pathInNodeUnderVespaHome("libexec/yms/yms_check_vespa"); SecretAgentCheckConfig vespaSchedule = new SecretAgentCheckConfig("vespa", 60, vespaCheckPath, "all"); configs.add(annotatedCheck(node, vespaSchedule)); } if (context.nodeType() == NodeType.config) { Path configServerCheckPath = context.pathInNodeUnderVespaHome("libexec/yms/yms_check_ymonsb2"); SecretAgentCheckConfig configServerSchedule = new SecretAgentCheckConfig("configserver", 60, configServerCheckPath, "-zero", "configserver"); configs.add(annotatedCheck(node, configServerSchedule)); Path zkbackupCheckPath = context.pathInNodeUnderVespaHome("libexec/yamas2/yms_check_file_age.py"); SecretAgentCheckConfig zkbackupSchedule = new SecretAgentCheckConfig("zkbackupage", 300, zkbackupCheckPath, "-f", context.pathInNodeUnderVespaHome("var/vespa-hosted/zkbackup.stat").toString(), "-m", "150", "-a", "config-zkbackupage"); configs.add(annotatedCheck(node, zkbackupSchedule)); } if (context.nodeType() == NodeType.proxy) { Path routingAgeCheckPath = context.pathInNodeUnderVespaHome("libexec/yamas2/yms_check_file_age.py"); SecretAgentCheckConfig routingAgeSchedule = new SecretAgentCheckConfig("routing-configage", 60, routingAgeCheckPath, "-f", context.pathInNodeUnderVespaHome("var/vespa-hosted/routing/nginx.conf").toString(), "-m", "90", "-a", 
"routing-configage"); configs.add(annotatedCheck(node, routingAgeSchedule)); Path sslCheckPath = context.pathInNodeUnderVespaHome("libexec/yms/yms_check_ssl_status"); SecretAgentCheckConfig sslSchedule = new SecretAgentCheckConfig("ssl-status", 300, sslCheckPath, "-e", "localhost", "-p", "4443", "-t", "30"); configs.add(annotatedCheck(node, sslSchedule)); } Path yamasAgentFolder = context.pathOnHostFromPathInNode("/etc/yamas-agent"); configs.forEach(s -> uncheck(() -> s.writeTo(yamasAgentFolder))); dockerOperations.executeCommandInContainerAsRoot(context.containerName(), "service", "yamas-agent", "restart"); } private SecretAgentCheckConfig annotatedCheck(NodeSpec node, SecretAgentCheckConfig check) { check.withTag("namespace", "Vespa") .withTag("role", SecretAgentCheckConfig.nodeTypeToRole(node.getNodeType())) .withTag("flavor", node.getFlavor()) .withTag("canonicalFlavor", node.getCanonicalFlavor()) .withTag("state", node.getState().toString()) .withTag("zone", environment.getZone()) .withTag("parentHostname", environment.getParentHostHostname()); node.getOwner().ifPresent(owner -> check .withTag("tenantName", owner.getTenant()) .withTag("app", owner.getApplication() + "." + owner.getInstance()) .withTag("applicationName", owner.getApplication()) .withTag("instanceName", owner.getInstance()) .withTag("applicationId", owner.getTenant() + "." + owner.getApplication() + "." 
+ owner.getInstance())); node.getMembership().ifPresent(membership -> check .withTag("clustertype", membership.getClusterType()) .withTag("clusterid", membership.getClusterId())); node.getVespaVersion().ifPresent(version -> check.withTag("vespaVersion", version)); return check; } public void writeFilebeatConfig(NodeAgentContext context, NodeSpec node) { try { FilebeatConfigProvider filebeatConfigProvider = new FilebeatConfigProvider(environment); Optional<String> config = filebeatConfigProvider.getConfig(context, node); if (!config.isPresent()) return; Path filebeatPath = context.pathOnHostFromPathInNode("/etc/filebeat/filebeat.yml"); Files.write(filebeatPath, config.get().getBytes()); context.log(logger, "Wrote filebeat config"); } catch (Throwable t) { context.log(logger, LogLevel.ERROR, "Failed writing filebeat config", t); } } public Optional<Long> getDiskUsageFor(NodeAgentContext context) { Path containerDir = context.pathOnHostFromPathInNode("/"); try { return Optional.of(getDiskUsedInBytes(containerDir)); } catch (Throwable e) { context.log(logger, LogLevel.WARNING, "Problems during disk usage calculations in " + containerDir.toAbsolutePath(), e); return Optional.empty(); } } long getDiskUsedInBytes(Path path) throws IOException, InterruptedException { if (!Files.exists(path)) return 0; Process duCommand = new ProcessBuilder().command("du", "-xsk", path.toString()).start(); if (!duCommand.waitFor(60, TimeUnit.SECONDS)) { duCommand.destroy(); duCommand.waitFor(); throw new RuntimeException("Disk usage command timed out, aborting."); } String output = IOUtils.readAll(new InputStreamReader(duCommand.getInputStream())); String[] results = output.split("\t"); if (results.length != 2) { throw new RuntimeException("Result from disk usage command not as expected: " + output); } long diskUsageKB = Long.valueOf(results[0]); return diskUsageKB * 1024; } /** Deletes old log files for vespa, nginx, logstash, etc. 
*/ /** Checks if container has any new coredumps, reports and archives them if so */ public void handleCoreDumpsForContainer(NodeAgentContext context, NodeSpec node) { final Path coredumpsPath = context.pathOnHostFromPathInNode(context.pathInNodeUnderVespaHome("var/crash")); final Map<String, Object> nodeAttributes = getCoredumpNodeAttributes(node); try { coredumpHandler.processAll(coredumpsPath, nodeAttributes); } catch (IOException e) { throw new UncheckedIOException("Failed to process coredumps", e); } } private Map<String, Object> getCoredumpNodeAttributes(NodeSpec node) { Map<String, Object> attributes = new HashMap<>(); attributes.put("hostname", node.getHostname()); attributes.put("parent_hostname", environment.getParentHostHostname()); attributes.put("region", environment.getRegion()); attributes.put("environment", environment.getEnvironment()); attributes.put("flavor", node.getFlavor()); attributes.put("kernel_version", System.getProperty("os.version")); node.getCurrentDockerImage().ifPresent(image -> attributes.put("docker_image", image.asString())); node.getVespaVersion().ifPresent(version -> attributes.put("vespa_version", version)); node.getOwner().ifPresent(owner -> { attributes.put("tenant", owner.getTenant()); attributes.put("application", owner.getApplication()); attributes.put("instance", owner.getInstance()); }); return Collections.unmodifiableMap(attributes); } /** * Prepares the container-storage for the next container by deleting/archiving all the data of the current container. 
* Removes old files, reports coredumps and archives container data, runs when container enters state "dirty" */ public void archiveNodeStorage(NodeAgentContext context) { Path logsDirInContainer = context.pathInNodeUnderVespaHome("logs"); Path containerLogsOnHost = context.pathOnHostFromPathInNode(logsDirInContainer); Path containerLogsInArchiveDir = archiveContainerStoragePath .resolve(context.containerName().asString() + "_" + DATE_TIME_FORMATTER.format(Instant.now()) + logsDirInContainer); new UnixPath(containerLogsInArchiveDir).createParents(); new UnixPath(containerLogsOnHost).moveIfExists(containerLogsInArchiveDir); new UnixPath(context.pathOnHostFromPathInNode("/")).deleteRecursively(); } /** * Runs node-maintainer's SpecVerifier and returns its output * @param node Node specification containing the excepted values we want to verify against * @return new combined hardware divergence * @throws RuntimeException if exit code != 0 */ public String getHardwareDivergence(NodeSpec node) { List<String> arguments = new ArrayList<>(Arrays.asList("specification", "--disk", Double.toString(node.getMinDiskAvailableGb()), "--memory", Double.toString(node.getMinMainMemoryAvailableGb()), "--cpu_cores", Double.toString(node.getMinCpuCores()), "--is_ssd", Boolean.toString(node.isFastDisk()), "--bandwidth", Double.toString(node.getBandwidth()), "--ips", String.join(",", node.getIpAddresses()))); if (environment.getDockerNetworking() == DockerNetworking.HOST_NETWORK) { arguments.add("--skip-reverse-lookup"); } node.getHardwareDivergence().ifPresent(hardwareDivergence -> { arguments.add("--divergence"); arguments.add(hardwareDivergence); }); return executeMaintainer("com.yahoo.vespa.hosted.node.verification.Main", arguments.toArray(new String[0])); } private String executeMaintainer(String mainClass, String... 
args) { String[] command = Stream.concat( Stream.of("sudo", "VESPA_HOSTNAME=" + getDefaults().vespaHostname(), "VESPA_HOME=" + getDefaults().vespaHome(), getDefaults().underVespaHome("libexec/vespa/node-admin/maintenance.sh"), mainClass), Stream.of(args)) .toArray(String[]::new); try { Pair<Integer, String> result = processExecuter.exec(command); if (result.getFirst() != 0) { throw new RuntimeException( String.format("Maintainer failed to execute command: %s, Exit code: %d, Stdout/stderr: %s", Arrays.toString(command), result.getFirst(), result.getSecond())); } return result.getSecond().trim(); } catch (IOException e) { throw new RuntimeException("Failed to execute maintainer", e); } } }
Will do. It might get a little polluted, though, to have that method accept either a `String` or a `Path`.
/**
 * Deletes old files from the container's log and file-distribution directories.
 * Retention: rotated service logs and files under logs/vespa/qrs are removed after 3 days;
 * files under logs/vespa/logarchive and directories under var/db/vespa/filedistribution
 * are removed after 31 days.
 */
public void removeOldFilesFromNode(NodeAgentContext context) {
    // Directories whose rotated log files (".log.<suffix>") are kept for 3 days; only the
    // top level of each directory is scanned (maxDepth(1)).
    String[] rotatedLogDirs = {
            "logs/elasticsearch2",
            "logs/logstash2",
            "logs/daemontools_y",
            "logs/nginx",
            "logs/vespa"
    };
    for (String relativeDir : rotatedLogDirs) {
        Path dirOnHost = context.pathOnHostFromPathInNode(context.pathInNodeUnderVespaHome(relativeDir));
        FileFinder.files(dirOnHost)
                .match(olderThan(Duration.ofDays(3)).and(nameMatches(Pattern.compile(".*\\.log.+"))))
                .maxDepth(1)
                .deleteRecursively();
    }

    // Files under logs/vespa/qrs are kept for 3 days
    Path qrsLogsOnHost = context.pathOnHostFromPathInNode(context.pathInNodeUnderVespaHome("logs/vespa/qrs"));
    FileFinder.files(qrsLogsOnHost)
            .match(olderThan(Duration.ofDays(3)))
            .deleteRecursively();

    // Archived logs are kept for a month
    Path logArchiveOnHost = context.pathOnHostFromPathInNode(context.pathInNodeUnderVespaHome("logs/vespa/logarchive"));
    FileFinder.files(logArchiveOnHost)
            .match(olderThan(Duration.ofDays(31)))
            .deleteRecursively();

    // Stale file-distribution directories are kept for a month
    Path fileDistributionOnHost = context.pathOnHostFromPathInNode(context.pathInNodeUnderVespaHome("var/db/vespa/filedistribution"));
    FileFinder.directories(fileDistributionOnHost)
            .match(olderThan(Duration.ofDays(31)))
            .deleteRecursively();
}
FileFinder.files(context.pathOnHostFromPathInNode(context.pathInNodeUnderVespaHome("logs/vespa/qrs")))
/**
 * Removes expired files from the container's storage: rotated service logs and query logs
 * after three days, archived logs and old file-distribution data after 31 days.
 */
public void removeOldFilesFromNode(NodeAgentContext context) {
    final Duration threeDays = Duration.ofDays(3);
    final Duration oneMonth = Duration.ofDays(31);
    // Matches rotated log files, i.e. names containing ".log" followed by something more
    final Pattern rotatedLogName = Pattern.compile(".*\\.log.+");

    Path[] serviceLogDirsInNode = {
            context.pathInNodeUnderVespaHome("logs/elasticsearch2"),
            context.pathInNodeUnderVespaHome("logs/logstash2"),
            context.pathInNodeUnderVespaHome("logs/daemontools_y"),
            context.pathInNodeUnderVespaHome("logs/nginx"),
            context.pathInNodeUnderVespaHome("logs/vespa")
    };
    for (Path dirInNode : serviceLogDirsInNode) {
        Path dirOnHost = context.pathOnHostFromPathInNode(dirInNode);
        FileFinder.files(dirOnHost)
                .match(olderThan(threeDays).and(nameMatches(rotatedLogName)))
                .maxDepth(1)
                .deleteRecursively();
    }

    FileFinder.files(context.pathOnHostFromPathInNode(context.pathInNodeUnderVespaHome("logs/vespa/qrs")))
            .match(olderThan(threeDays))
            .deleteRecursively();

    FileFinder.files(context.pathOnHostFromPathInNode(context.pathInNodeUnderVespaHome("logs/vespa/logarchive")))
            .match(olderThan(oneMonth))
            .deleteRecursively();

    FileFinder.directories(context.pathOnHostFromPathInNode(context.pathInNodeUnderVespaHome("var/db/vespa/filedistribution")))
            .match(olderThan(oneMonth))
            .deleteRecursively();
}
class StorageMaintainer { private static final Logger logger = Logger.getLogger(StorageMaintainer.class.getName()); private static final DateTimeFormatter DATE_TIME_FORMATTER = DateTimeFormatter .ofPattern("yyyyMMddHHmmss").withZone(ZoneOffset.UTC); private final DockerOperations dockerOperations; private final ProcessExecuter processExecuter; private final Environment environment; private final CoredumpHandler coredumpHandler; private final Path archiveContainerStoragePath; public StorageMaintainer(DockerOperations dockerOperations, ProcessExecuter processExecuter, Environment environment, CoredumpHandler coredumpHandler, Path archiveContainerStoragePath) { this.dockerOperations = dockerOperations; this.processExecuter = processExecuter; this.environment = environment; this.coredumpHandler = coredumpHandler; this.archiveContainerStoragePath = archiveContainerStoragePath; } public void writeMetricsConfig(NodeAgentContext context, NodeSpec node) { List<SecretAgentCheckConfig> configs = new ArrayList<>(); Path hostLifeCheckPath = context.pathInNodeUnderVespaHome("libexec/yms/yms_check_host_life"); SecretAgentCheckConfig hostLifeSchedule = new SecretAgentCheckConfig("host-life", 60, hostLifeCheckPath); configs.add(annotatedCheck(node, hostLifeSchedule)); Path ntpCheckPath = context.pathInNodeUnderVespaHome("libexec/yms/yms_check_ntp"); SecretAgentCheckConfig ntpSchedule = new SecretAgentCheckConfig("ntp", 60, ntpCheckPath); configs.add(annotatedCheck(node, ntpSchedule)); Path coredumpCheckPath = context.pathInNodeUnderVespaHome("libexec/yms/yms_check_coredumps"); SecretAgentCheckConfig coredumpSchedule = new SecretAgentCheckConfig("system-coredumps-processing", 300, coredumpCheckPath, "--application", "system-coredumps-processing", "--lastmin", "129600", "--crit", "1", "--coredir", context.pathInNodeUnderVespaHome("var/crash/processing").toString()); configs.add(annotatedCheck(node, coredumpSchedule)); Path athenzCertExpiryCheckPath = 
context.pathInNodeUnderVespaHome("libexec64/yms/yms_check_athenz_certs"); SecretAgentCheckConfig athenzCertExpirySchedule = new SecretAgentCheckConfig("athenz-certificate-expiry", 60, athenzCertExpiryCheckPath, "--threshold", "20") .withRunAsUser("root"); configs.add(annotatedCheck(node, athenzCertExpirySchedule)); if (context.nodeType() != NodeType.config) { Path vespaHealthCheckPath = context.pathInNodeUnderVespaHome("libexec/yms/yms_check_vespa_health"); SecretAgentCheckConfig vespaHealthSchedule = new SecretAgentCheckConfig("vespa-health", 60, vespaHealthCheckPath, "all"); configs.add(annotatedCheck(node, vespaHealthSchedule)); Path vespaCheckPath = context.pathInNodeUnderVespaHome("libexec/yms/yms_check_vespa"); SecretAgentCheckConfig vespaSchedule = new SecretAgentCheckConfig("vespa", 60, vespaCheckPath, "all"); configs.add(annotatedCheck(node, vespaSchedule)); } if (context.nodeType() == NodeType.config) { Path configServerCheckPath = context.pathInNodeUnderVespaHome("libexec/yms/yms_check_ymonsb2"); SecretAgentCheckConfig configServerSchedule = new SecretAgentCheckConfig("configserver", 60, configServerCheckPath, "-zero", "configserver"); configs.add(annotatedCheck(node, configServerSchedule)); Path zkbackupCheckPath = context.pathInNodeUnderVespaHome("libexec/yamas2/yms_check_file_age.py"); SecretAgentCheckConfig zkbackupSchedule = new SecretAgentCheckConfig("zkbackupage", 300, zkbackupCheckPath, "-f", context.pathInNodeUnderVespaHome("var/vespa-hosted/zkbackup.stat").toString(), "-m", "150", "-a", "config-zkbackupage"); configs.add(annotatedCheck(node, zkbackupSchedule)); } if (context.nodeType() == NodeType.proxy) { Path routingAgeCheckPath = context.pathInNodeUnderVespaHome("libexec/yamas2/yms_check_file_age.py"); SecretAgentCheckConfig routingAgeSchedule = new SecretAgentCheckConfig("routing-configage", 60, routingAgeCheckPath, "-f", context.pathInNodeUnderVespaHome("var/vespa-hosted/routing/nginx.conf").toString(), "-m", "90", "-a", 
"routing-configage"); configs.add(annotatedCheck(node, routingAgeSchedule)); Path sslCheckPath = context.pathInNodeUnderVespaHome("libexec/yms/yms_check_ssl_status"); SecretAgentCheckConfig sslSchedule = new SecretAgentCheckConfig("ssl-status", 300, sslCheckPath, "-e", "localhost", "-p", "4443", "-t", "30"); configs.add(annotatedCheck(node, sslSchedule)); } Path yamasAgentFolder = context.pathOnHostFromPathInNode("/etc/yamas-agent"); configs.forEach(s -> uncheck(() -> s.writeTo(yamasAgentFolder))); dockerOperations.executeCommandInContainerAsRoot(context.containerName(), "service", "yamas-agent", "restart"); } private SecretAgentCheckConfig annotatedCheck(NodeSpec node, SecretAgentCheckConfig check) { check.withTag("namespace", "Vespa") .withTag("role", SecretAgentCheckConfig.nodeTypeToRole(node.getNodeType())) .withTag("flavor", node.getFlavor()) .withTag("canonicalFlavor", node.getCanonicalFlavor()) .withTag("state", node.getState().toString()) .withTag("zone", environment.getZone()) .withTag("parentHostname", environment.getParentHostHostname()); node.getOwner().ifPresent(owner -> check .withTag("tenantName", owner.getTenant()) .withTag("app", owner.getApplication() + "." + owner.getInstance()) .withTag("applicationName", owner.getApplication()) .withTag("instanceName", owner.getInstance()) .withTag("applicationId", owner.getTenant() + "." + owner.getApplication() + "." 
+ owner.getInstance())); node.getMembership().ifPresent(membership -> check .withTag("clustertype", membership.getClusterType()) .withTag("clusterid", membership.getClusterId())); node.getVespaVersion().ifPresent(version -> check.withTag("vespaVersion", version)); return check; } public void writeFilebeatConfig(NodeAgentContext context, NodeSpec node) { try { FilebeatConfigProvider filebeatConfigProvider = new FilebeatConfigProvider(environment); Optional<String> config = filebeatConfigProvider.getConfig(context, node); if (!config.isPresent()) return; Path filebeatPath = context.pathOnHostFromPathInNode("/etc/filebeat/filebeat.yml"); Files.write(filebeatPath, config.get().getBytes()); context.log(logger, "Wrote filebeat config"); } catch (Throwable t) { context.log(logger, LogLevel.ERROR, "Failed writing filebeat config", t); } } public Optional<Long> getDiskUsageFor(NodeAgentContext context) { Path containerDir = context.pathOnHostFromPathInNode("/"); try { return Optional.of(getDiskUsedInBytes(containerDir)); } catch (Throwable e) { context.log(logger, LogLevel.WARNING, "Problems during disk usage calculations in " + containerDir.toAbsolutePath(), e); return Optional.empty(); } } long getDiskUsedInBytes(Path path) throws IOException, InterruptedException { if (!Files.exists(path)) return 0; Process duCommand = new ProcessBuilder().command("du", "-xsk", path.toString()).start(); if (!duCommand.waitFor(60, TimeUnit.SECONDS)) { duCommand.destroy(); duCommand.waitFor(); throw new RuntimeException("Disk usage command timed out, aborting."); } String output = IOUtils.readAll(new InputStreamReader(duCommand.getInputStream())); String[] results = output.split("\t"); if (results.length != 2) { throw new RuntimeException("Result from disk usage command not as expected: " + output); } long diskUsageKB = Long.valueOf(results[0]); return diskUsageKB * 1024; } /** Deletes old log files for vespa, nginx, logstash, etc. 
*/ /** Checks if container has any new coredumps, reports and archives them if so */ public void handleCoreDumpsForContainer(NodeAgentContext context, NodeSpec node) { final Path coredumpsPath = context.pathOnHostFromPathInNode(context.pathInNodeUnderVespaHome("var/crash")); final Map<String, Object> nodeAttributes = getCoredumpNodeAttributes(node); try { coredumpHandler.processAll(coredumpsPath, nodeAttributes); } catch (IOException e) { throw new UncheckedIOException("Failed to process coredumps", e); } } private Map<String, Object> getCoredumpNodeAttributes(NodeSpec node) { Map<String, Object> attributes = new HashMap<>(); attributes.put("hostname", node.getHostname()); attributes.put("parent_hostname", environment.getParentHostHostname()); attributes.put("region", environment.getRegion()); attributes.put("environment", environment.getEnvironment()); attributes.put("flavor", node.getFlavor()); attributes.put("kernel_version", System.getProperty("os.version")); node.getCurrentDockerImage().ifPresent(image -> attributes.put("docker_image", image.asString())); node.getVespaVersion().ifPresent(version -> attributes.put("vespa_version", version)); node.getOwner().ifPresent(owner -> { attributes.put("tenant", owner.getTenant()); attributes.put("application", owner.getApplication()); attributes.put("instance", owner.getInstance()); }); return Collections.unmodifiableMap(attributes); } /** * Prepares the container-storage for the next container by deleting/archiving all the data of the current container. 
* Removes old files, reports coredumps and archives container data, runs when container enters state "dirty" */ public void archiveNodeStorage(NodeAgentContext context) { Path logsDirInContainer = context.pathInNodeUnderVespaHome("logs"); Path containerLogsOnHost = context.pathOnHostFromPathInNode(logsDirInContainer); Path containerLogsInArchiveDir = archiveContainerStoragePath .resolve(context.containerName().asString() + "_" + DATE_TIME_FORMATTER.format(Instant.now()) + logsDirInContainer); new UnixPath(containerLogsInArchiveDir).createParents(); new UnixPath(containerLogsOnHost).moveIfExists(containerLogsInArchiveDir); new UnixPath(context.pathOnHostFromPathInNode("/")).deleteRecursively(); } /** * Runs node-maintainer's SpecVerifier and returns its output * @param node Node specification containing the excepted values we want to verify against * @return new combined hardware divergence * @throws RuntimeException if exit code != 0 */ public String getHardwareDivergence(NodeSpec node) { List<String> arguments = new ArrayList<>(Arrays.asList("specification", "--disk", Double.toString(node.getMinDiskAvailableGb()), "--memory", Double.toString(node.getMinMainMemoryAvailableGb()), "--cpu_cores", Double.toString(node.getMinCpuCores()), "--is_ssd", Boolean.toString(node.isFastDisk()), "--bandwidth", Double.toString(node.getBandwidth()), "--ips", String.join(",", node.getIpAddresses()))); if (environment.getDockerNetworking() == DockerNetworking.HOST_NETWORK) { arguments.add("--skip-reverse-lookup"); } node.getHardwareDivergence().ifPresent(hardwareDivergence -> { arguments.add("--divergence"); arguments.add(hardwareDivergence); }); return executeMaintainer("com.yahoo.vespa.hosted.node.verification.Main", arguments.toArray(new String[0])); } private String executeMaintainer(String mainClass, String... 
args) { String[] command = Stream.concat( Stream.of("sudo", "VESPA_HOSTNAME=" + getDefaults().vespaHostname(), "VESPA_HOME=" + getDefaults().vespaHome(), getDefaults().underVespaHome("libexec/vespa/node-admin/maintenance.sh"), mainClass), Stream.of(args)) .toArray(String[]::new); try { Pair<Integer, String> result = processExecuter.exec(command); if (result.getFirst() != 0) { throw new RuntimeException( String.format("Maintainer failed to execute command: %s, Exit code: %d, Stdout/stderr: %s", Arrays.toString(command), result.getFirst(), result.getSecond())); } return result.getSecond().trim(); } catch (IOException e) { throw new RuntimeException("Failed to execute maintainer", e); } } }
/**
 * Maintains the host-side storage of Docker containers: writes monitoring/log-shipping
 * config into the container's host-mounted directories, measures disk usage, processes
 * coredumps, and archives/deletes container data when a node is recycled.
 */
class StorageMaintainer {
    private static final Logger logger = Logger.getLogger(StorageMaintainer.class.getName());

    // Used to build unique archive directory names, e.g. <container>_20180101123045.
    // DateTimeFormatter is thread-safe, so sharing a static instance is fine.
    private static final DateTimeFormatter DATE_TIME_FORMATTER = DateTimeFormatter
            .ofPattern("yyyyMMddHHmmss").withZone(ZoneOffset.UTC);

    private final DockerOperations dockerOperations;
    private final ProcessExecuter processExecuter;
    private final Environment environment;
    private final CoredumpHandler coredumpHandler;
    private final Path archiveContainerStoragePath;

    public StorageMaintainer(DockerOperations dockerOperations, ProcessExecuter processExecuter,
                             Environment environment, CoredumpHandler coredumpHandler,
                             Path archiveContainerStoragePath) {
        this.dockerOperations = dockerOperations;
        this.processExecuter = processExecuter;
        this.environment = environment;
        this.coredumpHandler = coredumpHandler;
        this.archiveContainerStoragePath = archiveContainerStoragePath;
    }

    /**
     * Writes secret-agent check configs for this node (selection depends on node type)
     * into /etc/yamas-agent on the host side, then restarts the yamas agent in the container.
     */
    public void writeMetricsConfig(NodeAgentContext context, NodeSpec node) {
        List<SecretAgentCheckConfig> configs = new ArrayList<>();

        // host-life
        Path hostLifeCheckPath = context.pathInNodeUnderVespaHome("libexec/yms/yms_check_host_life");
        SecretAgentCheckConfig hostLifeSchedule = new SecretAgentCheckConfig("host-life", 60, hostLifeCheckPath);
        configs.add(annotatedCheck(node, hostLifeSchedule));

        // ntp
        Path ntpCheckPath = context.pathInNodeUnderVespaHome("libexec/yms/yms_check_ntp");
        SecretAgentCheckConfig ntpSchedule = new SecretAgentCheckConfig("ntp", 60, ntpCheckPath);
        configs.add(annotatedCheck(node, ntpSchedule));

        // coredumps (4x normal interval)
        Path coredumpCheckPath = context.pathInNodeUnderVespaHome("libexec/yms/yms_check_coredumps");
        SecretAgentCheckConfig coredumpSchedule = new SecretAgentCheckConfig("system-coredumps-processing", 300,
                coredumpCheckPath, "--application", "system-coredumps-processing", "--lastmin", "129600",
                "--crit", "1", "--coredir", context.pathInNodeUnderVespaHome("var/crash/processing").toString());
        configs.add(annotatedCheck(node, coredumpSchedule));

        // athenz certificate expiry
        Path athenzCertExpiryCheckPath = context.pathInNodeUnderVespaHome("libexec64/yms/yms_check_athenz_certs");
        SecretAgentCheckConfig athenzCertExpirySchedule = new SecretAgentCheckConfig("athenz-certificate-expiry", 60,
                athenzCertExpiryCheckPath, "--threshold", "20")
                .withRunAsUser("root");
        configs.add(annotatedCheck(node, athenzCertExpirySchedule));

        if (context.nodeType() != NodeType.config) {
            // vespa-health
            Path vespaHealthCheckPath = context.pathInNodeUnderVespaHome("libexec/yms/yms_check_vespa_health");
            SecretAgentCheckConfig vespaHealthSchedule = new SecretAgentCheckConfig("vespa-health", 60,
                    vespaHealthCheckPath, "all");
            configs.add(annotatedCheck(node, vespaHealthSchedule));

            // vespa (service status)
            Path vespaCheckPath = context.pathInNodeUnderVespaHome("libexec/yms/yms_check_vespa");
            SecretAgentCheckConfig vespaSchedule = new SecretAgentCheckConfig("vespa", 60, vespaCheckPath, "all");
            configs.add(annotatedCheck(node, vespaSchedule));
        }

        if (context.nodeType() == NodeType.config) {
            // configserver
            Path configServerCheckPath = context.pathInNodeUnderVespaHome("libexec/yms/yms_check_ymonsb2");
            SecretAgentCheckConfig configServerSchedule = new SecretAgentCheckConfig("configserver", 60,
                    configServerCheckPath, "-zero", "configserver");
            configs.add(annotatedCheck(node, configServerSchedule));

            // zkbackupage: alert if the ZooKeeper backup status file is older than 150 minutes
            Path zkbackupCheckPath = context.pathInNodeUnderVespaHome("libexec/yamas2/yms_check_file_age.py");
            SecretAgentCheckConfig zkbackupSchedule = new SecretAgentCheckConfig("zkbackupage", 300,
                    zkbackupCheckPath, "-f", context.pathInNodeUnderVespaHome("var/vespa-hosted/zkbackup.stat").toString(),
                    "-m", "150", "-a", "config-zkbackupage");
            configs.add(annotatedCheck(node, zkbackupSchedule));
        }

        if (context.nodeType() == NodeType.proxy) {
            // routing-configage: alert if the routing config is older than 90 minutes
            Path routingAgeCheckPath = context.pathInNodeUnderVespaHome("libexec/yamas2/yms_check_file_age.py");
            SecretAgentCheckConfig routingAgeSchedule = new SecretAgentCheckConfig("routing-configage", 60,
                    routingAgeCheckPath, "-f", context.pathInNodeUnderVespaHome("var/vespa-hosted/routing/nginx.conf").toString(),
                    "-m", "90", "-a", "routing-configage");
            configs.add(annotatedCheck(node, routingAgeSchedule));

            // ssl-status on the local proxy endpoint
            Path sslCheckPath = context.pathInNodeUnderVespaHome("libexec/yms/yms_check_ssl_status");
            SecretAgentCheckConfig sslSchedule = new SecretAgentCheckConfig("ssl-status", 300,
                    sslCheckPath, "-e", "localhost", "-p", "4443", "-t", "30");
            configs.add(annotatedCheck(node, sslSchedule));
        }

        // Write the config files and restart the yamas agent so it picks them up
        Path yamasAgentFolder = context.pathOnHostFromPathInNode("/etc/yamas-agent");
        configs.forEach(s -> uncheck(() -> s.writeTo(yamasAgentFolder)));
        dockerOperations.executeCommandInContainerAsRoot(context.containerName(), "service", "yamas-agent", "restart");
    }

    /** Adds the standard dimension tags (zone, owner, membership, version, ...) to a check config. */
    private SecretAgentCheckConfig annotatedCheck(NodeSpec node, SecretAgentCheckConfig check) {
        check.withTag("namespace", "Vespa")
                .withTag("role", SecretAgentCheckConfig.nodeTypeToRole(node.getNodeType()))
                .withTag("flavor", node.getFlavor())
                .withTag("canonicalFlavor", node.getCanonicalFlavor())
                .withTag("state", node.getState().toString())
                .withTag("zone", environment.getZone())
                .withTag("parentHostname", environment.getParentHostHostname());
        node.getOwner().ifPresent(owner -> check
                .withTag("tenantName", owner.getTenant())
                .withTag("app", owner.getApplication() + "." + owner.getInstance())
                .withTag("applicationName", owner.getApplication())
                .withTag("instanceName", owner.getInstance())
                .withTag("applicationId", owner.getTenant() + "." + owner.getApplication() + "." + owner.getInstance()));
        node.getMembership().ifPresent(membership -> check
                .withTag("clustertype", membership.getClusterType())
                .withTag("clusterid", membership.getClusterId()));
        node.getVespaVersion().ifPresent(version -> check.withTag("vespaVersion", version));
        return check;
    }

    /**
     * Writes the filebeat (log shipping) config for this node, if one can be generated.
     * Best-effort: any failure is logged, never propagated.
     */
    public void writeFilebeatConfig(NodeAgentContext context, NodeSpec node) {
        try {
            FilebeatConfigProvider filebeatConfigProvider = new FilebeatConfigProvider(environment);
            Optional<String> config = filebeatConfigProvider.getConfig(context, node);
            if (!config.isPresent()) return;

            Path filebeatPath = context.pathOnHostFromPathInNode("/etc/filebeat/filebeat.yml");
            Files.write(filebeatPath, config.get().getBytes());
            context.log(logger, "Wrote filebeat config");
        } catch (Throwable t) {
            // Deliberately broad: filebeat config is non-critical, the agent must keep running
            context.log(logger, LogLevel.ERROR, "Failed writing filebeat config", t);
        }
    }

    /** Returns the container's disk usage in bytes, or empty if it could not be measured. */
    public Optional<Long> getDiskUsageFor(NodeAgentContext context) {
        Path containerDir = context.pathOnHostFromPathInNode("/");
        try {
            return Optional.of(getDiskUsedInBytes(containerDir));
        } catch (Throwable e) {
            context.log(logger, LogLevel.WARNING, "Problems during disk usage calculations in "
                    + containerDir.toAbsolutePath(), e);
            return Optional.empty();
        }
    }

    /**
     * Measures disk usage of {@code path} with du(1), returning bytes; 0 if the path does not exist.
     * @throws RuntimeException if du times out (60s) or produces unexpected output
     */
    long getDiskUsedInBytes(Path path) throws IOException, InterruptedException {
        if (!Files.exists(path)) return 0;

        // -x: stay on one filesystem, -s: summary only, -k: size in KiB
        Process duCommand = new ProcessBuilder().command("du", "-xsk", path.toString()).start();
        if (!duCommand.waitFor(60, TimeUnit.SECONDS)) {
            duCommand.destroy();
            duCommand.waitFor(); // reap the destroyed process before giving up
            throw new RuntimeException("Disk usage command timed out, aborting.");
        }
        String output = IOUtils.readAll(new InputStreamReader(duCommand.getInputStream()));
        String[] results = output.split("\t");
        if (results.length != 2) {
            throw new RuntimeException("Result from disk usage command not as expected: " + output);
        }

        long diskUsageKB = Long.parseLong(results[0]); // parseLong: no boxing, unlike Long.valueOf
        return diskUsageKB * 1024;
    }

    /** Checks if container has any new coredumps, reports and archives them if so */
    public void handleCoreDumpsForContainer(NodeAgentContext context, NodeSpec node) {
        final Path coredumpsPath = context.pathOnHostFromPathInNode(
                context.pathInNodeUnderVespaHome("var/crash"));
        final Map<String, Object> nodeAttributes = getCoredumpNodeAttributes(node);
        try {
            coredumpHandler.processAll(coredumpsPath, nodeAttributes);
        } catch (IOException e) {
            throw new UncheckedIOException("Failed to process coredumps", e);
        }
    }

    /** Builds the metadata map attached to each reported coredump. */
    private Map<String, Object> getCoredumpNodeAttributes(NodeSpec node) {
        Map<String, Object> attributes = new HashMap<>();
        attributes.put("hostname", node.getHostname());
        attributes.put("parent_hostname", environment.getParentHostHostname());
        attributes.put("region", environment.getRegion());
        attributes.put("environment", environment.getEnvironment());
        attributes.put("flavor", node.getFlavor());
        attributes.put("kernel_version", System.getProperty("os.version"));

        node.getCurrentDockerImage().ifPresent(image -> attributes.put("docker_image", image.asString()));
        node.getVespaVersion().ifPresent(version -> attributes.put("vespa_version", version));
        node.getOwner().ifPresent(owner -> {
            attributes.put("tenant", owner.getTenant());
            attributes.put("application", owner.getApplication());
            attributes.put("instance", owner.getInstance());
        });
        return Collections.unmodifiableMap(attributes);
    }

    /**
     * Prepares the container-storage for the next container by deleting/archiving all the data of
     * the current container: the logs directory is moved to the archive, everything else is deleted.
     * Runs when a container enters state "dirty".
     */
    public void archiveNodeStorage(NodeAgentContext context) {
        Path logsDirInContainer = context.pathInNodeUnderVespaHome("logs");
        Path containerLogsOnHost = context.pathOnHostFromPathInNode(logsDirInContainer);
        Path containerLogsInArchiveDir = archiveContainerStoragePath
                .resolve(context.containerName().asString() + "_" + DATE_TIME_FORMATTER.format(Instant.now()) + logsDirInContainer);

        new UnixPath(containerLogsInArchiveDir).createParents();
        new UnixPath(containerLogsOnHost).moveIfExists(containerLogsInArchiveDir);
        new UnixPath(context.pathOnHostFromPathInNode("/")).deleteRecursively();
    }

    /**
     * Runs node-maintainer's SpecVerifier and returns its output
     * @param node Node specification containing the expected values we want to verify against
     * @return new combined hardware divergence
     * @throws RuntimeException if exit code != 0
     */
    public String getHardwareDivergence(NodeSpec node) {
        List<String> arguments = new ArrayList<>(Arrays.asList("specification",
                "--disk", Double.toString(node.getMinDiskAvailableGb()),
                "--memory", Double.toString(node.getMinMainMemoryAvailableGb()),
                "--cpu_cores", Double.toString(node.getMinCpuCores()),
                "--is_ssd", Boolean.toString(node.isFastDisk()),
                "--bandwidth", Double.toString(node.getBandwidth()),
                "--ips", String.join(",", node.getIpAddresses())));

        if (environment.getDockerNetworking() == DockerNetworking.HOST_NETWORK) {
            arguments.add("--skip-reverse-lookup");
        }

        node.getHardwareDivergence().ifPresent(hardwareDivergence -> {
            arguments.add("--divergence");
            arguments.add(hardwareDivergence);
        });

        return executeMaintainer("com.yahoo.vespa.hosted.node.verification.Main", arguments.toArray(new String[0]));
    }

    /**
     * Runs the given node-maintainer main class via maintenance.sh as root and returns its
     * trimmed stdout/stderr.
     * @throws RuntimeException if the command cannot be started or exits with a non-zero code
     */
    private String executeMaintainer(String mainClass, String... args) {
        String[] command = Stream.concat(
                Stream.of("sudo",
                        "VESPA_HOSTNAME=" + getDefaults().vespaHostname(),
                        "VESPA_HOME=" + getDefaults().vespaHome(),
                        getDefaults().underVespaHome("libexec/vespa/node-admin/maintenance.sh"),
                        mainClass),
                Stream.of(args))
                .toArray(String[]::new);

        try {
            Pair<Integer, String> result = processExecuter.exec(command);
            if (result.getFirst() != 0) {
                throw new RuntimeException(
                        String.format("Maintainer failed to execute command: %s, Exit code: %d, Stdout/stderr: %s",
                                Arrays.toString(command), result.getFirst(), result.getSecond()));
            }
            return result.getSecond().trim();
        } catch (IOException e) {
            throw new RuntimeException("Failed to execute maintainer", e);
        }
    }
}
This comment is strange: all of these paths are already unique to each container, since each container has its own mount namespace.
/**
 * Registers the volume mounts the container needs: per-container log/var/tmp directories
 * plus a few node-type-specific shared volumes.
 */
private static void addMounts(NodeAgentContext context, Docker.CreateContainerCommand command) {
    final Path varLibSia = Paths.get("/var/lib/sia");

    // BUG FIX: Arrays.asList(...) returns a fixed-size list, so the conditional
    // paths.add(...) calls below threw UnsupportedOperationException for proxyhost/host
    // nodes. Wrap in a mutable ArrayList.
    List<Path> paths = new ArrayList<>(Arrays.asList(
            Paths.get("/etc/yamas-agent"),
            Paths.get("/etc/filebeat"),
            context.pathInNodeUnderVespaHome("logs/daemontools_y"),
            context.pathInNodeUnderVespaHome("logs/jdisc_core"),
            context.pathInNodeUnderVespaHome("logs/langdetect/"),
            context.pathInNodeUnderVespaHome("logs/nginx"),
            context.pathInNodeUnderVespaHome("logs/vespa"),
            context.pathInNodeUnderVespaHome("logs/yca"),
            context.pathInNodeUnderVespaHome("logs/yck"),
            context.pathInNodeUnderVespaHome("logs/yell"),
            context.pathInNodeUnderVespaHome("logs/ykeykey"),
            context.pathInNodeUnderVespaHome("logs/ykeykeyd"),
            context.pathInNodeUnderVespaHome("logs/yms_agent"),
            context.pathInNodeUnderVespaHome("logs/ysar"),
            context.pathInNodeUnderVespaHome("logs/ystatus"),
            context.pathInNodeUnderVespaHome("logs/zpu"),
            context.pathInNodeUnderVespaHome("var/cache"),
            context.pathInNodeUnderVespaHome("var/crash"),
            context.pathInNodeUnderVespaHome("var/db/jdisc"),
            context.pathInNodeUnderVespaHome("var/db/vespa"),
            context.pathInNodeUnderVespaHome("var/jdisc_container"),
            context.pathInNodeUnderVespaHome("var/jdisc_core"),
            context.pathInNodeUnderVespaHome("var/maven"),
            context.pathInNodeUnderVespaHome("var/mediasearch"),
            context.pathInNodeUnderVespaHome("var/run"),
            context.pathInNodeUnderVespaHome("var/scoreboards"),
            context.pathInNodeUnderVespaHome("var/service"),
            context.pathInNodeUnderVespaHome("var/share"),
            context.pathInNodeUnderVespaHome("var/spool"),
            context.pathInNodeUnderVespaHome("var/vespa"),
            context.pathInNodeUnderVespaHome("var/yca"),
            context.pathInNodeUnderVespaHome("var/ycore++"),
            context.pathInNodeUnderVespaHome("var/zookeeper"),
            context.pathInNodeUnderVespaHome("tmp"),
            context.pathInNodeUnderVespaHome("var/container-data")));

    if (context.nodeType() == NodeType.proxyhost)
        paths.add(context.pathInNodeUnderVespaHome("var/vespa-hosted/routing"));
    if (context.nodeType() == NodeType.host)
        paths.add(varLibSia);

    paths.forEach(path -> command.withVolume(context.pathOnHostFromPathInNode(path), path));

    // Infrastructure hosts share the Athenz SIA directory with their containers
    if (isInfrastructureHost(context.nodeType()))
        command.withSharedVolume(varLibSia, varLibSia);

    if (context.nodeType() == NodeType.proxyhost)
        command.withSharedVolume(Paths.get("/opt/yahoo/share/ssl/certs"), Paths.get("/opt/yahoo/share/ssl/certs"));

    if (context.nodeType() == NodeType.host)
        command.withSharedVolume(Paths.get("/var/zpe"), context.pathInNodeUnderVespaHome("var/zpe"));
}
/**
 * Sets up the container's volume mounts: a fixed set of per-container log/var/tmp
 * directories, extended with node-type-specific entries and shared volumes.
 */
private static void addMounts(NodeAgentContext context, Docker.CreateContainerCommand command) {
    final Path varLibSia = Paths.get("/var/lib/sia");
    final NodeType nodeType = context.nodeType();

    // Mutable copy so node-type-specific paths can be appended below
    List<Path> mountPoints = new ArrayList<>(Arrays.asList(
            Paths.get("/etc/yamas-agent"),
            Paths.get("/etc/filebeat"),
            context.pathInNodeUnderVespaHome("logs/daemontools_y"),
            context.pathInNodeUnderVespaHome("logs/jdisc_core"),
            context.pathInNodeUnderVespaHome("logs/langdetect/"),
            context.pathInNodeUnderVespaHome("logs/nginx"),
            context.pathInNodeUnderVespaHome("logs/vespa"),
            context.pathInNodeUnderVespaHome("logs/yca"),
            context.pathInNodeUnderVespaHome("logs/yck"),
            context.pathInNodeUnderVespaHome("logs/yell"),
            context.pathInNodeUnderVespaHome("logs/ykeykey"),
            context.pathInNodeUnderVespaHome("logs/ykeykeyd"),
            context.pathInNodeUnderVespaHome("logs/yms_agent"),
            context.pathInNodeUnderVespaHome("logs/ysar"),
            context.pathInNodeUnderVespaHome("logs/ystatus"),
            context.pathInNodeUnderVespaHome("logs/zpu"),
            context.pathInNodeUnderVespaHome("var/cache"),
            context.pathInNodeUnderVespaHome("var/crash"),
            context.pathInNodeUnderVespaHome("var/db/jdisc"),
            context.pathInNodeUnderVespaHome("var/db/vespa"),
            context.pathInNodeUnderVespaHome("var/jdisc_container"),
            context.pathInNodeUnderVespaHome("var/jdisc_core"),
            context.pathInNodeUnderVespaHome("var/maven"),
            context.pathInNodeUnderVespaHome("var/mediasearch"),
            context.pathInNodeUnderVespaHome("var/run"),
            context.pathInNodeUnderVespaHome("var/scoreboards"),
            context.pathInNodeUnderVespaHome("var/service"),
            context.pathInNodeUnderVespaHome("var/share"),
            context.pathInNodeUnderVespaHome("var/spool"),
            context.pathInNodeUnderVespaHome("var/vespa"),
            context.pathInNodeUnderVespaHome("var/yca"),
            context.pathInNodeUnderVespaHome("var/ycore++"),
            context.pathInNodeUnderVespaHome("var/zookeeper"),
            context.pathInNodeUnderVespaHome("tmp"),
            context.pathInNodeUnderVespaHome("var/container-data")));

    if (nodeType == NodeType.proxyhost) {
        mountPoints.add(context.pathInNodeUnderVespaHome("var/vespa-hosted/routing"));
    }
    if (nodeType == NodeType.host) {
        mountPoints.add(varLibSia);
    }

    for (Path inContainer : mountPoints) {
        command.withVolume(context.pathOnHostFromPathInNode(inContainer), inContainer);
    }

    // Infrastructure hosts share the Athenz SIA directory with their containers
    if (isInfrastructureHost(nodeType)) {
        command.withSharedVolume(varLibSia, varLibSia);
    }
    if (nodeType == NodeType.proxyhost) {
        command.withSharedVolume(Paths.get("/opt/yahoo/share/ssl/certs"), Paths.get("/opt/yahoo/share/ssl/certs"));
    }
    if (nodeType == NodeType.host) {
        command.withSharedVolume(Paths.get("/var/zpe"), context.pathInNodeUnderVespaHome("var/zpe"));
    }
}
class DockerOperationsImpl implements DockerOperations { private static final Logger logger = Logger.getLogger(DockerOperationsImpl.class.getName()); private static final String MANAGER_NAME = "node-admin"; private static final String IPV6_NPT_PREFIX = "fd00::"; private static final String IPV4_NPT_PREFIX = "172.17.0.0"; private final Docker docker; private final Environment environment; private final ProcessExecuter processExecuter; public DockerOperationsImpl(Docker docker, Environment environment, ProcessExecuter processExecuter) { this.docker = docker; this.environment = environment; this.processExecuter = processExecuter; } @Override public void createContainer(NodeAgentContext context, NodeSpec node, ContainerData containerData) { context.log(logger, "Creating container"); Inet6Address ipV6Address = environment.getIpAddresses().getIPv6Address(node.getHostname()).orElseThrow( () -> new RuntimeException("Unable to find a valid IPv6 address for " + node.getHostname() + ". Missing an AAAA DNS entry?")); String configServers = String.join(",", environment.getConfigServerHostNames()); Docker.CreateContainerCommand command = docker.createContainerCommand( node.getWantedDockerImage().get(), ContainerResources.from(node.getMinCpuCores(), node.getMinMainMemoryAvailableGb()), context.containerName(), node.getHostname()) .withManagedBy(MANAGER_NAME) .withEnvironment("VESPA_CONFIGSERVERS", configServers) .withEnvironment("CONTAINER_ENVIRONMENT_SETTINGS", environment.getContainerEnvironmentResolver().createSettings(environment, node)) .withUlimit("nofile", 262_144, 262_144) .withUlimit("nproc", 32_768, 409_600) .withUlimit("core", -1, -1) .withAddCapability("SYS_PTRACE") .withAddCapability("SYS_ADMIN"); DockerNetworking networking = environment.getDockerNetworking(); command.withNetworkMode(networking.getDockerNetworkMode()); if (networking == DockerNetworking.NPT) { InetAddress ipV6Prefix = InetAddresses.forString(IPV6_NPT_PREFIX); InetAddress ipV6Local = 
IPAddresses.prefixTranslate(ipV6Address, ipV6Prefix, 8); command.withIpAddress(ipV6Local); Optional<InetAddress> ipV4Local = environment.getIpAddresses().getIPv4Address(node.getHostname()) .map(ipV4Address -> { InetAddress ipV4Prefix = InetAddresses.forString(IPV4_NPT_PREFIX); return IPAddresses.prefixTranslate(ipV4Address, ipV4Prefix, 2); }); ipV4Local.ifPresent(command::withIpAddress); addEtcHosts(containerData, node.getHostname(), ipV4Local, ipV6Local); } addMounts(context, command); long minMainMemoryAvailableMb = (long) (node.getMinMainMemoryAvailableGb() * 1024); if (minMainMemoryAvailableMb > 0) { command.withEnvironment("VESPA_TOTAL_MEMORY_MB", Long.toString(minMainMemoryAvailableMb)); } logger.info("Creating new container with args: " + command); command.create(); } void addEtcHosts(ContainerData containerData, String hostname, Optional<InetAddress> ipV4Local, InetAddress ipV6Local) { StringBuilder etcHosts = new StringBuilder( " "127.0.0.1\tlocalhost\n" + "::1\tlocalhost ip6-localhost ip6-loopback\n" + "fe00::0\tip6-localnet\n" + "ff00::0\tip6-mcastprefix\n" + "ff02::1\tip6-allnodes\n" + "ff02::2\tip6-allrouters\n" + ipV6Local.getHostAddress() + '\t' + hostname + '\n'); ipV4Local.ifPresent(ipv4 -> etcHosts.append(ipv4.getHostAddress() + '\t' + hostname + '\n')); containerData.addFile(Paths.get("/etc/hosts"), etcHosts.toString()); } @Override public void startContainer(NodeAgentContext context) { context.log(logger, "Starting container"); docker.startContainer(context.containerName()); } @Override public void removeContainer(NodeAgentContext context, Container container) { if (container.state.isRunning()) { context.log(logger, "Stopping container"); docker.stopContainer(context.containerName()); } context.log(logger, "Deleting container"); docker.deleteContainer(context.containerName()); } @Override public Optional<Container> getContainer(NodeAgentContext context) { return docker.getContainer(context.containerName()); } @Override public boolean 
pullImageAsyncIfNeeded(DockerImage dockerImage) { return docker.pullImageAsyncIfNeeded(dockerImage); } @Override public ProcessResult executeCommandInContainerAsRoot(NodeAgentContext context, Long timeoutSeconds, String... command) { return docker.executeInContainerAsUser(context.containerName(), "root", OptionalLong.of(timeoutSeconds), command); } @Override public ProcessResult executeCommandInContainerAsRoot(NodeAgentContext context, String... command) { context.log(logger, context.containerName().asString() + " " + Arrays.asList(command)); return docker.executeInContainerAsUser(context.containerName(), "root", OptionalLong.empty(), command); } @Override public ProcessResult executeCommandInNetworkNamespace(ContainerName containerName, String... command) { final Integer containerPid = docker.getContainer(containerName) .filter(container -> container.state.isRunning()) .map(container -> container.pid) .orElseThrow(() -> new RuntimeException("PID not found for container with name: " + containerName.asString())); final String[] wrappedCommand = Stream.concat( Stream.of("nsenter", String.format("--net=/proc/%d/ns/net", containerPid), "--"), Stream.of(command)) .toArray(String[]::new); try { Pair<Integer, String> result = processExecuter.exec(wrappedCommand); if (result.getFirst() != 0) { throw new RuntimeException(String.format( "Failed to execute %s in network namespace for %s (PID = %d), exit code: %d, output: %s", Arrays.toString(wrappedCommand), containerName.asString(), containerPid, result.getFirst(), result.getSecond())); } return new ProcessResult(0, result.getSecond(), ""); } catch (IOException e) { throw new RuntimeException(String.format("IOException while executing %s in network namespace for %s (PID = %d)", Arrays.toString(wrappedCommand), containerName.asString(), containerPid), e); } } @Override public void resumeNode(NodeAgentContext context) { executeNodeCtlInContainer(context, "resume"); } @Override public void suspendNode(NodeAgentContext context) 
{ executeNodeCtlInContainer(context, "suspend"); } @Override public void restartVespa(NodeAgentContext context) { executeNodeCtlInContainer(context, "restart-vespa"); } @Override public void startServices(NodeAgentContext context) { executeNodeCtlInContainer(context, "start"); } @Override public void stopServices(NodeAgentContext context) { executeNodeCtlInContainer(context, "stop"); } ProcessResult executeNodeCtlInContainer(NodeAgentContext context, String program) { String[] command = new String[] {context.pathInNodeUnderVespaHome("bin/vespa-nodectl").toString(), program}; ProcessResult result = executeCommandInContainerAsRoot(context, command); if (!result.isSuccess()) { throw new RuntimeException("Container " + context.containerName().asString() + ": command " + Arrays.toString(command) + " failed: " + result); } return result; } @Override public Optional<ContainerStats> getContainerStats(NodeAgentContext context) { return docker.getContainerStats(context.containerName()); } @Override public List<Container> getAllManagedContainers() { return docker.getAllContainersManagedBy(MANAGER_NAME); } /** * Returns map of directories to mount and whether they should be writable by everyone */ /** Returns whether given nodeType is a Docker host for infrastructure nodes */ private static boolean isInfrastructureHost(NodeType nodeType) { return nodeType == NodeType.confighost || nodeType == NodeType.proxyhost || nodeType == NodeType.controllerhost; } }
class DockerOperationsImpl implements DockerOperations { private static final Logger logger = Logger.getLogger(DockerOperationsImpl.class.getName()); private static final String MANAGER_NAME = "node-admin"; private static final String IPV6_NPT_PREFIX = "fd00::"; private static final String IPV4_NPT_PREFIX = "172.17.0.0"; private final Docker docker; private final Environment environment; private final ProcessExecuter processExecuter; public DockerOperationsImpl(Docker docker, Environment environment, ProcessExecuter processExecuter) { this.docker = docker; this.environment = environment; this.processExecuter = processExecuter; } @Override public void createContainer(NodeAgentContext context, NodeSpec node, ContainerData containerData) { context.log(logger, "Creating container"); Inet6Address ipV6Address = environment.getIpAddresses().getIPv6Address(node.getHostname()).orElseThrow( () -> new RuntimeException("Unable to find a valid IPv6 address for " + node.getHostname() + ". Missing an AAAA DNS entry?")); String configServers = String.join(",", environment.getConfigServerHostNames()); Docker.CreateContainerCommand command = docker.createContainerCommand( node.getWantedDockerImage().get(), ContainerResources.from(node.getMinCpuCores(), node.getMinMainMemoryAvailableGb()), context.containerName(), node.getHostname()) .withManagedBy(MANAGER_NAME) .withEnvironment("VESPA_CONFIGSERVERS", configServers) .withEnvironment("CONTAINER_ENVIRONMENT_SETTINGS", environment.getContainerEnvironmentResolver().createSettings(environment, node)) .withUlimit("nofile", 262_144, 262_144) .withUlimit("nproc", 32_768, 409_600) .withUlimit("core", -1, -1) .withAddCapability("SYS_PTRACE") .withAddCapability("SYS_ADMIN"); DockerNetworking networking = environment.getDockerNetworking(); command.withNetworkMode(networking.getDockerNetworkMode()); if (networking == DockerNetworking.NPT) { InetAddress ipV6Prefix = InetAddresses.forString(IPV6_NPT_PREFIX); InetAddress ipV6Local = 
IPAddresses.prefixTranslate(ipV6Address, ipV6Prefix, 8); command.withIpAddress(ipV6Local); Optional<InetAddress> ipV4Local = environment.getIpAddresses().getIPv4Address(node.getHostname()) .map(ipV4Address -> { InetAddress ipV4Prefix = InetAddresses.forString(IPV4_NPT_PREFIX); return IPAddresses.prefixTranslate(ipV4Address, ipV4Prefix, 2); }); ipV4Local.ifPresent(command::withIpAddress); addEtcHosts(containerData, node.getHostname(), ipV4Local, ipV6Local); } addMounts(context, command); long minMainMemoryAvailableMb = (long) (node.getMinMainMemoryAvailableGb() * 1024); if (minMainMemoryAvailableMb > 0) { command.withEnvironment("VESPA_TOTAL_MEMORY_MB", Long.toString(minMainMemoryAvailableMb)); } logger.info("Creating new container with args: " + command); command.create(); } void addEtcHosts(ContainerData containerData, String hostname, Optional<InetAddress> ipV4Local, InetAddress ipV6Local) { StringBuilder etcHosts = new StringBuilder( " "127.0.0.1\tlocalhost\n" + "::1\tlocalhost ip6-localhost ip6-loopback\n" + "fe00::0\tip6-localnet\n" + "ff00::0\tip6-mcastprefix\n" + "ff02::1\tip6-allnodes\n" + "ff02::2\tip6-allrouters\n" + ipV6Local.getHostAddress() + '\t' + hostname + '\n'); ipV4Local.ifPresent(ipv4 -> etcHosts.append(ipv4.getHostAddress() + '\t' + hostname + '\n')); containerData.addFile(Paths.get("/etc/hosts"), etcHosts.toString()); } @Override public void startContainer(NodeAgentContext context) { context.log(logger, "Starting container"); docker.startContainer(context.containerName()); } @Override public void removeContainer(NodeAgentContext context, Container container) { if (container.state.isRunning()) { context.log(logger, "Stopping container"); docker.stopContainer(context.containerName()); } context.log(logger, "Deleting container"); docker.deleteContainer(context.containerName()); } @Override public Optional<Container> getContainer(NodeAgentContext context) { return docker.getContainer(context.containerName()); } @Override public boolean 
pullImageAsyncIfNeeded(DockerImage dockerImage) { return docker.pullImageAsyncIfNeeded(dockerImage); } @Override public ProcessResult executeCommandInContainerAsRoot(NodeAgentContext context, Long timeoutSeconds, String... command) { return docker.executeInContainerAsUser(context.containerName(), "root", OptionalLong.of(timeoutSeconds), command); } @Override public ProcessResult executeCommandInContainerAsRoot(NodeAgentContext context, String... command) { return docker.executeInContainerAsUser(context.containerName(), "root", OptionalLong.empty(), command); } @Override public ProcessResult executeCommandInNetworkNamespace(ContainerName containerName, String... command) { final Integer containerPid = docker.getContainer(containerName) .filter(container -> container.state.isRunning()) .map(container -> container.pid) .orElseThrow(() -> new RuntimeException("PID not found for container with name: " + containerName.asString())); final String[] wrappedCommand = Stream.concat( Stream.of("nsenter", String.format("--net=/proc/%d/ns/net", containerPid), "--"), Stream.of(command)) .toArray(String[]::new); try { Pair<Integer, String> result = processExecuter.exec(wrappedCommand); if (result.getFirst() != 0) { throw new RuntimeException(String.format( "Failed to execute %s in network namespace for %s (PID = %d), exit code: %d, output: %s", Arrays.toString(wrappedCommand), containerName.asString(), containerPid, result.getFirst(), result.getSecond())); } return new ProcessResult(0, result.getSecond(), ""); } catch (IOException e) { throw new RuntimeException(String.format("IOException while executing %s in network namespace for %s (PID = %d)", Arrays.toString(wrappedCommand), containerName.asString(), containerPid), e); } } @Override public void resumeNode(NodeAgentContext context) { executeNodeCtlInContainer(context, "resume"); } @Override public void suspendNode(NodeAgentContext context) { executeNodeCtlInContainer(context, "suspend"); } @Override public void 
restartVespa(NodeAgentContext context) { executeNodeCtlInContainer(context, "restart-vespa"); } @Override public void startServices(NodeAgentContext context) { executeNodeCtlInContainer(context, "start"); } @Override public void stopServices(NodeAgentContext context) { executeNodeCtlInContainer(context, "stop"); } ProcessResult executeNodeCtlInContainer(NodeAgentContext context, String program) { String[] command = new String[] {context.pathInNodeUnderVespaHome("bin/vespa-nodectl").toString(), program}; ProcessResult result = executeCommandInContainerAsRoot(context, command); if (!result.isSuccess()) { throw new RuntimeException("Container " + context.containerName().asString() + ": command " + Arrays.toString(command) + " failed: " + result); } return result; } @Override public Optional<ContainerStats> getContainerStats(NodeAgentContext context) { return docker.getContainerStats(context.containerName()); } @Override public List<Container> getAllManagedContainers() { return docker.getAllContainersManagedBy(MANAGER_NAME); } /** * Returns map of directories to mount and whether they should be writable by everyone */ /** Returns whether given nodeType is a Docker host for infrastructure nodes */ private static boolean isInfrastructureHost(NodeType nodeType) { return nodeType == NodeType.confighost || nodeType == NodeType.proxyhost || nodeType == NodeType.controllerhost; } }
The source below is unique — it has no duplicate elsewhere in this file.
private static void addMounts(NodeAgentContext context, Docker.CreateContainerCommand command) { final Path varLibSia = Paths.get("/var/lib/sia"); List<Path> paths = Arrays.asList( Paths.get("/etc/yamas-agent"), Paths.get("/etc/filebeat"), context.pathInNodeUnderVespaHome("logs/daemontools_y"), context.pathInNodeUnderVespaHome("logs/jdisc_core"), context.pathInNodeUnderVespaHome("logs/langdetect/"), context.pathInNodeUnderVespaHome("logs/nginx"), context.pathInNodeUnderVespaHome("logs/vespa"), context.pathInNodeUnderVespaHome("logs/yca"), context.pathInNodeUnderVespaHome("logs/yck"), context.pathInNodeUnderVespaHome("logs/yell"), context.pathInNodeUnderVespaHome("logs/ykeykey"), context.pathInNodeUnderVespaHome("logs/ykeykeyd"), context.pathInNodeUnderVespaHome("logs/yms_agent"), context.pathInNodeUnderVespaHome("logs/ysar"), context.pathInNodeUnderVespaHome("logs/ystatus"), context.pathInNodeUnderVespaHome("logs/zpu"), context.pathInNodeUnderVespaHome("var/cache"), context.pathInNodeUnderVespaHome("var/crash"), context.pathInNodeUnderVespaHome("var/db/jdisc"), context.pathInNodeUnderVespaHome("var/db/vespa"), context.pathInNodeUnderVespaHome("var/jdisc_container"), context.pathInNodeUnderVespaHome("var/jdisc_core"), context.pathInNodeUnderVespaHome("var/maven"), context.pathInNodeUnderVespaHome("var/mediasearch"), context.pathInNodeUnderVespaHome("var/run"), context.pathInNodeUnderVespaHome("var/scoreboards"), context.pathInNodeUnderVespaHome("var/service"), context.pathInNodeUnderVespaHome("var/share"), context.pathInNodeUnderVespaHome("var/spool"), context.pathInNodeUnderVespaHome("var/vespa"), context.pathInNodeUnderVespaHome("var/yca"), context.pathInNodeUnderVespaHome("var/ycore++"), context.pathInNodeUnderVespaHome("var/zookeeper"), context.pathInNodeUnderVespaHome("tmp"), context.pathInNodeUnderVespaHome("var/container-data")); if (context.nodeType() == NodeType.proxyhost) paths.add(context.pathInNodeUnderVespaHome("var/vespa-hosted/routing")); if 
(context.nodeType() == NodeType.host) paths.add(varLibSia); paths.forEach(path -> command.withVolume(context.pathOnHostFromPathInNode(path), path)); if (isInfrastructureHost(context.nodeType())) command.withSharedVolume(varLibSia, varLibSia); if (context.nodeType() == NodeType.proxyhost) command.withSharedVolume(Paths.get("/opt/yahoo/share/ssl/certs"), Paths.get("/opt/yahoo/share/ssl/certs")); if (context.nodeType() == NodeType.host) command.withSharedVolume(Paths.get("/var/zpe"), context.pathInNodeUnderVespaHome("var/zpe")); }
private static void addMounts(NodeAgentContext context, Docker.CreateContainerCommand command) { final Path varLibSia = Paths.get("/var/lib/sia"); List<Path> paths = new ArrayList<>(Arrays.asList( Paths.get("/etc/yamas-agent"), Paths.get("/etc/filebeat"), context.pathInNodeUnderVespaHome("logs/daemontools_y"), context.pathInNodeUnderVespaHome("logs/jdisc_core"), context.pathInNodeUnderVespaHome("logs/langdetect/"), context.pathInNodeUnderVespaHome("logs/nginx"), context.pathInNodeUnderVespaHome("logs/vespa"), context.pathInNodeUnderVespaHome("logs/yca"), context.pathInNodeUnderVespaHome("logs/yck"), context.pathInNodeUnderVespaHome("logs/yell"), context.pathInNodeUnderVespaHome("logs/ykeykey"), context.pathInNodeUnderVespaHome("logs/ykeykeyd"), context.pathInNodeUnderVespaHome("logs/yms_agent"), context.pathInNodeUnderVespaHome("logs/ysar"), context.pathInNodeUnderVespaHome("logs/ystatus"), context.pathInNodeUnderVespaHome("logs/zpu"), context.pathInNodeUnderVespaHome("var/cache"), context.pathInNodeUnderVespaHome("var/crash"), context.pathInNodeUnderVespaHome("var/db/jdisc"), context.pathInNodeUnderVespaHome("var/db/vespa"), context.pathInNodeUnderVespaHome("var/jdisc_container"), context.pathInNodeUnderVespaHome("var/jdisc_core"), context.pathInNodeUnderVespaHome("var/maven"), context.pathInNodeUnderVespaHome("var/mediasearch"), context.pathInNodeUnderVespaHome("var/run"), context.pathInNodeUnderVespaHome("var/scoreboards"), context.pathInNodeUnderVespaHome("var/service"), context.pathInNodeUnderVespaHome("var/share"), context.pathInNodeUnderVespaHome("var/spool"), context.pathInNodeUnderVespaHome("var/vespa"), context.pathInNodeUnderVespaHome("var/yca"), context.pathInNodeUnderVespaHome("var/ycore++"), context.pathInNodeUnderVespaHome("var/zookeeper"), context.pathInNodeUnderVespaHome("tmp"), context.pathInNodeUnderVespaHome("var/container-data"))); if (context.nodeType() == NodeType.proxyhost) 
paths.add(context.pathInNodeUnderVespaHome("var/vespa-hosted/routing")); if (context.nodeType() == NodeType.host) paths.add(varLibSia); paths.forEach(path -> command.withVolume(context.pathOnHostFromPathInNode(path), path)); if (isInfrastructureHost(context.nodeType())) command.withSharedVolume(varLibSia, varLibSia); if (context.nodeType() == NodeType.proxyhost) command.withSharedVolume(Paths.get("/opt/yahoo/share/ssl/certs"), Paths.get("/opt/yahoo/share/ssl/certs")); if (context.nodeType() == NodeType.host) command.withSharedVolume(Paths.get("/var/zpe"), context.pathInNodeUnderVespaHome("var/zpe")); }
class DockerOperationsImpl implements DockerOperations { private static final Logger logger = Logger.getLogger(DockerOperationsImpl.class.getName()); private static final String MANAGER_NAME = "node-admin"; private static final String IPV6_NPT_PREFIX = "fd00::"; private static final String IPV4_NPT_PREFIX = "172.17.0.0"; private final Docker docker; private final Environment environment; private final ProcessExecuter processExecuter; public DockerOperationsImpl(Docker docker, Environment environment, ProcessExecuter processExecuter) { this.docker = docker; this.environment = environment; this.processExecuter = processExecuter; } @Override public void createContainer(NodeAgentContext context, NodeSpec node, ContainerData containerData) { context.log(logger, "Creating container"); Inet6Address ipV6Address = environment.getIpAddresses().getIPv6Address(node.getHostname()).orElseThrow( () -> new RuntimeException("Unable to find a valid IPv6 address for " + node.getHostname() + ". Missing an AAAA DNS entry?")); String configServers = String.join(",", environment.getConfigServerHostNames()); Docker.CreateContainerCommand command = docker.createContainerCommand( node.getWantedDockerImage().get(), ContainerResources.from(node.getMinCpuCores(), node.getMinMainMemoryAvailableGb()), context.containerName(), node.getHostname()) .withManagedBy(MANAGER_NAME) .withEnvironment("VESPA_CONFIGSERVERS", configServers) .withEnvironment("CONTAINER_ENVIRONMENT_SETTINGS", environment.getContainerEnvironmentResolver().createSettings(environment, node)) .withUlimit("nofile", 262_144, 262_144) .withUlimit("nproc", 32_768, 409_600) .withUlimit("core", -1, -1) .withAddCapability("SYS_PTRACE") .withAddCapability("SYS_ADMIN"); DockerNetworking networking = environment.getDockerNetworking(); command.withNetworkMode(networking.getDockerNetworkMode()); if (networking == DockerNetworking.NPT) { InetAddress ipV6Prefix = InetAddresses.forString(IPV6_NPT_PREFIX); InetAddress ipV6Local = 
IPAddresses.prefixTranslate(ipV6Address, ipV6Prefix, 8); command.withIpAddress(ipV6Local); Optional<InetAddress> ipV4Local = environment.getIpAddresses().getIPv4Address(node.getHostname()) .map(ipV4Address -> { InetAddress ipV4Prefix = InetAddresses.forString(IPV4_NPT_PREFIX); return IPAddresses.prefixTranslate(ipV4Address, ipV4Prefix, 2); }); ipV4Local.ifPresent(command::withIpAddress); addEtcHosts(containerData, node.getHostname(), ipV4Local, ipV6Local); } addMounts(context, command); long minMainMemoryAvailableMb = (long) (node.getMinMainMemoryAvailableGb() * 1024); if (minMainMemoryAvailableMb > 0) { command.withEnvironment("VESPA_TOTAL_MEMORY_MB", Long.toString(minMainMemoryAvailableMb)); } logger.info("Creating new container with args: " + command); command.create(); } void addEtcHosts(ContainerData containerData, String hostname, Optional<InetAddress> ipV4Local, InetAddress ipV6Local) { StringBuilder etcHosts = new StringBuilder( " "127.0.0.1\tlocalhost\n" + "::1\tlocalhost ip6-localhost ip6-loopback\n" + "fe00::0\tip6-localnet\n" + "ff00::0\tip6-mcastprefix\n" + "ff02::1\tip6-allnodes\n" + "ff02::2\tip6-allrouters\n" + ipV6Local.getHostAddress() + '\t' + hostname + '\n'); ipV4Local.ifPresent(ipv4 -> etcHosts.append(ipv4.getHostAddress() + '\t' + hostname + '\n')); containerData.addFile(Paths.get("/etc/hosts"), etcHosts.toString()); } @Override public void startContainer(NodeAgentContext context) { context.log(logger, "Starting container"); docker.startContainer(context.containerName()); } @Override public void removeContainer(NodeAgentContext context, Container container) { if (container.state.isRunning()) { context.log(logger, "Stopping container"); docker.stopContainer(context.containerName()); } context.log(logger, "Deleting container"); docker.deleteContainer(context.containerName()); } @Override public Optional<Container> getContainer(NodeAgentContext context) { return docker.getContainer(context.containerName()); } @Override public boolean 
pullImageAsyncIfNeeded(DockerImage dockerImage) { return docker.pullImageAsyncIfNeeded(dockerImage); } @Override public ProcessResult executeCommandInContainerAsRoot(NodeAgentContext context, Long timeoutSeconds, String... command) { return docker.executeInContainerAsUser(context.containerName(), "root", OptionalLong.of(timeoutSeconds), command); } @Override public ProcessResult executeCommandInContainerAsRoot(NodeAgentContext context, String... command) { context.log(logger, context.containerName().asString() + " " + Arrays.asList(command)); return docker.executeInContainerAsUser(context.containerName(), "root", OptionalLong.empty(), command); } @Override public ProcessResult executeCommandInNetworkNamespace(ContainerName containerName, String... command) { final Integer containerPid = docker.getContainer(containerName) .filter(container -> container.state.isRunning()) .map(container -> container.pid) .orElseThrow(() -> new RuntimeException("PID not found for container with name: " + containerName.asString())); final String[] wrappedCommand = Stream.concat( Stream.of("nsenter", String.format("--net=/proc/%d/ns/net", containerPid), "--"), Stream.of(command)) .toArray(String[]::new); try { Pair<Integer, String> result = processExecuter.exec(wrappedCommand); if (result.getFirst() != 0) { throw new RuntimeException(String.format( "Failed to execute %s in network namespace for %s (PID = %d), exit code: %d, output: %s", Arrays.toString(wrappedCommand), containerName.asString(), containerPid, result.getFirst(), result.getSecond())); } return new ProcessResult(0, result.getSecond(), ""); } catch (IOException e) { throw new RuntimeException(String.format("IOException while executing %s in network namespace for %s (PID = %d)", Arrays.toString(wrappedCommand), containerName.asString(), containerPid), e); } } @Override public void resumeNode(NodeAgentContext context) { executeNodeCtlInContainer(context, "resume"); } @Override public void suspendNode(NodeAgentContext context) 
{ executeNodeCtlInContainer(context, "suspend"); } @Override public void restartVespa(NodeAgentContext context) { executeNodeCtlInContainer(context, "restart-vespa"); } @Override public void startServices(NodeAgentContext context) { executeNodeCtlInContainer(context, "start"); } @Override public void stopServices(NodeAgentContext context) { executeNodeCtlInContainer(context, "stop"); } ProcessResult executeNodeCtlInContainer(NodeAgentContext context, String program) { String[] command = new String[] {context.pathInNodeUnderVespaHome("bin/vespa-nodectl").toString(), program}; ProcessResult result = executeCommandInContainerAsRoot(context, command); if (!result.isSuccess()) { throw new RuntimeException("Container " + context.containerName().asString() + ": command " + Arrays.toString(command) + " failed: " + result); } return result; } @Override public Optional<ContainerStats> getContainerStats(NodeAgentContext context) { return docker.getContainerStats(context.containerName()); } @Override public List<Container> getAllManagedContainers() { return docker.getAllContainersManagedBy(MANAGER_NAME); } /** * Returns map of directories to mount and whether they should be writable by everyone */ /** Returns whether given nodeType is a Docker host for infrastructure nodes */ private static boolean isInfrastructureHost(NodeType nodeType) { return nodeType == NodeType.confighost || nodeType == NodeType.proxyhost || nodeType == NodeType.controllerhost; } }
class DockerOperationsImpl implements DockerOperations { private static final Logger logger = Logger.getLogger(DockerOperationsImpl.class.getName()); private static final String MANAGER_NAME = "node-admin"; private static final String IPV6_NPT_PREFIX = "fd00::"; private static final String IPV4_NPT_PREFIX = "172.17.0.0"; private final Docker docker; private final Environment environment; private final ProcessExecuter processExecuter; public DockerOperationsImpl(Docker docker, Environment environment, ProcessExecuter processExecuter) { this.docker = docker; this.environment = environment; this.processExecuter = processExecuter; } @Override public void createContainer(NodeAgentContext context, NodeSpec node, ContainerData containerData) { context.log(logger, "Creating container"); Inet6Address ipV6Address = environment.getIpAddresses().getIPv6Address(node.getHostname()).orElseThrow( () -> new RuntimeException("Unable to find a valid IPv6 address for " + node.getHostname() + ". Missing an AAAA DNS entry?")); String configServers = String.join(",", environment.getConfigServerHostNames()); Docker.CreateContainerCommand command = docker.createContainerCommand( node.getWantedDockerImage().get(), ContainerResources.from(node.getMinCpuCores(), node.getMinMainMemoryAvailableGb()), context.containerName(), node.getHostname()) .withManagedBy(MANAGER_NAME) .withEnvironment("VESPA_CONFIGSERVERS", configServers) .withEnvironment("CONTAINER_ENVIRONMENT_SETTINGS", environment.getContainerEnvironmentResolver().createSettings(environment, node)) .withUlimit("nofile", 262_144, 262_144) .withUlimit("nproc", 32_768, 409_600) .withUlimit("core", -1, -1) .withAddCapability("SYS_PTRACE") .withAddCapability("SYS_ADMIN"); DockerNetworking networking = environment.getDockerNetworking(); command.withNetworkMode(networking.getDockerNetworkMode()); if (networking == DockerNetworking.NPT) { InetAddress ipV6Prefix = InetAddresses.forString(IPV6_NPT_PREFIX); InetAddress ipV6Local = 
IPAddresses.prefixTranslate(ipV6Address, ipV6Prefix, 8); command.withIpAddress(ipV6Local); Optional<InetAddress> ipV4Local = environment.getIpAddresses().getIPv4Address(node.getHostname()) .map(ipV4Address -> { InetAddress ipV4Prefix = InetAddresses.forString(IPV4_NPT_PREFIX); return IPAddresses.prefixTranslate(ipV4Address, ipV4Prefix, 2); }); ipV4Local.ifPresent(command::withIpAddress); addEtcHosts(containerData, node.getHostname(), ipV4Local, ipV6Local); } addMounts(context, command); long minMainMemoryAvailableMb = (long) (node.getMinMainMemoryAvailableGb() * 1024); if (minMainMemoryAvailableMb > 0) { command.withEnvironment("VESPA_TOTAL_MEMORY_MB", Long.toString(minMainMemoryAvailableMb)); } logger.info("Creating new container with args: " + command); command.create(); } void addEtcHosts(ContainerData containerData, String hostname, Optional<InetAddress> ipV4Local, InetAddress ipV6Local) { StringBuilder etcHosts = new StringBuilder( " "127.0.0.1\tlocalhost\n" + "::1\tlocalhost ip6-localhost ip6-loopback\n" + "fe00::0\tip6-localnet\n" + "ff00::0\tip6-mcastprefix\n" + "ff02::1\tip6-allnodes\n" + "ff02::2\tip6-allrouters\n" + ipV6Local.getHostAddress() + '\t' + hostname + '\n'); ipV4Local.ifPresent(ipv4 -> etcHosts.append(ipv4.getHostAddress() + '\t' + hostname + '\n')); containerData.addFile(Paths.get("/etc/hosts"), etcHosts.toString()); } @Override public void startContainer(NodeAgentContext context) { context.log(logger, "Starting container"); docker.startContainer(context.containerName()); } @Override public void removeContainer(NodeAgentContext context, Container container) { if (container.state.isRunning()) { context.log(logger, "Stopping container"); docker.stopContainer(context.containerName()); } context.log(logger, "Deleting container"); docker.deleteContainer(context.containerName()); } @Override public Optional<Container> getContainer(NodeAgentContext context) { return docker.getContainer(context.containerName()); } @Override public boolean 
pullImageAsyncIfNeeded(DockerImage dockerImage) { return docker.pullImageAsyncIfNeeded(dockerImage); } @Override public ProcessResult executeCommandInContainerAsRoot(NodeAgentContext context, Long timeoutSeconds, String... command) { return docker.executeInContainerAsUser(context.containerName(), "root", OptionalLong.of(timeoutSeconds), command); } @Override public ProcessResult executeCommandInContainerAsRoot(NodeAgentContext context, String... command) { return docker.executeInContainerAsUser(context.containerName(), "root", OptionalLong.empty(), command); } @Override public ProcessResult executeCommandInNetworkNamespace(ContainerName containerName, String... command) { final Integer containerPid = docker.getContainer(containerName) .filter(container -> container.state.isRunning()) .map(container -> container.pid) .orElseThrow(() -> new RuntimeException("PID not found for container with name: " + containerName.asString())); final String[] wrappedCommand = Stream.concat( Stream.of("nsenter", String.format("--net=/proc/%d/ns/net", containerPid), "--"), Stream.of(command)) .toArray(String[]::new); try { Pair<Integer, String> result = processExecuter.exec(wrappedCommand); if (result.getFirst() != 0) { throw new RuntimeException(String.format( "Failed to execute %s in network namespace for %s (PID = %d), exit code: %d, output: %s", Arrays.toString(wrappedCommand), containerName.asString(), containerPid, result.getFirst(), result.getSecond())); } return new ProcessResult(0, result.getSecond(), ""); } catch (IOException e) { throw new RuntimeException(String.format("IOException while executing %s in network namespace for %s (PID = %d)", Arrays.toString(wrappedCommand), containerName.asString(), containerPid), e); } } @Override public void resumeNode(NodeAgentContext context) { executeNodeCtlInContainer(context, "resume"); } @Override public void suspendNode(NodeAgentContext context) { executeNodeCtlInContainer(context, "suspend"); } @Override public void 
restartVespa(NodeAgentContext context) { executeNodeCtlInContainer(context, "restart-vespa"); } @Override public void startServices(NodeAgentContext context) { executeNodeCtlInContainer(context, "start"); } @Override public void stopServices(NodeAgentContext context) { executeNodeCtlInContainer(context, "stop"); } ProcessResult executeNodeCtlInContainer(NodeAgentContext context, String program) { String[] command = new String[] {context.pathInNodeUnderVespaHome("bin/vespa-nodectl").toString(), program}; ProcessResult result = executeCommandInContainerAsRoot(context, command); if (!result.isSuccess()) { throw new RuntimeException("Container " + context.containerName().asString() + ": command " + Arrays.toString(command) + " failed: " + result); } return result; } @Override public Optional<ContainerStats> getContainerStats(NodeAgentContext context) { return docker.getContainerStats(context.containerName()); } @Override public List<Container> getAllManagedContainers() { return docker.getAllContainersManagedBy(MANAGER_NAME); } /** * Returns map of directories to mount and whether they should be writable by everyone */ /** Returns whether given nodeType is a Docker host for infrastructure nodes */ private static boolean isInfrastructureHost(NodeType nodeType) { return nodeType == NodeType.confighost || nodeType == NodeType.proxyhost || nodeType == NodeType.controllerhost; } }
Should more from https://www.owasp.org/index.php/XML_External_Entity_(XXE)_Prevention_Cheat_Sheet#SAXReader be included as well?
public static void read(InputStream inputStream, FeedClient feedClient, AtomicInteger numSent) throws Exception { SAXParserFactory parserFactory = SAXParserFactory.newInstance(); parserFactory.setFeature("http: parserFactory.setValidating(false); parserFactory.setNamespaceAware(false); SAXParser parser = parserFactory.newSAXParser(); SAXClientFeeder saxClientFeeder = new SAXClientFeeder(feedClient, numSent); InputSource inputSource = new InputSource(); inputSource.setEncoding(StandardCharsets.UTF_8.displayName()); inputSource.setByteStream(inputStream); parser.setProperty("http: parser.parse(inputSource, saxClientFeeder); }
parserFactory.setFeature("http:
public static void read(InputStream inputStream, FeedClient feedClient, AtomicInteger numSent) throws Exception { SAXParserFactory parserFactory = SAXParserFactory.newInstance(); parserFactory.setFeature("http: parserFactory.setValidating(false); parserFactory.setNamespaceAware(false); SAXParser parser = parserFactory.newSAXParser(); SAXClientFeeder saxClientFeeder = new SAXClientFeeder(feedClient, numSent); InputSource inputSource = new InputSource(); inputSource.setEncoding(StandardCharsets.UTF_8.displayName()); inputSource.setByteStream(inputStream); parser.setProperty("http: parser.parse(inputSource, saxClientFeeder); }
class XmlFeedReader { private XmlFeedReader() {} }
class XmlFeedReader { private XmlFeedReader() {} }
I believe this disables all, and we don't use a SAXReader here.
public static void read(InputStream inputStream, FeedClient feedClient, AtomicInteger numSent) throws Exception { SAXParserFactory parserFactory = SAXParserFactory.newInstance(); parserFactory.setFeature("http: parserFactory.setValidating(false); parserFactory.setNamespaceAware(false); SAXParser parser = parserFactory.newSAXParser(); SAXClientFeeder saxClientFeeder = new SAXClientFeeder(feedClient, numSent); InputSource inputSource = new InputSource(); inputSource.setEncoding(StandardCharsets.UTF_8.displayName()); inputSource.setByteStream(inputStream); parser.setProperty("http: parser.parse(inputSource, saxClientFeeder); }
parserFactory.setFeature("http:
public static void read(InputStream inputStream, FeedClient feedClient, AtomicInteger numSent) throws Exception { SAXParserFactory parserFactory = SAXParserFactory.newInstance(); parserFactory.setFeature("http: parserFactory.setValidating(false); parserFactory.setNamespaceAware(false); SAXParser parser = parserFactory.newSAXParser(); SAXClientFeeder saxClientFeeder = new SAXClientFeeder(feedClient, numSent); InputSource inputSource = new InputSource(); inputSource.setEncoding(StandardCharsets.UTF_8.displayName()); inputSource.setByteStream(inputStream); parser.setProperty("http: parser.parse(inputSource, saxClientFeeder); }
class XmlFeedReader { private XmlFeedReader() {} }
class XmlFeedReader { private XmlFeedReader() {} }
Some parenthesis-wrapping would be nice here to make operator precedence more unambiguous
public ClientParameters parseCommandLineArguments(String[] args) throws IllegalArgumentException { try { CommandLineParser clp = new DefaultParser(); CommandLine cl = clp.parse(options, args); boolean printIdsOnly = cl.hasOption(PRINTIDS_OPTION); boolean headersOnly = cl.hasOption(HEADERSONLY_OPTION); String fieldSet = cl.getOptionValue(FIELDSET_OPTION, ""); String cluster = cl.getOptionValue(CLUSTER_OPTION, ""); String route = cl.getOptionValue(ROUTE_OPTION, ""); String configId = cl.getOptionValue(CONFIGID_OPTION, ""); boolean help = cl.hasOption(HELP_OPTION); String loadtype = cl.getOptionValue(LOADTYPE_OPTION, ""); boolean noRetry = cl.hasOption(NORETRY_OPTION); boolean showDocSize = cl.hasOption(SHOWDOCSIZE_OPTION); boolean jsonOutput = cl.hasOption(JSONOUTPUT_OPTION); boolean xmlOutput = cl.hasOption(XMLOUTPUT_OPTION); int trace = getTrace(cl); DocumentProtocol.Priority priority = getPriority(cl); double timeout = getTimeout(cl); Iterator<String> documentIds = getDocumentIds(cl); if (jsonOutput && xmlOutput) { throw new IllegalArgumentException("Cannot combine both xml and json output"); } if (printIdsOnly && headersOnly) { throw new IllegalArgumentException("Print ids and headers only options are mutually exclusive."); } if ((printIdsOnly || headersOnly) && !fieldSet.isEmpty()) { throw new IllegalArgumentException("Field set option can not be used in combination with print ids or headers only options."); } if (printIdsOnly) { fieldSet = "[id]"; } else if (headersOnly) { fieldSet = "[header]"; } else if (fieldSet.isEmpty()) { fieldSet = "[all]"; } if (!cluster.isEmpty() && !route.isEmpty()) { throw new IllegalArgumentException("Cluster and route options are mutually exclusive."); } if (route.isEmpty() && cluster.isEmpty()) { route = "default"; } if (trace < 0 || trace > 9) { throw new IllegalArgumentException("Invalid tracelevel: " + trace); } if (configId.isEmpty()) { configId = "client"; } ClientParameters.Builder paramsBuilder = new 
ClientParameters.Builder(); return paramsBuilder .setDocumentIds(documentIds) .setConfigId(configId) .setFieldSet(fieldSet) .setHelp(help) .setPrintIdsOnly(printIdsOnly) .setLoadTypeName(loadtype) .setNoRetry(noRetry) .setCluster(cluster) .setRoute(route) .setShowDocSize(showDocSize) .setTraceLevel(trace) .setPriority(priority) .setTimeout(timeout) .setJsonOutput(!jsonOutput && !xmlOutput ? false : jsonOutput) .build(); } catch (ParseException pe) { throw new IllegalArgumentException(pe.getMessage()); } }
.setJsonOutput(!jsonOutput && !xmlOutput ? false : jsonOutput)
public ClientParameters parseCommandLineArguments(String[] args) throws IllegalArgumentException { try { CommandLineParser clp = new DefaultParser(); CommandLine cl = clp.parse(options, args); boolean printIdsOnly = cl.hasOption(PRINTIDS_OPTION); boolean headersOnly = cl.hasOption(HEADERSONLY_OPTION); String fieldSet = cl.getOptionValue(FIELDSET_OPTION, ""); String cluster = cl.getOptionValue(CLUSTER_OPTION, ""); String route = cl.getOptionValue(ROUTE_OPTION, ""); String configId = cl.getOptionValue(CONFIGID_OPTION, ""); boolean help = cl.hasOption(HELP_OPTION); String loadtype = cl.getOptionValue(LOADTYPE_OPTION, ""); boolean noRetry = cl.hasOption(NORETRY_OPTION); boolean showDocSize = cl.hasOption(SHOWDOCSIZE_OPTION); boolean jsonOutput = cl.hasOption(JSONOUTPUT_OPTION); boolean xmlOutput = cl.hasOption(XMLOUTPUT_OPTION); int trace = getTrace(cl); DocumentProtocol.Priority priority = getPriority(cl); double timeout = getTimeout(cl); Iterator<String> documentIds = getDocumentIds(cl); if (jsonOutput && xmlOutput) { throw new IllegalArgumentException("Cannot combine both xml and json output"); } if (printIdsOnly && headersOnly) { throw new IllegalArgumentException("Print ids and headers only options are mutually exclusive."); } if ((printIdsOnly || headersOnly) && !fieldSet.isEmpty()) { throw new IllegalArgumentException("Field set option can not be used in combination with print ids or headers only options."); } if (printIdsOnly) { fieldSet = "[id]"; } else if (headersOnly) { fieldSet = "[header]"; } else if (fieldSet.isEmpty()) { fieldSet = "[all]"; } if (!cluster.isEmpty() && !route.isEmpty()) { throw new IllegalArgumentException("Cluster and route options are mutually exclusive."); } if (route.isEmpty() && cluster.isEmpty()) { route = "default"; } if (trace < 0 || trace > 9) { throw new IllegalArgumentException("Invalid tracelevel: " + trace); } if (configId.isEmpty()) { configId = "client"; } ClientParameters.Builder paramsBuilder = new 
ClientParameters.Builder(); return paramsBuilder .setDocumentIds(documentIds) .setConfigId(configId) .setFieldSet(fieldSet) .setHelp(help) .setPrintIdsOnly(printIdsOnly) .setLoadTypeName(loadtype) .setNoRetry(noRetry) .setCluster(cluster) .setRoute(route) .setShowDocSize(showDocSize) .setTraceLevel(trace) .setPriority(priority) .setTimeout(timeout) .setJsonOutput(!jsonOutput && !xmlOutput ? false : jsonOutput) .build(); } catch (ParseException pe) { throw new IllegalArgumentException(pe.getMessage()); } }
class CommandLineOptions { public static final String HELP_OPTION = "help"; public static final String PRINTIDS_OPTION = "printids"; public static final String HEADERSONLY_OPTION = "headersonly"; public static final String FIELDSET_OPTION = "fieldset"; public static final String CLUSTER_OPTION = "cluster"; public static final String ROUTE_OPTION = "route"; public static final String CONFIGID_OPTION = "configid"; public static final String SHOWDOCSIZE_OPTION = "showdocsize"; public static final String TIMEOUT_OPTION = "timeout"; public static final String NORETRY_OPTION = "noretry"; public static final String TRACE_OPTION = "trace"; public static final String PRIORITY_OPTION = "priority"; public static final String LOADTYPE_OPTION = "loadtype"; public static final String JSONOUTPUT_OPTION = "jsonoutput"; public static final String XMLOUTPUT_OPTION = "xmloutput"; private final Options options = createOptions(); private final InputStream stdIn; public CommandLineOptions(InputStream stdIn) { this.stdIn = stdIn; } public CommandLineOptions() { this(System.in); } @SuppressWarnings("AccessStaticViaInstance") private static Options createOptions() { Options options = new Options(); options.addOption(Option.builder("h") .hasArg(false) .desc("Show this syntax page.") .longOpt(HELP_OPTION) .build()); options.addOption(Option.builder("i") .hasArg(false) .desc("Show only identifiers of retrieved documents.") .longOpt(PRINTIDS_OPTION) .build()); options.addOption(Option.builder("e") .hasArg(false) .desc("Retrieve header fields only. 
[Deprecated].") .longOpt(HEADERSONLY_OPTION).build()); options.addOption(Option.builder("f") .hasArg(true) .desc("Retrieve the specified fields only (see http: .longOpt(FIELDSET_OPTION) .argName("fieldset").build()); options.addOption(Option.builder("u") .hasArg(true) .desc("Send request to the given content cluster.") .longOpt(CLUSTER_OPTION) .argName("cluster").build()); options.addOption(Option.builder("r") .hasArg(true) .desc("Send request to the given messagebus route.") .longOpt(ROUTE_OPTION) .argName("route").build()); options.addOption(Option.builder("c") .hasArg(true) .desc("Use the specified config id for messagebus configuration.") .longOpt(CONFIGID_OPTION) .argName("configid").build()); options.addOption(Option.builder("s") .hasArg(false) .desc("Show binary size of document.") .longOpt(SHOWDOCSIZE_OPTION).build()); options.addOption(Option.builder("t") .hasArg(true) .desc("Set timeout for the request in seconds (default 0).") .longOpt(TIMEOUT_OPTION) .argName("timeout") .type(Number.class).build()); options.addOption(Option.builder("n") .hasArg(false) .desc("Do not retry operation on transient errors, as is default.") .longOpt(NORETRY_OPTION).build()); options.addOption(Option.builder("a") .hasArg(true) .desc("Trace level to use (default 0).") .longOpt(TRACE_OPTION) .argName("trace") .type(Number.class).build()); options.addOption(Option.builder("p") .hasArg(true) .desc("Priority (default 6).") .longOpt(PRIORITY_OPTION) .argName("priority") .type(Number.class).build()); options.addOption(Option.builder("l") .hasArg(true) .desc("Load type (default \"\").") .longOpt(LOADTYPE_OPTION) .argName("loadtype").build()); options.addOption(Option.builder("j") .hasArg(false) .desc("JSON output") .longOpt(JSONOUTPUT_OPTION).build()); options.addOption(Option.builder("x") .hasArg(false) .desc("XML output (default format)") .longOpt(XMLOUTPUT_OPTION).build()); return options; } public void printHelp() { HelpFormatter formatter = new HelpFormatter(); 
formatter.printHelp( "vespa-get <options> [documentid...]", "Fetch a document from a Vespa Content cluster.", options, "If one or more document identifier are specified, these documents will be " + "retrieved. Otherwise, document identifiers (separated with line break) will be read from standard in.\n", false); } private Iterator<String> getDocumentIds(CommandLine cl) { List<String> documentIds = Arrays.asList(cl.getArgs()); if (documentIds.isEmpty() || documentIds.size() == 1 && documentIds.get(0).isEmpty()) { return new Scanner(stdIn); } else { return documentIds.iterator(); } } private static double getTimeout(CommandLine cl) throws ParseException { Number timeoutObj = (Number) cl.getParsedOptionValue(TIMEOUT_OPTION); return timeoutObj != null ? timeoutObj.doubleValue() : 0; } private static int getTrace(CommandLine cl) throws ParseException { Number traceObj = (Number) cl.getParsedOptionValue(TRACE_OPTION); return traceObj != null ? traceObj.intValue() : 0; } private static DocumentProtocol.Priority getPriority(CommandLine cl) throws ParseException { Number priorityObj = (Number) cl.getParsedOptionValue(PRIORITY_OPTION); int priorityNumber = priorityObj != null ? priorityObj.intValue() : DocumentProtocol.Priority.NORMAL_2.getValue(); return parsePriority(priorityNumber); } private static DocumentProtocol.Priority parsePriority(int n) { for (DocumentProtocol.Priority priority : DocumentProtocol.Priority.values()) { if (priority.getValue() == n) { return priority; } } throw new IllegalArgumentException("Invalid priority: " + n); } }
// CommandLineOptions for the vespa-get tool: declares the supported command line flags,
// builds the commons-cli Options set, prints usage, and provides helpers for reading
// document ids (from args or stdin) and typed option values (timeout, trace, priority).
// NOTE(review): this block was mangled by comment stripping -- the description string
// `...(see http:` below is truncated mid-literal (everything after "http:" was dropped as
// if it were a line comment), so the block does not compile as-is. Code is kept
// byte-identical here; restore the full URL from version history.
class CommandLineOptions { public static final String HELP_OPTION = "help"; public static final String PRINTIDS_OPTION = "printids"; public static final String HEADERSONLY_OPTION = "headersonly"; public static final String FIELDSET_OPTION = "fieldset"; public static final String CLUSTER_OPTION = "cluster"; public static final String ROUTE_OPTION = "route"; public static final String CONFIGID_OPTION = "configid"; public static final String SHOWDOCSIZE_OPTION = "showdocsize"; public static final String TIMEOUT_OPTION = "timeout"; public static final String NORETRY_OPTION = "noretry"; public static final String TRACE_OPTION = "trace"; public static final String PRIORITY_OPTION = "priority"; public static final String LOADTYPE_OPTION = "loadtype"; public static final String JSONOUTPUT_OPTION = "jsonoutput"; public static final String XMLOUTPUT_OPTION = "xmloutput"; private final Options options = createOptions(); private final InputStream stdIn; public CommandLineOptions(InputStream stdIn) { this.stdIn = stdIn; } public CommandLineOptions() { this(System.in); } @SuppressWarnings("AccessStaticViaInstance") private static Options createOptions() { Options options = new Options(); options.addOption(Option.builder("h") .hasArg(false) .desc("Show this syntax page.") .longOpt(HELP_OPTION) .build()); options.addOption(Option.builder("i") .hasArg(false) .desc("Show only identifiers of retrieved documents.") .longOpt(PRINTIDS_OPTION) .build()); options.addOption(Option.builder("e") .hasArg(false) .desc("Retrieve header fields only. 
[Deprecated].") .longOpt(HEADERSONLY_OPTION).build()); options.addOption(Option.builder("f") .hasArg(true) .desc("Retrieve the specified fields only (see http: .longOpt(FIELDSET_OPTION) .argName("fieldset").build()); options.addOption(Option.builder("u") .hasArg(true) .desc("Send request to the given content cluster.") .longOpt(CLUSTER_OPTION) .argName("cluster").build()); options.addOption(Option.builder("r") .hasArg(true) .desc("Send request to the given messagebus route.") .longOpt(ROUTE_OPTION) .argName("route").build()); options.addOption(Option.builder("c") .hasArg(true) .desc("Use the specified config id for messagebus configuration.") .longOpt(CONFIGID_OPTION) .argName("configid").build()); options.addOption(Option.builder("s") .hasArg(false) .desc("Show binary size of document.") .longOpt(SHOWDOCSIZE_OPTION).build()); options.addOption(Option.builder("t") .hasArg(true) .desc("Set timeout for the request in seconds (default 0).") .longOpt(TIMEOUT_OPTION) .argName("timeout") .type(Number.class).build()); options.addOption(Option.builder("n") .hasArg(false) .desc("Do not retry operation on transient errors, as is default.") .longOpt(NORETRY_OPTION).build()); options.addOption(Option.builder("a") .hasArg(true) .desc("Trace level to use (default 0).") .longOpt(TRACE_OPTION) .argName("trace") .type(Number.class).build()); options.addOption(Option.builder("p") .hasArg(true) .desc("Priority (default 6).") .longOpt(PRIORITY_OPTION) .argName("priority") .type(Number.class).build()); options.addOption(Option.builder("l") .hasArg(true) .desc("Load type (default \"\").") .longOpt(LOADTYPE_OPTION) .argName("loadtype").build()); options.addOption(Option.builder("j") .hasArg(false) .desc("JSON output") .longOpt(JSONOUTPUT_OPTION).build()); options.addOption(Option.builder("x") .hasArg(false) .desc("XML output (default format)") .longOpt(XMLOUTPUT_OPTION).build()); return options; } public void printHelp() { HelpFormatter formatter = new HelpFormatter(); 
formatter.printHelp( "vespa-get <options> [documentid...]", "Fetch a document from a Vespa Content cluster.", options, "If one or more document identifier are specified, these documents will be " + "retrieved. Otherwise, document identifiers (separated with line break) will be read from standard in.\n", false); } private Iterator<String> getDocumentIds(CommandLine cl) { List<String> documentIds = Arrays.asList(cl.getArgs()); if (documentIds.isEmpty() || documentIds.size() == 1 && documentIds.get(0).isEmpty()) { return new Scanner(stdIn); } else { return documentIds.iterator(); } } private static double getTimeout(CommandLine cl) throws ParseException { Number timeoutObj = (Number) cl.getParsedOptionValue(TIMEOUT_OPTION); return timeoutObj != null ? timeoutObj.doubleValue() : 0; } private static int getTrace(CommandLine cl) throws ParseException { Number traceObj = (Number) cl.getParsedOptionValue(TRACE_OPTION); return traceObj != null ? traceObj.intValue() : 0; } private static DocumentProtocol.Priority getPriority(CommandLine cl) throws ParseException { Number priorityObj = (Number) cl.getParsedOptionValue(PRIORITY_OPTION); int priorityNumber = priorityObj != null ? priorityObj.intValue() : DocumentProtocol.Priority.NORMAL_2.getValue(); return parsePriority(priorityNumber); } private static DocumentProtocol.Priority parsePriority(int n) { for (DocumentProtocol.Priority priority : DocumentProtocol.Priority.values()) { if (priority.getValue() == n) { return priority; } } throw new IllegalArgumentException("Invalid priority: " + n); } }
Same issue here regarding operator precedence: add parentheses (or simplify the expression) so the intended evaluation order is explicit.
public VdsVisitParameters parse(String args[]) throws org.apache.commons.cli.ParseException { VdsVisitParameters allParams = new VdsVisitParameters(); VisitorParameters params = new VisitorParameters(""); CommandLineParser parser = new DefaultParser(); CommandLine line = parser.parse(options, args); if (line.hasOption("h")) { return null; } if (line.hasOption("d")) { params.setRemoteDataHandler(line.getOptionValue("d")); } if (line.hasOption("s")) { params.setDocumentSelection(line.getOptionValue("s")); } if (line.hasOption("bucketspace")) { params.setBucketSpace(line.getOptionValue("bucketspace")); } if (line.hasOption("f")) { params.setFromTimestamp(((Number) line.getParsedOptionValue("f")).longValue()); } if (line.hasOption("t")) { params.setToTimestamp(((Number) line.getParsedOptionValue("t")).longValue()); } if (line.hasOption("e")) { params.fieldSet("[header]"); } if (line.hasOption("l")) { params.fieldSet(line.getOptionValue("l")); } if (line.hasOption("visitinconsistentbuckets")) { params.visitInconsistentBuckets(true); } if (line.hasOption("m")) { params.setMaxPending(((Number) line.getParsedOptionValue("m")).intValue()); } if (line.hasOption("b")) { params.setMaxBucketsPerVisitor(((Number) line.getParsedOptionValue("b")).intValue()); } if (line.hasOption("i")) { allParams.setPrintIdsOnly(true); params.fieldSet("[id]"); } if (line.hasOption("p")) { params.setResumeFileName(line.getOptionValue("p")); } if (line.hasOption("o")) { allParams.setFullTimeout(((Number) line.getParsedOptionValue("o")).intValue()); params.setTimeoutMs(allParams.getFullTimeout()); } if (line.hasOption("u")) { params.setTimeoutMs(((Number) line.getParsedOptionValue("u")).intValue()); } if (line.hasOption("visitlibrary")) { params.setVisitorLibrary(line.getOptionValue("visitlibrary")); } if (line.hasOption("libraryparam")) { String key = line.getOptionValues("libraryparam")[0]; String value = line.getOptionValues("libraryparam")[1]; params.setLibraryParameter(key, value); } if 
(line.hasOption("r")) { params.visitRemoves(true); } if (line.hasOption("c")) { allParams.setCluster(line.getOptionValue("c")); } if (line.hasOption("v")) { allParams.setVerbose(true); } if (line.hasOption("statistics")) { allParams.setStatisticsParts(line.getOptionValue("statistics")); params.fieldSet("[id]"); params.setVisitorLibrary("CountVisitor"); } if (line.hasOption("abortonclusterdown")) { allParams.setAbortOnClusterDown(true); } if (line.hasOption("processtime")) { allParams.setProcessTime(((Number) line.getParsedOptionValue("processtime")).intValue()); } if (line.hasOption("maxhits")) { params.setMaxFirstPassHits(((Number)line.getParsedOptionValue("maxhits")).intValue()); } if (line.hasOption("maxtotalhits")) { params.setMaxTotalHits(((Number)line.getParsedOptionValue("maxtotalhits")).intValue()); } if (line.hasOption("tracelevel")) { params.setTraceLevel(((Number)line.getParsedOptionValue("tracelevel")).intValue()); } if (line.hasOption("priority")) { try { DocumentProtocol.Priority priority = DocumentProtocol.getPriorityByName( line.getOptionValue("priority")); params.setPriority(priority); } catch (IllegalArgumentException e) { throw new IllegalArgumentException("Unknown priority name"); } } else { params.setPriority(DocumentProtocol.Priority.LOW_1); } if (line.hasOption("ordering")) { String opt = line.getOptionValue("ordering"); if (opt.equalsIgnoreCase("ascending")) { params.setVisitorOrdering(OrderingSpecification.ASCENDING); } else if (opt.equalsIgnoreCase("descending")) { params.setVisitorOrdering(OrderingSpecification.DESCENDING); } else { throw new IllegalArgumentException("Unknown ordering. 
Legal values are \"ascending\", \"descending\""); } } if (line.hasOption("skipbucketsonfatalerrors")) { params.skipBucketsOnFatalErrors(true); } if (line.hasOption("maxpendingsuperbuckets")) { StaticThrottlePolicy throttlePolicy = new StaticThrottlePolicy(); throttlePolicy.setMaxPendingCount(((Number)line.getParsedOptionValue("maxpendingsuperbuckets")).intValue()); params.setThrottlePolicy(throttlePolicy); } boolean jsonOutput = line.hasOption("jsonoutput"); boolean xmlOutput = line.hasOption("xmloutput"); if (jsonOutput && xmlOutput) { throw new IllegalArgumentException("Cannot combine both xml and json output"); } allParams.setJsonOutput(!jsonOutput && !xmlOutput ? false : jsonOutput); allParams.setVisitorParameters(params); return allParams; }
allParams.setJsonOutput(!jsonOutput && !xmlOutput ? false : jsonOutput);
public VdsVisitParameters parse(String args[]) throws org.apache.commons.cli.ParseException { VdsVisitParameters allParams = new VdsVisitParameters(); VisitorParameters params = new VisitorParameters(""); CommandLineParser parser = new DefaultParser(); CommandLine line = parser.parse(options, args); if (line.hasOption("h")) { return null; } if (line.hasOption("d")) { params.setRemoteDataHandler(line.getOptionValue("d")); } if (line.hasOption("s")) { params.setDocumentSelection(line.getOptionValue("s")); } if (line.hasOption("bucketspace")) { params.setBucketSpace(line.getOptionValue("bucketspace")); } if (line.hasOption("f")) { params.setFromTimestamp(((Number) line.getParsedOptionValue("f")).longValue()); } if (line.hasOption("t")) { params.setToTimestamp(((Number) line.getParsedOptionValue("t")).longValue()); } if (line.hasOption("e")) { params.fieldSet("[header]"); } if (line.hasOption("l")) { params.fieldSet(line.getOptionValue("l")); } if (line.hasOption("visitinconsistentbuckets")) { params.visitInconsistentBuckets(true); } if (line.hasOption("m")) { params.setMaxPending(((Number) line.getParsedOptionValue("m")).intValue()); } if (line.hasOption("b")) { params.setMaxBucketsPerVisitor(((Number) line.getParsedOptionValue("b")).intValue()); } if (line.hasOption("i")) { allParams.setPrintIdsOnly(true); params.fieldSet("[id]"); } if (line.hasOption("p")) { params.setResumeFileName(line.getOptionValue("p")); } if (line.hasOption("o")) { allParams.setFullTimeout(((Number) line.getParsedOptionValue("o")).intValue()); params.setTimeoutMs(allParams.getFullTimeout()); } if (line.hasOption("u")) { params.setTimeoutMs(((Number) line.getParsedOptionValue("u")).intValue()); } if (line.hasOption("visitlibrary")) { params.setVisitorLibrary(line.getOptionValue("visitlibrary")); } if (line.hasOption("libraryparam")) { String key = line.getOptionValues("libraryparam")[0]; String value = line.getOptionValues("libraryparam")[1]; params.setLibraryParameter(key, value); } if 
(line.hasOption("r")) { params.visitRemoves(true); } if (line.hasOption("c")) { allParams.setCluster(line.getOptionValue("c")); } if (line.hasOption("v")) { allParams.setVerbose(true); } if (line.hasOption("statistics")) { allParams.setStatisticsParts(line.getOptionValue("statistics")); params.fieldSet("[id]"); params.setVisitorLibrary("CountVisitor"); } if (line.hasOption("abortonclusterdown")) { allParams.setAbortOnClusterDown(true); } if (line.hasOption("processtime")) { allParams.setProcessTime(((Number) line.getParsedOptionValue("processtime")).intValue()); } if (line.hasOption("maxhits")) { params.setMaxFirstPassHits(((Number)line.getParsedOptionValue("maxhits")).intValue()); } if (line.hasOption("maxtotalhits")) { params.setMaxTotalHits(((Number)line.getParsedOptionValue("maxtotalhits")).intValue()); } if (line.hasOption("tracelevel")) { params.setTraceLevel(((Number)line.getParsedOptionValue("tracelevel")).intValue()); } if (line.hasOption("priority")) { try { DocumentProtocol.Priority priority = DocumentProtocol.getPriorityByName( line.getOptionValue("priority")); params.setPriority(priority); } catch (IllegalArgumentException e) { throw new IllegalArgumentException("Unknown priority name"); } } else { params.setPriority(DocumentProtocol.Priority.LOW_1); } if (line.hasOption("ordering")) { String opt = line.getOptionValue("ordering"); if (opt.equalsIgnoreCase("ascending")) { params.setVisitorOrdering(OrderingSpecification.ASCENDING); } else if (opt.equalsIgnoreCase("descending")) { params.setVisitorOrdering(OrderingSpecification.DESCENDING); } else { throw new IllegalArgumentException("Unknown ordering. 
Legal values are \"ascending\", \"descending\""); } } if (line.hasOption("skipbucketsonfatalerrors")) { params.skipBucketsOnFatalErrors(true); } if (line.hasOption("maxpendingsuperbuckets")) { StaticThrottlePolicy throttlePolicy = new StaticThrottlePolicy(); throttlePolicy.setMaxPendingCount(((Number)line.getParsedOptionValue("maxpendingsuperbuckets")).intValue()); params.setThrottlePolicy(throttlePolicy); } boolean jsonOutput = line.hasOption("jsonoutput"); boolean xmlOutput = line.hasOption("xmloutput"); if (jsonOutput && xmlOutput) { throw new IllegalArgumentException("Cannot combine both xml and json output"); } allParams.setJsonOutput(!jsonOutput && !xmlOutput ? false : jsonOutput); allParams.setVisitorParameters(params); return allParams; }
/**
 * Thin holder for the commons-cli {@link Options} definition used when parsing
 * vdsvisit command line arguments.
 */
class ArgumentParser {

    // Set once at construction and never reassigned, so declare it final.
    private final Options options;

    /**
     * @param options the commons-cli option definitions to parse against
     */
    public ArgumentParser(Options options) {
        this.options = options;
    }
}
// Holder for the commons-cli option definitions consumed by the argument parsing code.
class ArgumentParser {

    // Option definitions supplied by the caller.
    private Options options;

    public ArgumentParser(Options options) {
        this.options = options;
    }
}
This works similarly to `find dir -type f -exec rm -r {} \;` — it will delete all files under the container's `/var/lib/sia` directory.
// Removes a container's Athenz credentials: deletes everything FileFinder.files(...) matches
// under the host-side view of the container's /var/lib/sia, then forgets the last-refresh
// timestamp so throttling state does not outlive the credentials.
// NOTE(review): FileFinder.files(...) presumably matches regular files only (directories are
// left in place) -- confirm against FileFinder before relying on that.
public void clearCredentials(NodeAgentContext context) { FileFinder.files(context.pathOnHostFromPathInNode(CONTAINER_SIA_DIRECTORY)) .deleteRecursively(); lastRefreshAttempt.remove(context.containerName()); }
FileFinder.files(context.pathOnHostFromPathInNode(CONTAINER_SIA_DIRECTORY))
// Deletes the files under the container's /var/lib/sia (resolved to its host-side path) and
// drops this container's entry from lastRefreshAttempt, resetting refresh throttling.
// NOTE(review): assumes FileFinder.files(...).deleteRecursively() deletes matched files but
// keeps the directory tree -- verify against the FileFinder implementation.
public void clearCredentials(NodeAgentContext context) { FileFinder.files(context.pathOnHostFromPathInNode(CONTAINER_SIA_DIRECTORY)) .deleteRecursively(); lastRefreshAttempt.remove(context.containerName()); }
// Maintains per-container Athenz x509 credentials: registers an instance identity with ZTS
// when key/cert/identity-document files are missing or the certificate has expired (with
// EXPIRY_MARGIN), and refreshes the certificate once it is older than REFRESH_PERIOD,
// throttled to one attempt per REFRESH_BACKOFF per container via lastRefreshAttempt.
// Key/cert writes go to .tmp files first and are moved into place atomically.
// NOTE(review): the throttling warning below logs lastRefreshAttempt.toString(), i.e. the
// whole container->Instant map; logging only this container's entry would be clearer.
// NOTE(review): extraction artifact -- original line breaks/comments were lost; code kept verbatim.
class AthenzCredentialsMaintainer { private static final Logger logger = Logger.getLogger(AthenzCredentialsMaintainer.class.getName()); private static final Duration EXPIRY_MARGIN = Duration.ofDays(1); private static final Duration REFRESH_PERIOD = Duration.ofDays(1); private static final Duration REFRESH_BACKOFF = Duration.ofHours(1); private static final Path CONTAINER_SIA_DIRECTORY = Paths.get("/var/lib/sia"); private final URI ztsEndpoint; private final Path trustStorePath; private final AthenzService configserverIdentity; private final Clock clock; private final ServiceIdentityProvider hostIdentityProvider; private final IdentityDocumentClient identityDocumentClient; private final CsrGenerator csrGenerator; private Map<ContainerName, Instant> lastRefreshAttempt = new ConcurrentHashMap<>(); public AthenzCredentialsMaintainer(URI ztsEndpoint, Path trustStorePath, ConfigServerInfo configServerInfo, String certificateDnsSuffix, ServiceIdentityProvider hostIdentityProvider) { this.ztsEndpoint = ztsEndpoint; this.trustStorePath = trustStorePath; this.configserverIdentity = configServerInfo.getConfigServerIdentity(); this.csrGenerator = new CsrGenerator(certificateDnsSuffix, configserverIdentity.getFullName()); this.hostIdentityProvider = hostIdentityProvider; this.identityDocumentClient = new DefaultIdentityDocumentClient( configServerInfo.getLoadBalancerEndpoint(), hostIdentityProvider, new AthenzIdentityVerifier(singleton(configserverIdentity))); this.clock = Clock.systemUTC(); } public void converge(NodeAgentContext context) { try { context.log(logger, LogLevel.DEBUG, "Checking certificate"); Path containerSiaDirectory = context.pathOnHostFromPathInNode(CONTAINER_SIA_DIRECTORY); Path privateKeyFile = SiaUtils.getPrivateKeyFile(containerSiaDirectory, context.identity()); Path certificateFile = SiaUtils.getCertificateFile(containerSiaDirectory, context.identity()); Path identityDocumentFile = containerSiaDirectory.resolve("vespa-node-identity-document.json"); if 
(!Files.exists(privateKeyFile) || !Files.exists(certificateFile) || !Files.exists(identityDocumentFile)) { context.log(logger, "Certificate/private key/identity document file does not exist"); Files.createDirectories(privateKeyFile.getParent()); Files.createDirectories(certificateFile.getParent()); Files.createDirectories(identityDocumentFile.getParent()); registerIdentity(context, privateKeyFile, certificateFile, identityDocumentFile); return; } X509Certificate certificate = readCertificateFromFile(certificateFile); Instant now = clock.instant(); Instant expiry = certificate.getNotAfter().toInstant(); if (isCertificateExpired(expiry, now)) { context.log(logger, "Certificate has expired (expiry=%s)", expiry.toString()); registerIdentity(context, privateKeyFile, certificateFile, identityDocumentFile); return; } Duration age = Duration.between(certificate.getNotBefore().toInstant(), now); if (shouldRefreshCredentials(age)) { context.log(logger, "Certificate is ready to be refreshed (age=%s)", age.toString()); if (shouldThrottleRefreshAttempts(context, now)) { context.log(logger, LogLevel.WARNING, String.format( "Skipping refresh attempt as last refresh was on %s (less than %s ago)", lastRefreshAttempt.toString(), REFRESH_BACKOFF.toString())); return; } else { lastRefreshAttempt.put(context.containerName(), now); refreshIdentity(context, privateKeyFile, certificateFile, identityDocumentFile); return; } } context.log(logger, LogLevel.DEBUG, "Certificate is still valid"); } catch (IOException e) { throw new UncheckedIOException(e); } } private boolean shouldRefreshCredentials(Duration age) { return age.compareTo(REFRESH_PERIOD) >= 0; } private boolean shouldThrottleRefreshAttempts(NodeAgentContext context, Instant now) { return REFRESH_BACKOFF.compareTo( Duration.between( lastRefreshAttempt.getOrDefault(context.containerName(), Instant.EPOCH), now)) > 0; } @SuppressWarnings("deprecation") private void registerIdentity(NodeAgentContext context, Path privateKeyFile, Path 
certificateFile, Path identityDocumentFile) { KeyPair keyPair = KeyUtils.generateKeypair(KeyAlgorithm.RSA); SignedIdentityDocument signedIdentityDocument = identityDocumentClient.getNodeIdentityDocument(context.hostname().value()); com.yahoo.vespa.athenz.tls.Pkcs10Csr csr = csrGenerator.generateInstanceCsr( context.identity(), signedIdentityDocument.providerUniqueId(), signedIdentityDocument.ipAddresses(), keyPair); try (ZtsClient ztsClient = new DefaultZtsClient(ztsEndpoint, hostIdentityProvider)) { InstanceIdentity instanceIdentity = ztsClient.registerInstance( configserverIdentity, context.identity(), signedIdentityDocument.providerUniqueId().asDottedString(), EntityBindingsMapper.toAttestationData(signedIdentityDocument), false, csr); EntityBindingsMapper.writeSignedIdentityDocumentToFile(identityDocumentFile, signedIdentityDocument); writePrivateKeyAndCertificate(privateKeyFile, keyPair.getPrivate(), certificateFile, instanceIdentity.certificate()); context.log(logger, "Instance successfully registered and credentials written to file"); } catch (IOException e) { throw new UncheckedIOException(e); } } @SuppressWarnings("deprecation") private void refreshIdentity(NodeAgentContext context, Path privateKeyFile, Path certificateFile, Path identityDocumentFile) { SignedIdentityDocument identityDocument = EntityBindingsMapper.readSignedIdentityDocumentFromFile(identityDocumentFile); KeyPair keyPair = KeyUtils.generateKeypair(KeyAlgorithm.RSA); com.yahoo.vespa.athenz.tls.Pkcs10Csr csr = csrGenerator.generateInstanceCsr( context.identity(), identityDocument.providerUniqueId(), identityDocument.ipAddresses(), keyPair); SSLContext containerIdentitySslContext = new SslContextBuilder() .withKeyStore(privateKeyFile, certificateFile) .withTrustStore(trustStorePath, KeyStoreType.JKS) .build(); try { try (ZtsClient ztsClient = new DefaultZtsClient(ztsEndpoint, context.identity(), containerIdentitySslContext)) { InstanceIdentity instanceIdentity = ztsClient.refreshInstance( 
configserverIdentity, context.identity(), identityDocument.providerUniqueId().asDottedString(), false, csr); writePrivateKeyAndCertificate(privateKeyFile, keyPair.getPrivate(), certificateFile, instanceIdentity.certificate()); context.log(logger, "Instance successfully refreshed and credentials written to file"); } catch (ZtsClientException e) { if (e.getErrorCode() == 403 && e.getDescription().startsWith("Certificate revoked")) { context.log(logger, LogLevel.ERROR, "Certificate cannot be refreshed as it is revoked by ZTS - re-registering the instance now", e); registerIdentity(context, privateKeyFile, certificateFile, identityDocumentFile); } else { throw e; } } } catch (Exception e) { context.log(logger, LogLevel.ERROR, "Certificate refresh failed: " + e.getMessage(), e); } } private static void writePrivateKeyAndCertificate( Path privateKeyFile, PrivateKey privateKey, Path certificateFile, X509Certificate certificate) throws IOException { Path tempPrivateKeyFile = toTempPath(privateKeyFile); Files.write(tempPrivateKeyFile, KeyUtils.toPem(privateKey).getBytes()); Path tempCertificateFile = toTempPath(certificateFile); Files.write(tempCertificateFile, X509CertificateUtils.toPem(certificate).getBytes()); Files.move(tempPrivateKeyFile, privateKeyFile, StandardCopyOption.ATOMIC_MOVE); Files.move(tempCertificateFile, certificateFile, StandardCopyOption.ATOMIC_MOVE); } private static Path toTempPath(Path file) { return Paths.get(file.toAbsolutePath().toString() + ".tmp"); } private static X509Certificate readCertificateFromFile(Path certificateFile) throws IOException { String pemEncodedCertificate = new String(Files.readAllBytes(certificateFile)); return X509CertificateUtils.fromPem(pemEncodedCertificate); } private static boolean isCertificateExpired(Instant expiry, Instant now) { return now.isAfter(expiry.minus(EXPIRY_MARGIN)); } }
// Maintains per-container Athenz x509 credentials: registers an instance identity with ZTS
// when key/cert/identity-document files are missing or the certificate has expired (with
// EXPIRY_MARGIN), and refreshes the certificate once it is older than REFRESH_PERIOD,
// throttled to one attempt per REFRESH_BACKOFF per container (tracked in lastRefreshAttempt).
// Key/cert writes go to .tmp files first and are moved into place atomically.
// NOTE(review): extraction artifact -- original line breaks/comments were lost; code kept verbatim.
class AthenzCredentialsMaintainer { private static final Logger logger = Logger.getLogger(AthenzCredentialsMaintainer.class.getName()); private static final Duration EXPIRY_MARGIN = Duration.ofDays(1); private static final Duration REFRESH_PERIOD = Duration.ofDays(1); private static final Duration REFRESH_BACKOFF = Duration.ofHours(1); private static final Path CONTAINER_SIA_DIRECTORY = Paths.get("/var/lib/sia"); private final URI ztsEndpoint; private final Path trustStorePath; private final AthenzService configserverIdentity; private final Clock clock; private final ServiceIdentityProvider hostIdentityProvider; private final IdentityDocumentClient identityDocumentClient; private final CsrGenerator csrGenerator; private Map<ContainerName, Instant> lastRefreshAttempt = new ConcurrentHashMap<>(); public AthenzCredentialsMaintainer(URI ztsEndpoint, Path trustStorePath, ConfigServerInfo configServerInfo, String certificateDnsSuffix, ServiceIdentityProvider hostIdentityProvider) { this.ztsEndpoint = ztsEndpoint; this.trustStorePath = trustStorePath; this.configserverIdentity = configServerInfo.getConfigServerIdentity(); this.csrGenerator = new CsrGenerator(certificateDnsSuffix, configserverIdentity.getFullName()); this.hostIdentityProvider = hostIdentityProvider; this.identityDocumentClient = new DefaultIdentityDocumentClient( configServerInfo.getLoadBalancerEndpoint(), hostIdentityProvider, new AthenzIdentityVerifier(singleton(configserverIdentity))); this.clock = Clock.systemUTC(); } public void converge(NodeAgentContext context) { try { context.log(logger, LogLevel.DEBUG, "Checking certificate"); Path containerSiaDirectory = context.pathOnHostFromPathInNode(CONTAINER_SIA_DIRECTORY); Path privateKeyFile = SiaUtils.getPrivateKeyFile(containerSiaDirectory, context.identity()); Path certificateFile = SiaUtils.getCertificateFile(containerSiaDirectory, context.identity()); Path identityDocumentFile = containerSiaDirectory.resolve("vespa-node-identity-document.json"); if 
(!Files.exists(privateKeyFile) || !Files.exists(certificateFile) || !Files.exists(identityDocumentFile)) { context.log(logger, "Certificate/private key/identity document file does not exist"); Files.createDirectories(privateKeyFile.getParent()); Files.createDirectories(certificateFile.getParent()); Files.createDirectories(identityDocumentFile.getParent()); registerIdentity(context, privateKeyFile, certificateFile, identityDocumentFile); return; } X509Certificate certificate = readCertificateFromFile(certificateFile); Instant now = clock.instant(); Instant expiry = certificate.getNotAfter().toInstant(); if (isCertificateExpired(expiry, now)) { context.log(logger, "Certificate has expired (expiry=%s)", expiry.toString()); registerIdentity(context, privateKeyFile, certificateFile, identityDocumentFile); return; } Duration age = Duration.between(certificate.getNotBefore().toInstant(), now); if (shouldRefreshCredentials(age)) { context.log(logger, "Certificate is ready to be refreshed (age=%s)", age.toString()); if (shouldThrottleRefreshAttempts(context.containerName(), now)) { context.log(logger, LogLevel.WARNING, String.format( "Skipping refresh attempt as last refresh was on %s (less than %s ago)", lastRefreshAttempt.get(context.containerName()).toString(), REFRESH_BACKOFF.toString())); return; } else { lastRefreshAttempt.put(context.containerName(), now); refreshIdentity(context, privateKeyFile, certificateFile, identityDocumentFile); return; } } context.log(logger, LogLevel.DEBUG, "Certificate is still valid"); } catch (IOException e) { throw new UncheckedIOException(e); } } private boolean shouldRefreshCredentials(Duration age) { return age.compareTo(REFRESH_PERIOD) >= 0; } private boolean shouldThrottleRefreshAttempts(ContainerName containerName, Instant now) { return REFRESH_BACKOFF.compareTo( Duration.between( lastRefreshAttempt.getOrDefault(containerName, Instant.EPOCH), now)) > 0; } @SuppressWarnings("deprecation") private void 
registerIdentity(NodeAgentContext context, Path privateKeyFile, Path certificateFile, Path identityDocumentFile) { KeyPair keyPair = KeyUtils.generateKeypair(KeyAlgorithm.RSA); SignedIdentityDocument signedIdentityDocument = identityDocumentClient.getNodeIdentityDocument(context.hostname().value()); com.yahoo.vespa.athenz.tls.Pkcs10Csr csr = csrGenerator.generateInstanceCsr( context.identity(), signedIdentityDocument.providerUniqueId(), signedIdentityDocument.ipAddresses(), keyPair); try (ZtsClient ztsClient = new DefaultZtsClient(ztsEndpoint, hostIdentityProvider)) { InstanceIdentity instanceIdentity = ztsClient.registerInstance( configserverIdentity, context.identity(), signedIdentityDocument.providerUniqueId().asDottedString(), EntityBindingsMapper.toAttestationData(signedIdentityDocument), false, csr); EntityBindingsMapper.writeSignedIdentityDocumentToFile(identityDocumentFile, signedIdentityDocument); writePrivateKeyAndCertificate(privateKeyFile, keyPair.getPrivate(), certificateFile, instanceIdentity.certificate()); context.log(logger, "Instance successfully registered and credentials written to file"); } catch (IOException e) { throw new UncheckedIOException(e); } } @SuppressWarnings("deprecation") private void refreshIdentity(NodeAgentContext context, Path privateKeyFile, Path certificateFile, Path identityDocumentFile) { SignedIdentityDocument identityDocument = EntityBindingsMapper.readSignedIdentityDocumentFromFile(identityDocumentFile); KeyPair keyPair = KeyUtils.generateKeypair(KeyAlgorithm.RSA); com.yahoo.vespa.athenz.tls.Pkcs10Csr csr = csrGenerator.generateInstanceCsr( context.identity(), identityDocument.providerUniqueId(), identityDocument.ipAddresses(), keyPair); SSLContext containerIdentitySslContext = new SslContextBuilder() .withKeyStore(privateKeyFile, certificateFile) .withTrustStore(trustStorePath, KeyStoreType.JKS) .build(); try { try (ZtsClient ztsClient = new DefaultZtsClient(ztsEndpoint, context.identity(), containerIdentitySslContext)) 
{ InstanceIdentity instanceIdentity = ztsClient.refreshInstance( configserverIdentity, context.identity(), identityDocument.providerUniqueId().asDottedString(), false, csr); writePrivateKeyAndCertificate(privateKeyFile, keyPair.getPrivate(), certificateFile, instanceIdentity.certificate()); context.log(logger, "Instance successfully refreshed and credentials written to file"); } catch (ZtsClientException e) { if (e.getErrorCode() == 403 && e.getDescription().startsWith("Certificate revoked")) { context.log(logger, LogLevel.ERROR, "Certificate cannot be refreshed as it is revoked by ZTS - re-registering the instance now", e); registerIdentity(context, privateKeyFile, certificateFile, identityDocumentFile); } else { throw e; } } } catch (Exception e) { context.log(logger, LogLevel.ERROR, "Certificate refresh failed: " + e.getMessage(), e); } } private static void writePrivateKeyAndCertificate( Path privateKeyFile, PrivateKey privateKey, Path certificateFile, X509Certificate certificate) throws IOException { Path tempPrivateKeyFile = toTempPath(privateKeyFile); Files.write(tempPrivateKeyFile, KeyUtils.toPem(privateKey).getBytes()); Path tempCertificateFile = toTempPath(certificateFile); Files.write(tempCertificateFile, X509CertificateUtils.toPem(certificate).getBytes()); Files.move(tempPrivateKeyFile, privateKeyFile, StandardCopyOption.ATOMIC_MOVE); Files.move(tempCertificateFile, certificateFile, StandardCopyOption.ATOMIC_MOVE); } private static Path toTempPath(Path file) { return Paths.get(file.toAbsolutePath().toString() + ".tmp"); } private static X509Certificate readCertificateFromFile(Path certificateFile) throws IOException { String pemEncodedCertificate = new String(Files.readAllBytes(certificateFile)); return X509CertificateUtils.fromPem(pemEncodedCertificate); } private static boolean isCertificateExpired(Instant expiry, Instant now) { return now.isAfter(expiry.minus(EXPIRY_MARGIN)); } }
This handler is protocol-agnostic (comment added).
// Serves the metrics endpoint: dispatches a 200 OK response with Content-Type
// "application/json" whose body is produced by buildMetricOutput(). Returns null
// because this handler does not consume any request content.
// NOTE(review): despite the JSON content type, buildMetricOutput() concatenates a status
// packet and per-dimension metric packets separated by blank lines, i.e. the payload is
// not a single JSON document -- confirm consumers expect that framing.
public ContentChannel handleRequest(Request request, ResponseHandler handler) { new ResponseDispatch() { @Override protected Response newResponse() { Response response = new Response(Response.Status.OK); response.headers().add(HttpHeaders.Names.CONTENT_TYPE, "application/json"); return response; } @Override protected Iterable<ByteBuffer> responseContent() { return Collections.singleton(ByteBuffer.wrap(buildMetricOutput())); } }.dispatch(handler); return null; }
Response response = new Response(Response.Status.OK);
// Protocol-agnostic JDisc handler entry point: replies 200 OK with an
// "application/json" Content-Type header and the bytes from buildMetricOutput() as the
// single response chunk. Returns null since no request body is read.
// NOTE(review): the body is a sequence of JSON packets separated by blank lines (see
// getAllMetricsPackets), not one JSON document, even though the content type says JSON.
public ContentChannel handleRequest(Request request, ResponseHandler handler) { new ResponseDispatch() { @Override protected Response newResponse() { Response response = new Response(Response.Status.OK); response.headers().add(HttpHeaders.Names.CONTENT_TYPE, "application/json"); return response; } @Override protected Iterable<ByteBuffer> responseContent() { return Collections.singleton(ByteBuffer.wrap(buildMetricOutput())); } }.dispatch(handler); return null; }
class MetricsPacketsHandler extends AbstractRequestHandler { static final String APPLICATION_KEY = "application"; static final String TIMESTAMP_KEY = "timestamp"; static final String STATUS_CODE_KEY = "status_code"; static final String STATUS_MSG_KEY = "status_msg"; static final String METRICS_KEY = "metrics"; static final String DIMENSIONS_KEY = "dimensions"; private final StateMonitor monitor; private final Timer timer; private final SnapshotProvider snapshotPreprocessor; private final String applicationName; @Inject public MetricsPacketsHandler(StateMonitor monitor, Timer timer, ComponentRegistry<SnapshotProvider> preprocessors, MetricsPresentationConfig presentation, MetricsPacketsHandlerConfig config) { this.monitor = monitor; this.timer = timer; snapshotPreprocessor = getSnapshotPreprocessor(preprocessors, presentation); applicationName = config.application(); } @Override private byte[] buildMetricOutput() { try { String output = getStatusPacket() + getAllMetricsPackets(); return output.getBytes(StandardCharsets.UTF_8); } catch (JSONException e) { throw new RuntimeException("Bad JSON construction.", e); } } /** * Exactly one status packet is added to the response. 
*/ private String getStatusPacket() throws JSONException { JSONObject packet = new JSONObjectWithLegibleException(); packet.put(APPLICATION_KEY, applicationName); StateMonitor.Status status = monitor.status(); packet.put(STATUS_CODE_KEY, status.ordinal()); packet.put(STATUS_MSG_KEY, status.name()); return jsonToString(packet); } private String jsonToString(JSONObject jsonObject) throws JSONException { return jsonObject.toString(4); } private String getAllMetricsPackets() throws JSONException { StringBuilder ret = new StringBuilder(); List<JSONObject> metricsPackets = getPacketsForSnapshot(getSnapshot(), applicationName, timer.currentTimeMillis()); for (JSONObject packet : metricsPackets) { ret.append("\n\n"); ret.append(jsonToString(packet)); } return ret.toString(); } private MetricSnapshot getSnapshot() { if (snapshotPreprocessor == null) { return monitor.snapshot(); } else { return snapshotPreprocessor.latestSnapshot(); } } private List<JSONObject> getPacketsForSnapshot(MetricSnapshot metricSnapshot, String application, long timestamp) throws JSONException { if (metricSnapshot == null) return Collections.emptyList(); List<JSONObject> packets = new ArrayList<>(); for (Map.Entry<MetricDimensions, MetricSet> snapshotEntry : metricSnapshot) { MetricDimensions metricDimensions = snapshotEntry.getKey(); MetricSet metricSet = snapshotEntry.getValue(); JSONObjectWithLegibleException packet = new JSONObjectWithLegibleException(); addMetaData(timestamp, application, packet); addDimensions(metricDimensions, packet); addMetrics(metricSet, packet); packets.add(packet); } return packets; } private void addMetaData(long timestamp, String application, JSONObjectWithLegibleException packet) { packet.put(APPLICATION_KEY, application); packet.put(TIMESTAMP_KEY, timestamp); } private void addDimensions(MetricDimensions metricDimensions, JSONObjectWithLegibleException packet) throws JSONException { Iterator<Map.Entry<String, String>> dimensionsIterator = metricDimensions.iterator(); 
if (dimensionsIterator.hasNext()) { JSONObject jsonDim = new JSONObjectWithLegibleException(); packet.put(DIMENSIONS_KEY, jsonDim); for (Map.Entry<String, String> dimensionEntry : metricDimensions) { jsonDim.put(dimensionEntry.getKey(), dimensionEntry.getValue()); } } } private void addMetrics(MetricSet metricSet, JSONObjectWithLegibleException packet) throws JSONException { JSONObjectWithLegibleException metrics = new JSONObjectWithLegibleException(); packet.put(METRICS_KEY, metrics); for (Map.Entry<String, MetricValue> metric : metricSet) { String name = metric.getKey(); MetricValue value = metric.getValue(); if (value instanceof CountMetric) { metrics.put(name + ".count", ((CountMetric) value).getCount()); } else if (value instanceof GaugeMetric) { GaugeMetric gauge = (GaugeMetric) value; metrics.put(name + ".average", gauge.getAverage()) .put(name + ".last", gauge.getLast()) .put(name + ".max", gauge.getMax()); if (gauge.getPercentiles().isPresent()) { for (Tuple2<String, Double> prefixAndValue : gauge.getPercentiles().get()) { metrics.put(name + "." + prefixAndValue.first + "percentile", prefixAndValue.second.doubleValue()); } } } else { throw new UnsupportedOperationException("Unknown metric class: " + value.getClass().getName()); } } } }
class MetricsPacketsHandler extends AbstractRequestHandler { static final String APPLICATION_KEY = "application"; static final String TIMESTAMP_KEY = "timestamp"; static final String STATUS_CODE_KEY = "status_code"; static final String STATUS_MSG_KEY = "status_msg"; static final String METRICS_KEY = "metrics"; static final String DIMENSIONS_KEY = "dimensions"; static final String PACKET_SEPARATOR = "\n\n"; private final StateMonitor monitor; private final Timer timer; private final SnapshotProvider snapshotPreprocessor; private final String applicationName; @Inject public MetricsPacketsHandler(StateMonitor monitor, Timer timer, ComponentRegistry<SnapshotProvider> preprocessors, MetricsPresentationConfig presentation, MetricsPacketsHandlerConfig config) { this.monitor = monitor; this.timer = timer; snapshotPreprocessor = getSnapshotPreprocessor(preprocessors, presentation); applicationName = config.application(); } @Override private byte[] buildMetricOutput() { try { String output = getStatusPacket() + getAllMetricsPackets(); return output.getBytes(StandardCharsets.UTF_8); } catch (JSONException e) { throw new RuntimeException("Bad JSON construction.", e); } } /** * Exactly one status packet is added to the response. 
*/ private String getStatusPacket() throws JSONException { JSONObject packet = new JSONObjectWithLegibleException(); packet.put(APPLICATION_KEY, applicationName); StateMonitor.Status status = monitor.status(); packet.put(STATUS_CODE_KEY, status.ordinal()); packet.put(STATUS_MSG_KEY, status.name()); return jsonToString(packet); } private String jsonToString(JSONObject jsonObject) throws JSONException { return jsonObject.toString(4); } private String getAllMetricsPackets() throws JSONException { StringBuilder ret = new StringBuilder(); List<JSONObject> metricsPackets = getPacketsForSnapshot(getSnapshot(), applicationName, timer.currentTimeMillis()); for (JSONObject packet : metricsPackets) { ret.append(PACKET_SEPARATOR); ret.append(jsonToString(packet)); } return ret.toString(); } private MetricSnapshot getSnapshot() { if (snapshotPreprocessor == null) { return monitor.snapshot(); } else { return snapshotPreprocessor.latestSnapshot(); } } private List<JSONObject> getPacketsForSnapshot(MetricSnapshot metricSnapshot, String application, long timestamp) throws JSONException { if (metricSnapshot == null) return Collections.emptyList(); List<JSONObject> packets = new ArrayList<>(); for (Map.Entry<MetricDimensions, MetricSet> snapshotEntry : metricSnapshot) { MetricDimensions metricDimensions = snapshotEntry.getKey(); MetricSet metricSet = snapshotEntry.getValue(); JSONObjectWithLegibleException packet = new JSONObjectWithLegibleException(); addMetaData(timestamp, application, packet); addDimensions(metricDimensions, packet); addMetrics(metricSet, packet); packets.add(packet); } return packets; } private void addMetaData(long timestamp, String application, JSONObjectWithLegibleException packet) { packet.put(APPLICATION_KEY, application); packet.put(TIMESTAMP_KEY, timestamp); } private void addDimensions(MetricDimensions metricDimensions, JSONObjectWithLegibleException packet) throws JSONException { Iterator<Map.Entry<String, String>> dimensionsIterator = 
metricDimensions.iterator(); if (dimensionsIterator.hasNext()) { JSONObject jsonDim = new JSONObjectWithLegibleException(); packet.put(DIMENSIONS_KEY, jsonDim); for (Map.Entry<String, String> dimensionEntry : metricDimensions) { jsonDim.put(dimensionEntry.getKey(), dimensionEntry.getValue()); } } } private void addMetrics(MetricSet metricSet, JSONObjectWithLegibleException packet) throws JSONException { JSONObjectWithLegibleException metrics = new JSONObjectWithLegibleException(); packet.put(METRICS_KEY, metrics); for (Map.Entry<String, MetricValue> metric : metricSet) { String name = metric.getKey(); MetricValue value = metric.getValue(); if (value instanceof CountMetric) { metrics.put(name + ".count", ((CountMetric) value).getCount()); } else if (value instanceof GaugeMetric) { GaugeMetric gauge = (GaugeMetric) value; metrics.put(name + ".average", gauge.getAverage()) .put(name + ".last", gauge.getLast()) .put(name + ".max", gauge.getMax()); if (gauge.getPercentiles().isPresent()) { for (Tuple2<String, Double> prefixAndValue : gauge.getPercentiles().get()) { metrics.put(name + "." + prefixAndValue.first + "percentile", prefixAndValue.second.doubleValue()); } } } else { throw new UnsupportedOperationException("Unknown metric class: " + value.getClass().getName()); } } } }
Why can't you just say return docSendInfoByOperationId.isEmpty() ? Optional.empty() : Optional.of(docSendInfoByOperationId.keySet().iterator().next) like every normal person? :-)
public Optional<String> oldestIncompleteResultId() { synchronized (monitor) { return Optional.of(docSendInfoByOperationId.keySet().iterator()) .filter(Iterator::hasNext) .map(Iterator::next); } }
return Optional.of(docSendInfoByOperationId.keySet().iterator())
public Optional<String> oldestIncompleteResultId() { synchronized (monitor) { return docSendInfoByOperationId.isEmpty() ? Optional.empty() : Optional.of(docSendInfoByOperationId.keySet().iterator().next()); } }
class OperationProcessor { private static final Logger log = Logger.getLogger(OperationProcessor.class.getName()); private final Map<String, DocumentSendInfo> docSendInfoByOperationId = new LinkedHashMap<>(); private final ArrayListMultimap<String, Document> blockedDocumentsByDocumentId = ArrayListMultimap.create(); private final Set<String> inflightDocumentIds = new HashSet<>(); private final int numDestinations; private final FeedClient.ResultCallback resultCallback; private final Object monitor = new Object(); private final IncompleteResultsThrottler incompleteResultsThrottler; private final List<ClusterConnection> clusters = new ArrayList<>(); private final ScheduledThreadPoolExecutor timeoutExecutor; private final OperationStats operationStats; private final int maxRetries; private final long minTimeBetweenRetriesMs; private final Random random = new SecureRandom(); private final int traceEveryXOperation; private final boolean blockOperationsToSameDocument; private int traceCounter = 0; private final boolean traceToStderr; private final String clientId = new BigInteger(130, random).toString(32);; public OperationProcessor( IncompleteResultsThrottler incompleteResultsThrottler, FeedClient.ResultCallback resultCallback, SessionParams sessionParams, ScheduledThreadPoolExecutor timeoutExecutor) { this.numDestinations = sessionParams.getClusters().size(); this.resultCallback = resultCallback; this.incompleteResultsThrottler = incompleteResultsThrottler; this.timeoutExecutor = timeoutExecutor; this.blockOperationsToSameDocument = sessionParams.getConnectionParams().isEnableV3Protocol(); if (sessionParams.getClusters().isEmpty()) { throw new IllegalArgumentException("Cannot feed to 0 clusters."); } for (Cluster cluster : sessionParams.getClusters()) { if (cluster.getEndpoints().isEmpty()) { throw new IllegalArgumentException("Cannot feed to empty cluster."); } } for (int i = 0; i < sessionParams.getClusters().size(); i++) { Cluster cluster = 
sessionParams.getClusters().get(i); clusters.add(new ClusterConnection( this, sessionParams.getFeedParams(), sessionParams.getConnectionParams(), sessionParams.getErrorReport(), cluster, i, sessionParams.getClientQueueSize() / sessionParams.getClusters().size(), timeoutExecutor)); } operationStats = new OperationStats(sessionParams, clusters, incompleteResultsThrottler); maxRetries = sessionParams.getConnectionParams().getMaxRetries(); minTimeBetweenRetriesMs = sessionParams.getConnectionParams().getMinTimeBetweenRetriesMs(); traceEveryXOperation = sessionParams.getConnectionParams().getTraceEveryXOperation(); traceToStderr = sessionParams.getConnectionParams().getPrintTraceToStdErr(); } public int getIncompleteResultQueueSize() { synchronized (monitor) { return docSendInfoByOperationId.size(); } } /** Returns the id of the oldest operation to be sent. */ public String getClientId() { return clientId; } private boolean retriedThis(EndpointResult endpointResult, DocumentSendInfo documentSendInfo, int clusterId) { final Result.Detail detail = endpointResult.getDetail(); if (detail.getResultType() == Result.ResultType.OPERATION_EXECUTED) { return false; } int retries = documentSendInfo.incRetries(clusterId, detail); if (retries > maxRetries) { return false; } String exceptionMessage = detail.getException() == null ? 
"" : detail.getException().getMessage(); if (exceptionMessage == null) { exceptionMessage = ""; } boolean retryThisOperation = detail.getResultType() == Result.ResultType.TRANSITIVE_ERROR || exceptionMessage.contains("SEND_QUEUE_CLOSED") || exceptionMessage.contains("ILLEGAL_ROUTE") || exceptionMessage.contains("NO_SERVICES_FOR_ROUTE") || exceptionMessage.contains("NETWORK_ERROR") || exceptionMessage.contains("SEQUENCE_ERROR") || exceptionMessage.contains("NETWORK_SHUTDOWN") || exceptionMessage.contains("TIMEOUT"); if (retryThisOperation) { int waitTime = (int) (minTimeBetweenRetriesMs * (1 + random.nextDouble() / 3)); log.finest("Retrying due to " + detail.toString() + " attempt " + retries + " in " + waitTime + " ms."); timeoutExecutor.schedule( () -> postToCluster(clusters.get(clusterId), documentSendInfo.getDocument()), waitTime, TimeUnit.MILLISECONDS); return true; } return false; } private Result process(EndpointResult endpointResult, int clusterId) { synchronized (monitor) { if (!docSendInfoByOperationId.containsKey(endpointResult.getOperationId())) { log.finer("Received out-of-order or too late result, discarding: " + endpointResult); return null; } DocumentSendInfo documentSendInfo = docSendInfoByOperationId.get(endpointResult.getOperationId()); if (retriedThis(endpointResult, documentSendInfo, clusterId)) { return null; } if (!documentSendInfo.addIfNotAlreadyThere(endpointResult.getDetail(), clusterId)) { return null; } if (documentSendInfo.detailCount() != numDestinations) { return null; } Result result = documentSendInfo.createResult(); docSendInfoByOperationId.remove(endpointResult.getOperationId()); String documentId = documentSendInfo.getDocument().getDocumentId(); /** * If we got a pending operation against this document * dont't remove it from inflightDocuments and send blocked document operation */ List<Document> blockedDocuments = blockedDocumentsByDocumentId.get(documentId); if (blockedDocuments.isEmpty()) { 
inflightDocumentIds.remove(documentId); } else { sendToClusters(blockedDocuments.remove(0)); } return result; } } public void resultReceived(EndpointResult endpointResult, int clusterId) { final Result result = process(endpointResult, clusterId); if (result != null) { incompleteResultsThrottler.resultReady(result.isSuccess()); resultCallback.onCompletion(result.getDocumentId(), result); if (traceToStderr && result.hasLocalTrace()) { System.err.println(result.toString()); } } } public void onEndpointError(FeedEndpointException e) { resultCallback.onEndpointException(e); } public List<Exception> closeClusters() { List<Exception> exceptions = new ArrayList<>(); for (ClusterConnection cluster : clusters) { try { cluster.close(); } catch (Exception e) { exceptions.add(e); } } return exceptions; } public void sendDocument(Document document) { incompleteResultsThrottler.operationStart(); synchronized (monitor) { if (blockOperationsToSameDocument && inflightDocumentIds.contains(document.getDocumentId())) { blockedDocumentsByDocumentId.put(document.getDocumentId(), document); return; } inflightDocumentIds.add(document.getDocumentId()); } sendToClusters(document); } private void sendToClusters(Document document) { synchronized (monitor) { boolean traceThisDoc = traceEveryXOperation > 0 && traceCounter++ % traceEveryXOperation == 0; docSendInfoByOperationId.put(document.getOperationId(), new DocumentSendInfo(document, traceThisDoc)); } for (ClusterConnection clusterConnection : clusters) { postToCluster(clusterConnection, document); } } private void postToCluster(ClusterConnection clusterConnection, Document document) { try { clusterConnection.post(document); } catch (EndpointIOException eio) { resultReceived(EndPointResultFactory.createError(eio.getEndpoint(), document.getOperationId(), eio), clusterConnection.getClusterId()); } } public String getStatsAsJson() { return operationStats.getStatsAsJson(); } public void close() { List<Exception> exceptions = closeClusters(); try 
{ closeExecutor(); } catch (InterruptedException e) { exceptions.add(e); } if (exceptions.isEmpty()) { return; } if (exceptions.size() == 1) { if (exceptions.get(0) instanceof RuntimeException) { throw (RuntimeException) exceptions.get(0); } else { throw new RuntimeException(exceptions.get(0)); } } StringBuilder b = new StringBuilder(); b.append("Exception thrown while closing one or more clusters: "); for (int i = 0; i < exceptions.size(); i++) { Exception e = exceptions.get(i); b.append(Exceptions.toMessageString(e)); if (i != (exceptions.size() - 1)) { b.append(", "); } } throw new RuntimeException(b.toString(), exceptions.get(0)); } private void closeExecutor() throws InterruptedException { log.log(Level.FINE, "Shutting down timeout executor."); timeoutExecutor.shutdownNow(); log.log(Level.FINE, "Awaiting termination of already running timeout tasks."); if (! timeoutExecutor.awaitTermination(300, TimeUnit.SECONDS)) { log.severe("Did not manage to shut down the executors within 300 secs, system stuck?"); throw new RuntimeException("Did not manage to shut down retry threads. Please report problem."); } } }
class OperationProcessor { private static final Logger log = Logger.getLogger(OperationProcessor.class.getName()); private final Map<String, DocumentSendInfo> docSendInfoByOperationId = new LinkedHashMap<>(); private final ArrayListMultimap<String, Document> blockedDocumentsByDocumentId = ArrayListMultimap.create(); private final Set<String> inflightDocumentIds = new HashSet<>(); private final int numDestinations; private final FeedClient.ResultCallback resultCallback; private final Object monitor = new Object(); private final IncompleteResultsThrottler incompleteResultsThrottler; private final List<ClusterConnection> clusters = new ArrayList<>(); private final ScheduledThreadPoolExecutor timeoutExecutor; private final OperationStats operationStats; private final int maxRetries; private final long minTimeBetweenRetriesMs; private final Random random = new SecureRandom(); private final int traceEveryXOperation; private final boolean blockOperationsToSameDocument; private int traceCounter = 0; private final boolean traceToStderr; private final String clientId = new BigInteger(130, random).toString(32);; public OperationProcessor( IncompleteResultsThrottler incompleteResultsThrottler, FeedClient.ResultCallback resultCallback, SessionParams sessionParams, ScheduledThreadPoolExecutor timeoutExecutor) { this.numDestinations = sessionParams.getClusters().size(); this.resultCallback = resultCallback; this.incompleteResultsThrottler = incompleteResultsThrottler; this.timeoutExecutor = timeoutExecutor; this.blockOperationsToSameDocument = sessionParams.getConnectionParams().isEnableV3Protocol(); if (sessionParams.getClusters().isEmpty()) { throw new IllegalArgumentException("Cannot feed to 0 clusters."); } for (Cluster cluster : sessionParams.getClusters()) { if (cluster.getEndpoints().isEmpty()) { throw new IllegalArgumentException("Cannot feed to empty cluster."); } } for (int i = 0; i < sessionParams.getClusters().size(); i++) { Cluster cluster = 
sessionParams.getClusters().get(i); clusters.add(new ClusterConnection( this, sessionParams.getFeedParams(), sessionParams.getConnectionParams(), sessionParams.getErrorReport(), cluster, i, sessionParams.getClientQueueSize() / sessionParams.getClusters().size(), timeoutExecutor)); } operationStats = new OperationStats(sessionParams, clusters, incompleteResultsThrottler); maxRetries = sessionParams.getConnectionParams().getMaxRetries(); minTimeBetweenRetriesMs = sessionParams.getConnectionParams().getMinTimeBetweenRetriesMs(); traceEveryXOperation = sessionParams.getConnectionParams().getTraceEveryXOperation(); traceToStderr = sessionParams.getConnectionParams().getPrintTraceToStdErr(); } public int getIncompleteResultQueueSize() { synchronized (monitor) { return docSendInfoByOperationId.size(); } } /** Returns the id of the oldest operation to be sent. */ public String getClientId() { return clientId; } private boolean retriedThis(EndpointResult endpointResult, DocumentSendInfo documentSendInfo, int clusterId) { final Result.Detail detail = endpointResult.getDetail(); if (detail.getResultType() == Result.ResultType.OPERATION_EXECUTED) { return false; } int retries = documentSendInfo.incRetries(clusterId, detail); if (retries > maxRetries) { return false; } String exceptionMessage = detail.getException() == null ? 
"" : detail.getException().getMessage(); if (exceptionMessage == null) { exceptionMessage = ""; } boolean retryThisOperation = detail.getResultType() == Result.ResultType.TRANSITIVE_ERROR || exceptionMessage.contains("SEND_QUEUE_CLOSED") || exceptionMessage.contains("ILLEGAL_ROUTE") || exceptionMessage.contains("NO_SERVICES_FOR_ROUTE") || exceptionMessage.contains("NETWORK_ERROR") || exceptionMessage.contains("SEQUENCE_ERROR") || exceptionMessage.contains("NETWORK_SHUTDOWN") || exceptionMessage.contains("TIMEOUT"); if (retryThisOperation) { int waitTime = (int) (minTimeBetweenRetriesMs * (1 + random.nextDouble() / 3)); log.finest("Retrying due to " + detail.toString() + " attempt " + retries + " in " + waitTime + " ms."); timeoutExecutor.schedule( () -> postToCluster(clusters.get(clusterId), documentSendInfo.getDocument()), waitTime, TimeUnit.MILLISECONDS); return true; } return false; } private Result process(EndpointResult endpointResult, int clusterId) { synchronized (monitor) { if (!docSendInfoByOperationId.containsKey(endpointResult.getOperationId())) { log.finer("Received out-of-order or too late result, discarding: " + endpointResult); return null; } DocumentSendInfo documentSendInfo = docSendInfoByOperationId.get(endpointResult.getOperationId()); if (retriedThis(endpointResult, documentSendInfo, clusterId)) { return null; } if (!documentSendInfo.addIfNotAlreadyThere(endpointResult.getDetail(), clusterId)) { return null; } if (documentSendInfo.detailCount() != numDestinations) { return null; } Result result = documentSendInfo.createResult(); docSendInfoByOperationId.remove(endpointResult.getOperationId()); String documentId = documentSendInfo.getDocument().getDocumentId(); /** * If we got a pending operation against this document * dont't remove it from inflightDocuments and send blocked document operation */ List<Document> blockedDocuments = blockedDocumentsByDocumentId.get(documentId); if (blockedDocuments.isEmpty()) { 
inflightDocumentIds.remove(documentId); } else { sendToClusters(blockedDocuments.remove(0)); } return result; } } public void resultReceived(EndpointResult endpointResult, int clusterId) { final Result result = process(endpointResult, clusterId); if (result != null) { incompleteResultsThrottler.resultReady(result.isSuccess()); resultCallback.onCompletion(result.getDocumentId(), result); if (traceToStderr && result.hasLocalTrace()) { System.err.println(result.toString()); } } } public void onEndpointError(FeedEndpointException e) { resultCallback.onEndpointException(e); } public List<Exception> closeClusters() { List<Exception> exceptions = new ArrayList<>(); for (ClusterConnection cluster : clusters) { try { cluster.close(); } catch (Exception e) { exceptions.add(e); } } return exceptions; } public void sendDocument(Document document) { incompleteResultsThrottler.operationStart(); synchronized (monitor) { if (blockOperationsToSameDocument && inflightDocumentIds.contains(document.getDocumentId())) { blockedDocumentsByDocumentId.put(document.getDocumentId(), document); return; } inflightDocumentIds.add(document.getDocumentId()); } sendToClusters(document); } private void sendToClusters(Document document) { synchronized (monitor) { boolean traceThisDoc = traceEveryXOperation > 0 && traceCounter++ % traceEveryXOperation == 0; docSendInfoByOperationId.put(document.getOperationId(), new DocumentSendInfo(document, traceThisDoc)); } for (ClusterConnection clusterConnection : clusters) { postToCluster(clusterConnection, document); } } private void postToCluster(ClusterConnection clusterConnection, Document document) { try { clusterConnection.post(document); } catch (EndpointIOException eio) { resultReceived(EndPointResultFactory.createError(eio.getEndpoint(), document.getOperationId(), eio), clusterConnection.getClusterId()); } } public String getStatsAsJson() { return operationStats.getStatsAsJson(); } public void close() { List<Exception> exceptions = closeClusters(); try 
{ closeExecutor(); } catch (InterruptedException e) { exceptions.add(e); } if (exceptions.isEmpty()) { return; } if (exceptions.size() == 1) { if (exceptions.get(0) instanceof RuntimeException) { throw (RuntimeException) exceptions.get(0); } else { throw new RuntimeException(exceptions.get(0)); } } StringBuilder b = new StringBuilder(); b.append("Exception thrown while closing one or more clusters: "); for (int i = 0; i < exceptions.size(); i++) { Exception e = exceptions.get(i); b.append(Exceptions.toMessageString(e)); if (i != (exceptions.size() - 1)) { b.append(", "); } } throw new RuntimeException(b.toString(), exceptions.get(0)); } private void closeExecutor() throws InterruptedException { log.log(Level.FINE, "Shutting down timeout executor."); timeoutExecutor.shutdownNow(); log.log(Level.FINE, "Awaiting termination of already running timeout tasks."); if (! timeoutExecutor.awaitTermination(300, TimeUnit.SECONDS)) { log.severe("Did not manage to shut down the executors within 300 secs, system stuck?"); throw new RuntimeException("Did not manage to shut down retry threads. Please report problem."); } } }
Strictly this should be called something else now.
public void close() { Instant lastResultReceived = Instant.now(); Optional<String> oldestIncompleteId = operationProcessor.oldestIncompleteResultId(); while (oldestIncompleteId.isPresent() && waitForOperations(lastResultReceived, sleepTimeMs, closeTimeoutMs)) { Optional<String> oldestIncompleteIdNow = operationProcessor.oldestIncompleteResultId(); if ( ! oldestIncompleteId.equals(oldestIncompleteIdNow)) lastResultReceived = Instant.now(); oldestIncompleteId = oldestIncompleteIdNow; } operationProcessor.close(); }
Instant lastResultReceived = Instant.now();
public void close() { Instant lastOldestResultReceivedAt = Instant.now(); Optional<String> oldestIncompleteId = operationProcessor.oldestIncompleteResultId(); while (oldestIncompleteId.isPresent() && waitForOperations(lastOldestResultReceivedAt, sleepTimeMs, closeTimeoutMs)) { Optional<String> oldestIncompleteIdNow = operationProcessor.oldestIncompleteResultId(); if ( ! oldestIncompleteId.equals(oldestIncompleteIdNow)) lastOldestResultReceivedAt = Instant.now(); oldestIncompleteId = oldestIncompleteIdNow; } operationProcessor.close(); }
class FeedClientImpl implements FeedClient { private final OperationProcessor operationProcessor; private final long closeTimeoutMs; private final long sleepTimeMs = 500; public FeedClientImpl( SessionParams sessionParams, ResultCallback resultCallback, ScheduledThreadPoolExecutor timeoutExecutor) { this.closeTimeoutMs = (1 + sessionParams.getConnectionParams().getMaxRetries()) * ( sessionParams.getFeedParams().getServerTimeout(TimeUnit.MILLISECONDS) + sessionParams.getFeedParams().getClientTimeout(TimeUnit.MILLISECONDS)); this.operationProcessor = new OperationProcessor( new IncompleteResultsThrottler( sessionParams.getThrottlerMinSize(), sessionParams.getClientQueueSize(), ()->System.currentTimeMillis(), new ThrottlePolicy()), resultCallback, sessionParams, timeoutExecutor); } @Override public void stream(String documentId, CharSequence documentData) { stream(documentId, documentData, null); } @Override public void stream(String documentId, CharSequence documentData, Object context) { CharsetEncoder charsetEncoder = StandardCharsets.UTF_8.newEncoder(); charsetEncoder.onMalformedInput(CodingErrorAction.REPORT); charsetEncoder.onUnmappableCharacter(CodingErrorAction.REPORT); Document document = new Document(documentId, documentData, context); operationProcessor.sendDocument(document); } @Override @Override public String getStatsAsJson() { return operationProcessor.getStatsAsJson(); } public static boolean waitForOperations(Instant lastResultReceived, long sleepTimeMs, long closeTimeoutMs) { if (lastResultReceived.plusMillis(closeTimeoutMs).isBefore(Instant.now())) { return false; } try { Thread.sleep(sleepTimeMs); } catch (InterruptedException e) { return false; } return true; } }
class FeedClientImpl implements FeedClient { private final OperationProcessor operationProcessor; private final long closeTimeoutMs; private final long sleepTimeMs = 500; public FeedClientImpl( SessionParams sessionParams, ResultCallback resultCallback, ScheduledThreadPoolExecutor timeoutExecutor) { this.closeTimeoutMs = (1 + sessionParams.getConnectionParams().getMaxRetries()) * ( sessionParams.getFeedParams().getServerTimeout(TimeUnit.MILLISECONDS) + sessionParams.getFeedParams().getClientTimeout(TimeUnit.MILLISECONDS)); this.operationProcessor = new OperationProcessor( new IncompleteResultsThrottler( sessionParams.getThrottlerMinSize(), sessionParams.getClientQueueSize(), ()->System.currentTimeMillis(), new ThrottlePolicy()), resultCallback, sessionParams, timeoutExecutor); } @Override public void stream(String documentId, CharSequence documentData) { stream(documentId, documentData, null); } @Override public void stream(String documentId, CharSequence documentData, Object context) { CharsetEncoder charsetEncoder = StandardCharsets.UTF_8.newEncoder(); charsetEncoder.onMalformedInput(CodingErrorAction.REPORT); charsetEncoder.onUnmappableCharacter(CodingErrorAction.REPORT); Document document = new Document(documentId, documentData, context); operationProcessor.sendDocument(document); } @Override @Override public String getStatsAsJson() { return operationProcessor.getStatsAsJson(); } public static boolean waitForOperations(Instant lastResultReceived, long sleepTimeMs, long closeTimeoutMs) { if (lastResultReceived.plusMillis(closeTimeoutMs).isBefore(Instant.now())) { return false; } try { Thread.sleep(sleepTimeMs); } catch (InterruptedException e) { return false; } return true; } }
Hehe. You don't have a bad point.
public Optional<String> oldestIncompleteResultId() { synchronized (monitor) { return Optional.of(docSendInfoByOperationId.keySet().iterator()) .filter(Iterator::hasNext) .map(Iterator::next); } }
return Optional.of(docSendInfoByOperationId.keySet().iterator())
public Optional<String> oldestIncompleteResultId() { synchronized (monitor) { return docSendInfoByOperationId.isEmpty() ? Optional.empty() : Optional.of(docSendInfoByOperationId.keySet().iterator().next()); } }
class OperationProcessor { private static final Logger log = Logger.getLogger(OperationProcessor.class.getName()); private final Map<String, DocumentSendInfo> docSendInfoByOperationId = new LinkedHashMap<>(); private final ArrayListMultimap<String, Document> blockedDocumentsByDocumentId = ArrayListMultimap.create(); private final Set<String> inflightDocumentIds = new HashSet<>(); private final int numDestinations; private final FeedClient.ResultCallback resultCallback; private final Object monitor = new Object(); private final IncompleteResultsThrottler incompleteResultsThrottler; private final List<ClusterConnection> clusters = new ArrayList<>(); private final ScheduledThreadPoolExecutor timeoutExecutor; private final OperationStats operationStats; private final int maxRetries; private final long minTimeBetweenRetriesMs; private final Random random = new SecureRandom(); private final int traceEveryXOperation; private final boolean blockOperationsToSameDocument; private int traceCounter = 0; private final boolean traceToStderr; private final String clientId = new BigInteger(130, random).toString(32);; public OperationProcessor( IncompleteResultsThrottler incompleteResultsThrottler, FeedClient.ResultCallback resultCallback, SessionParams sessionParams, ScheduledThreadPoolExecutor timeoutExecutor) { this.numDestinations = sessionParams.getClusters().size(); this.resultCallback = resultCallback; this.incompleteResultsThrottler = incompleteResultsThrottler; this.timeoutExecutor = timeoutExecutor; this.blockOperationsToSameDocument = sessionParams.getConnectionParams().isEnableV3Protocol(); if (sessionParams.getClusters().isEmpty()) { throw new IllegalArgumentException("Cannot feed to 0 clusters."); } for (Cluster cluster : sessionParams.getClusters()) { if (cluster.getEndpoints().isEmpty()) { throw new IllegalArgumentException("Cannot feed to empty cluster."); } } for (int i = 0; i < sessionParams.getClusters().size(); i++) { Cluster cluster = 
sessionParams.getClusters().get(i); clusters.add(new ClusterConnection( this, sessionParams.getFeedParams(), sessionParams.getConnectionParams(), sessionParams.getErrorReport(), cluster, i, sessionParams.getClientQueueSize() / sessionParams.getClusters().size(), timeoutExecutor)); } operationStats = new OperationStats(sessionParams, clusters, incompleteResultsThrottler); maxRetries = sessionParams.getConnectionParams().getMaxRetries(); minTimeBetweenRetriesMs = sessionParams.getConnectionParams().getMinTimeBetweenRetriesMs(); traceEveryXOperation = sessionParams.getConnectionParams().getTraceEveryXOperation(); traceToStderr = sessionParams.getConnectionParams().getPrintTraceToStdErr(); } public int getIncompleteResultQueueSize() { synchronized (monitor) { return docSendInfoByOperationId.size(); } } /** Returns the id of the oldest operation to be sent. */ public String getClientId() { return clientId; } private boolean retriedThis(EndpointResult endpointResult, DocumentSendInfo documentSendInfo, int clusterId) { final Result.Detail detail = endpointResult.getDetail(); if (detail.getResultType() == Result.ResultType.OPERATION_EXECUTED) { return false; } int retries = documentSendInfo.incRetries(clusterId, detail); if (retries > maxRetries) { return false; } String exceptionMessage = detail.getException() == null ? 
"" : detail.getException().getMessage(); if (exceptionMessage == null) { exceptionMessage = ""; } boolean retryThisOperation = detail.getResultType() == Result.ResultType.TRANSITIVE_ERROR || exceptionMessage.contains("SEND_QUEUE_CLOSED") || exceptionMessage.contains("ILLEGAL_ROUTE") || exceptionMessage.contains("NO_SERVICES_FOR_ROUTE") || exceptionMessage.contains("NETWORK_ERROR") || exceptionMessage.contains("SEQUENCE_ERROR") || exceptionMessage.contains("NETWORK_SHUTDOWN") || exceptionMessage.contains("TIMEOUT"); if (retryThisOperation) { int waitTime = (int) (minTimeBetweenRetriesMs * (1 + random.nextDouble() / 3)); log.finest("Retrying due to " + detail.toString() + " attempt " + retries + " in " + waitTime + " ms."); timeoutExecutor.schedule( () -> postToCluster(clusters.get(clusterId), documentSendInfo.getDocument()), waitTime, TimeUnit.MILLISECONDS); return true; } return false; } private Result process(EndpointResult endpointResult, int clusterId) { synchronized (monitor) { if (!docSendInfoByOperationId.containsKey(endpointResult.getOperationId())) { log.finer("Received out-of-order or too late result, discarding: " + endpointResult); return null; } DocumentSendInfo documentSendInfo = docSendInfoByOperationId.get(endpointResult.getOperationId()); if (retriedThis(endpointResult, documentSendInfo, clusterId)) { return null; } if (!documentSendInfo.addIfNotAlreadyThere(endpointResult.getDetail(), clusterId)) { return null; } if (documentSendInfo.detailCount() != numDestinations) { return null; } Result result = documentSendInfo.createResult(); docSendInfoByOperationId.remove(endpointResult.getOperationId()); String documentId = documentSendInfo.getDocument().getDocumentId(); /** * If we got a pending operation against this document * dont't remove it from inflightDocuments and send blocked document operation */ List<Document> blockedDocuments = blockedDocumentsByDocumentId.get(documentId); if (blockedDocuments.isEmpty()) { 
inflightDocumentIds.remove(documentId); } else { sendToClusters(blockedDocuments.remove(0)); } return result; } } public void resultReceived(EndpointResult endpointResult, int clusterId) { final Result result = process(endpointResult, clusterId); if (result != null) { incompleteResultsThrottler.resultReady(result.isSuccess()); resultCallback.onCompletion(result.getDocumentId(), result); if (traceToStderr && result.hasLocalTrace()) { System.err.println(result.toString()); } } } public void onEndpointError(FeedEndpointException e) { resultCallback.onEndpointException(e); } public List<Exception> closeClusters() { List<Exception> exceptions = new ArrayList<>(); for (ClusterConnection cluster : clusters) { try { cluster.close(); } catch (Exception e) { exceptions.add(e); } } return exceptions; } public void sendDocument(Document document) { incompleteResultsThrottler.operationStart(); synchronized (monitor) { if (blockOperationsToSameDocument && inflightDocumentIds.contains(document.getDocumentId())) { blockedDocumentsByDocumentId.put(document.getDocumentId(), document); return; } inflightDocumentIds.add(document.getDocumentId()); } sendToClusters(document); } private void sendToClusters(Document document) { synchronized (monitor) { boolean traceThisDoc = traceEveryXOperation > 0 && traceCounter++ % traceEveryXOperation == 0; docSendInfoByOperationId.put(document.getOperationId(), new DocumentSendInfo(document, traceThisDoc)); } for (ClusterConnection clusterConnection : clusters) { postToCluster(clusterConnection, document); } } private void postToCluster(ClusterConnection clusterConnection, Document document) { try { clusterConnection.post(document); } catch (EndpointIOException eio) { resultReceived(EndPointResultFactory.createError(eio.getEndpoint(), document.getOperationId(), eio), clusterConnection.getClusterId()); } } public String getStatsAsJson() { return operationStats.getStatsAsJson(); } public void close() { List<Exception> exceptions = closeClusters(); try 
{ closeExecutor(); } catch (InterruptedException e) { exceptions.add(e); } if (exceptions.isEmpty()) { return; } if (exceptions.size() == 1) { if (exceptions.get(0) instanceof RuntimeException) { throw (RuntimeException) exceptions.get(0); } else { throw new RuntimeException(exceptions.get(0)); } } StringBuilder b = new StringBuilder(); b.append("Exception thrown while closing one or more clusters: "); for (int i = 0; i < exceptions.size(); i++) { Exception e = exceptions.get(i); b.append(Exceptions.toMessageString(e)); if (i != (exceptions.size() - 1)) { b.append(", "); } } throw new RuntimeException(b.toString(), exceptions.get(0)); } private void closeExecutor() throws InterruptedException { log.log(Level.FINE, "Shutting down timeout executor."); timeoutExecutor.shutdownNow(); log.log(Level.FINE, "Awaiting termination of already running timeout tasks."); if (! timeoutExecutor.awaitTermination(300, TimeUnit.SECONDS)) { log.severe("Did not manage to shut down the executors within 300 secs, system stuck?"); throw new RuntimeException("Did not manage to shut down retry threads. Please report problem."); } } }
class OperationProcessor { private static final Logger log = Logger.getLogger(OperationProcessor.class.getName()); private final Map<String, DocumentSendInfo> docSendInfoByOperationId = new LinkedHashMap<>(); private final ArrayListMultimap<String, Document> blockedDocumentsByDocumentId = ArrayListMultimap.create(); private final Set<String> inflightDocumentIds = new HashSet<>(); private final int numDestinations; private final FeedClient.ResultCallback resultCallback; private final Object monitor = new Object(); private final IncompleteResultsThrottler incompleteResultsThrottler; private final List<ClusterConnection> clusters = new ArrayList<>(); private final ScheduledThreadPoolExecutor timeoutExecutor; private final OperationStats operationStats; private final int maxRetries; private final long minTimeBetweenRetriesMs; private final Random random = new SecureRandom(); private final int traceEveryXOperation; private final boolean blockOperationsToSameDocument; private int traceCounter = 0; private final boolean traceToStderr; private final String clientId = new BigInteger(130, random).toString(32);; public OperationProcessor( IncompleteResultsThrottler incompleteResultsThrottler, FeedClient.ResultCallback resultCallback, SessionParams sessionParams, ScheduledThreadPoolExecutor timeoutExecutor) { this.numDestinations = sessionParams.getClusters().size(); this.resultCallback = resultCallback; this.incompleteResultsThrottler = incompleteResultsThrottler; this.timeoutExecutor = timeoutExecutor; this.blockOperationsToSameDocument = sessionParams.getConnectionParams().isEnableV3Protocol(); if (sessionParams.getClusters().isEmpty()) { throw new IllegalArgumentException("Cannot feed to 0 clusters."); } for (Cluster cluster : sessionParams.getClusters()) { if (cluster.getEndpoints().isEmpty()) { throw new IllegalArgumentException("Cannot feed to empty cluster."); } } for (int i = 0; i < sessionParams.getClusters().size(); i++) { Cluster cluster = 
sessionParams.getClusters().get(i); clusters.add(new ClusterConnection( this, sessionParams.getFeedParams(), sessionParams.getConnectionParams(), sessionParams.getErrorReport(), cluster, i, sessionParams.getClientQueueSize() / sessionParams.getClusters().size(), timeoutExecutor)); } operationStats = new OperationStats(sessionParams, clusters, incompleteResultsThrottler); maxRetries = sessionParams.getConnectionParams().getMaxRetries(); minTimeBetweenRetriesMs = sessionParams.getConnectionParams().getMinTimeBetweenRetriesMs(); traceEveryXOperation = sessionParams.getConnectionParams().getTraceEveryXOperation(); traceToStderr = sessionParams.getConnectionParams().getPrintTraceToStdErr(); } public int getIncompleteResultQueueSize() { synchronized (monitor) { return docSendInfoByOperationId.size(); } } /** Returns the id of the oldest operation to be sent. */ public String getClientId() { return clientId; } private boolean retriedThis(EndpointResult endpointResult, DocumentSendInfo documentSendInfo, int clusterId) { final Result.Detail detail = endpointResult.getDetail(); if (detail.getResultType() == Result.ResultType.OPERATION_EXECUTED) { return false; } int retries = documentSendInfo.incRetries(clusterId, detail); if (retries > maxRetries) { return false; } String exceptionMessage = detail.getException() == null ? 
"" : detail.getException().getMessage(); if (exceptionMessage == null) { exceptionMessage = ""; } boolean retryThisOperation = detail.getResultType() == Result.ResultType.TRANSITIVE_ERROR || exceptionMessage.contains("SEND_QUEUE_CLOSED") || exceptionMessage.contains("ILLEGAL_ROUTE") || exceptionMessage.contains("NO_SERVICES_FOR_ROUTE") || exceptionMessage.contains("NETWORK_ERROR") || exceptionMessage.contains("SEQUENCE_ERROR") || exceptionMessage.contains("NETWORK_SHUTDOWN") || exceptionMessage.contains("TIMEOUT"); if (retryThisOperation) { int waitTime = (int) (minTimeBetweenRetriesMs * (1 + random.nextDouble() / 3)); log.finest("Retrying due to " + detail.toString() + " attempt " + retries + " in " + waitTime + " ms."); timeoutExecutor.schedule( () -> postToCluster(clusters.get(clusterId), documentSendInfo.getDocument()), waitTime, TimeUnit.MILLISECONDS); return true; } return false; } private Result process(EndpointResult endpointResult, int clusterId) { synchronized (monitor) { if (!docSendInfoByOperationId.containsKey(endpointResult.getOperationId())) { log.finer("Received out-of-order or too late result, discarding: " + endpointResult); return null; } DocumentSendInfo documentSendInfo = docSendInfoByOperationId.get(endpointResult.getOperationId()); if (retriedThis(endpointResult, documentSendInfo, clusterId)) { return null; } if (!documentSendInfo.addIfNotAlreadyThere(endpointResult.getDetail(), clusterId)) { return null; } if (documentSendInfo.detailCount() != numDestinations) { return null; } Result result = documentSendInfo.createResult(); docSendInfoByOperationId.remove(endpointResult.getOperationId()); String documentId = documentSendInfo.getDocument().getDocumentId(); /** * If we got a pending operation against this document * dont't remove it from inflightDocuments and send blocked document operation */ List<Document> blockedDocuments = blockedDocumentsByDocumentId.get(documentId); if (blockedDocuments.isEmpty()) { 
inflightDocumentIds.remove(documentId); } else { sendToClusters(blockedDocuments.remove(0)); } return result; } } public void resultReceived(EndpointResult endpointResult, int clusterId) { final Result result = process(endpointResult, clusterId); if (result != null) { incompleteResultsThrottler.resultReady(result.isSuccess()); resultCallback.onCompletion(result.getDocumentId(), result); if (traceToStderr && result.hasLocalTrace()) { System.err.println(result.toString()); } } } public void onEndpointError(FeedEndpointException e) { resultCallback.onEndpointException(e); } public List<Exception> closeClusters() { List<Exception> exceptions = new ArrayList<>(); for (ClusterConnection cluster : clusters) { try { cluster.close(); } catch (Exception e) { exceptions.add(e); } } return exceptions; } public void sendDocument(Document document) { incompleteResultsThrottler.operationStart(); synchronized (monitor) { if (blockOperationsToSameDocument && inflightDocumentIds.contains(document.getDocumentId())) { blockedDocumentsByDocumentId.put(document.getDocumentId(), document); return; } inflightDocumentIds.add(document.getDocumentId()); } sendToClusters(document); } private void sendToClusters(Document document) { synchronized (monitor) { boolean traceThisDoc = traceEveryXOperation > 0 && traceCounter++ % traceEveryXOperation == 0; docSendInfoByOperationId.put(document.getOperationId(), new DocumentSendInfo(document, traceThisDoc)); } for (ClusterConnection clusterConnection : clusters) { postToCluster(clusterConnection, document); } } private void postToCluster(ClusterConnection clusterConnection, Document document) { try { clusterConnection.post(document); } catch (EndpointIOException eio) { resultReceived(EndPointResultFactory.createError(eio.getEndpoint(), document.getOperationId(), eio), clusterConnection.getClusterId()); } } public String getStatsAsJson() { return operationStats.getStatsAsJson(); } public void close() { List<Exception> exceptions = closeClusters(); try 
{ closeExecutor(); } catch (InterruptedException e) { exceptions.add(e); } if (exceptions.isEmpty()) { return; } if (exceptions.size() == 1) { if (exceptions.get(0) instanceof RuntimeException) { throw (RuntimeException) exceptions.get(0); } else { throw new RuntimeException(exceptions.get(0)); } } StringBuilder b = new StringBuilder(); b.append("Exception thrown while closing one or more clusters: "); for (int i = 0; i < exceptions.size(); i++) { Exception e = exceptions.get(i); b.append(Exceptions.toMessageString(e)); if (i != (exceptions.size() - 1)) { b.append(", "); } } throw new RuntimeException(b.toString(), exceptions.get(0)); } private void closeExecutor() throws InterruptedException { log.log(Level.FINE, "Shutting down timeout executor."); timeoutExecutor.shutdownNow(); log.log(Level.FINE, "Awaiting termination of already running timeout tasks."); if (! timeoutExecutor.awaitTermination(300, TimeUnit.SECONDS)) { log.severe("Did not manage to shut down the executors within 300 secs, system stuck?"); throw new RuntimeException("Did not manage to shut down retry threads. Please report problem."); } } }
Yes :(
public void close() { Instant lastResultReceived = Instant.now(); Optional<String> oldestIncompleteId = operationProcessor.oldestIncompleteResultId(); while (oldestIncompleteId.isPresent() && waitForOperations(lastResultReceived, sleepTimeMs, closeTimeoutMs)) { Optional<String> oldestIncompleteIdNow = operationProcessor.oldestIncompleteResultId(); if ( ! oldestIncompleteId.equals(oldestIncompleteIdNow)) lastResultReceived = Instant.now(); oldestIncompleteId = oldestIncompleteIdNow; } operationProcessor.close(); }
Instant lastResultReceived = Instant.now();
public void close() { Instant lastOldestResultReceivedAt = Instant.now(); Optional<String> oldestIncompleteId = operationProcessor.oldestIncompleteResultId(); while (oldestIncompleteId.isPresent() && waitForOperations(lastOldestResultReceivedAt, sleepTimeMs, closeTimeoutMs)) { Optional<String> oldestIncompleteIdNow = operationProcessor.oldestIncompleteResultId(); if ( ! oldestIncompleteId.equals(oldestIncompleteIdNow)) lastOldestResultReceivedAt = Instant.now(); oldestIncompleteId = oldestIncompleteIdNow; } operationProcessor.close(); }
class FeedClientImpl implements FeedClient { private final OperationProcessor operationProcessor; private final long closeTimeoutMs; private final long sleepTimeMs = 500; public FeedClientImpl( SessionParams sessionParams, ResultCallback resultCallback, ScheduledThreadPoolExecutor timeoutExecutor) { this.closeTimeoutMs = (1 + sessionParams.getConnectionParams().getMaxRetries()) * ( sessionParams.getFeedParams().getServerTimeout(TimeUnit.MILLISECONDS) + sessionParams.getFeedParams().getClientTimeout(TimeUnit.MILLISECONDS)); this.operationProcessor = new OperationProcessor( new IncompleteResultsThrottler( sessionParams.getThrottlerMinSize(), sessionParams.getClientQueueSize(), ()->System.currentTimeMillis(), new ThrottlePolicy()), resultCallback, sessionParams, timeoutExecutor); } @Override public void stream(String documentId, CharSequence documentData) { stream(documentId, documentData, null); } @Override public void stream(String documentId, CharSequence documentData, Object context) { CharsetEncoder charsetEncoder = StandardCharsets.UTF_8.newEncoder(); charsetEncoder.onMalformedInput(CodingErrorAction.REPORT); charsetEncoder.onUnmappableCharacter(CodingErrorAction.REPORT); Document document = new Document(documentId, documentData, context); operationProcessor.sendDocument(document); } @Override @Override public String getStatsAsJson() { return operationProcessor.getStatsAsJson(); } public static boolean waitForOperations(Instant lastResultReceived, long sleepTimeMs, long closeTimeoutMs) { if (lastResultReceived.plusMillis(closeTimeoutMs).isBefore(Instant.now())) { return false; } try { Thread.sleep(sleepTimeMs); } catch (InterruptedException e) { return false; } return true; } }
class FeedClientImpl implements FeedClient { private final OperationProcessor operationProcessor; private final long closeTimeoutMs; private final long sleepTimeMs = 500; public FeedClientImpl( SessionParams sessionParams, ResultCallback resultCallback, ScheduledThreadPoolExecutor timeoutExecutor) { this.closeTimeoutMs = (1 + sessionParams.getConnectionParams().getMaxRetries()) * ( sessionParams.getFeedParams().getServerTimeout(TimeUnit.MILLISECONDS) + sessionParams.getFeedParams().getClientTimeout(TimeUnit.MILLISECONDS)); this.operationProcessor = new OperationProcessor( new IncompleteResultsThrottler( sessionParams.getThrottlerMinSize(), sessionParams.getClientQueueSize(), ()->System.currentTimeMillis(), new ThrottlePolicy()), resultCallback, sessionParams, timeoutExecutor); } @Override public void stream(String documentId, CharSequence documentData) { stream(documentId, documentData, null); } @Override public void stream(String documentId, CharSequence documentData, Object context) { CharsetEncoder charsetEncoder = StandardCharsets.UTF_8.newEncoder(); charsetEncoder.onMalformedInput(CodingErrorAction.REPORT); charsetEncoder.onUnmappableCharacter(CodingErrorAction.REPORT); Document document = new Document(documentId, documentData, context); operationProcessor.sendDocument(document); } @Override @Override public String getStatsAsJson() { return operationProcessor.getStatsAsJson(); } public static boolean waitForOperations(Instant lastResultReceived, long sleepTimeMs, long closeTimeoutMs) { if (lastResultReceived.plusMillis(closeTimeoutMs).isBefore(Instant.now())) { return false; } try { Thread.sleep(sleepTimeMs); } catch (InterruptedException e) { return false; } return true; } }
Try to use the test methods to create private key and certificate so we don't need to keep actual private keys in the repo
private void addKeyPair(TlsConfig config) { setSecret(config.privateKeySecret(), readFile("test.key")); setSecret(config.certificateSecret(), readFile("test.crt")); }
setSecret(config.privateKeySecret(), readFile("test.key"));
private void addKeyPair(TlsConfig config) { setSecret(config.privateKeySecret(), KeyUtils.toPem(Keys.keyPair.getPrivate())); setSecret(config.certificateSecret(), X509CertificateUtils.toPem(Keys.certificate)); }
class SecretStoreMock extends com.yahoo.vespa.hosted.controller.integration.SecretStoreMock { @Inject public SecretStoreMock(TlsConfig config) { addKeyPair(config); } private static String readFile(String name) { try { return new String(Files.readAllBytes(Paths.get("src/test/resources/tls/").resolve(name)), StandardCharsets.UTF_8); } catch (IOException e) { throw new UncheckedIOException(e); } } }
class SecretStoreMock extends com.yahoo.vespa.hosted.controller.integration.SecretStoreMock { @Inject public SecretStoreMock(TlsConfig config) { addKeyPair(config); } }
Fixed.
private void addKeyPair(TlsConfig config) { setSecret(config.privateKeySecret(), readFile("test.key")); setSecret(config.certificateSecret(), readFile("test.crt")); }
setSecret(config.privateKeySecret(), readFile("test.key"));
private void addKeyPair(TlsConfig config) { setSecret(config.privateKeySecret(), KeyUtils.toPem(Keys.keyPair.getPrivate())); setSecret(config.certificateSecret(), X509CertificateUtils.toPem(Keys.certificate)); }
class SecretStoreMock extends com.yahoo.vespa.hosted.controller.integration.SecretStoreMock { @Inject public SecretStoreMock(TlsConfig config) { addKeyPair(config); } private static String readFile(String name) { try { return new String(Files.readAllBytes(Paths.get("src/test/resources/tls/").resolve(name)), StandardCharsets.UTF_8); } catch (IOException e) { throw new UncheckedIOException(e); } } }
class SecretStoreMock extends com.yahoo.vespa.hosted.controller.integration.SecretStoreMock { @Inject public SecretStoreMock(TlsConfig config) { addKeyPair(config); } }
You can use value.orElse(null) as well
public void set(Optional<T> value) { this.value = value.isPresent() ? value.get() : null; }
this.value = value.isPresent() ? value.get() : null;
public void set(Optional<T> value) { this.value = value.orElse(null); }
class SettableOptional<T> { private T value = null; /** Creates a new empty settable optional */ public SettableOptional() {} /** Creates a new settable optional with the given value */ public SettableOptional(T value) { this.value = value; } /** Creates a new settable optional with the given value, or an empty */ public SettableOptional(Optional<T> value) { this.value = value.isPresent() ? value.get() : null; } public boolean isPresent() { return value != null; } public T get() { if (value == null) throw new NoSuchElementException("No value present"); return value; } public void set(T value) { this.value = value; } public Optional<T> asOptional() { if (value == null) return Optional.empty(); return Optional.of(value); } }
class SettableOptional<T> { private T value = null; /** Creates a new empty settable optional */ public SettableOptional() {} /** Creates a new settable optional with the given value */ public SettableOptional(T value) { this.value = value; } /** Creates a new settable optional with the given value, or an empty */ public SettableOptional(Optional<T> value) { this.value = value.orElse(null); } public boolean isPresent() { return value != null; } public T get() { if (value == null) throw new NoSuchElementException("No value present"); return value; } public void set(T value) { this.value = value; } public Optional<T> asOptional() { if (value == null) return Optional.empty(); return Optional.of(value); } }
I'd also check if you can resume, and then see that the application is resumed.
public void testSuspension() { deployApp(testApp); assertFalse(applicationRepository.isSuspended(applicationId())); orchestrator.suspend(applicationId()); assertTrue(applicationRepository.isSuspended(applicationId())); }
assertTrue(applicationRepository.isSuspended(applicationId()));
public void testSuspension() { deployApp(testApp); assertFalse(applicationRepository.isSuspended(applicationId())); orchestrator.suspend(applicationId()); assertTrue(applicationRepository.isSuspended(applicationId())); }
class ApplicationRepositoryTest { private final static File testApp = new File("src/test/apps/app"); private final static File testAppJdiscOnly = new File("src/test/apps/app-jdisc-only"); private final static File testAppJdiscOnlyRestart = new File("src/test/apps/app-jdisc-only-restart"); private final static File testAppLogServerWithContainer = new File("src/test/apps/app-logserver-with-container"); private final static TenantName tenant1 = TenantName.from("test1"); private final static TenantName tenant2 = TenantName.from("test2"); private final static TenantName tenant3 = TenantName.from("test3"); private final static Clock clock = Clock.systemUTC(); private ApplicationRepository applicationRepository; private TenantRepository tenantRepository; private SessionHandlerTest.MockProvisioner provisioner; private OrchestratorMock orchestrator; private TimeoutBudget timeoutBudget; @Rule public TemporaryFolder temporaryFolder = new TemporaryFolder(); @Before public void setup() { Curator curator = new MockCurator(); tenantRepository = new TenantRepository(new TestComponentRegistry.Builder() .curator(curator) .build()); tenantRepository.addTenant(tenant1); tenantRepository.addTenant(tenant2); tenantRepository.addTenant(tenant3); orchestrator = new OrchestratorMock(); provisioner = new SessionHandlerTest.MockProvisioner(); applicationRepository = new ApplicationRepository(tenantRepository, provisioner, orchestrator, clock); timeoutBudget = new TimeoutBudget(clock, Duration.ofSeconds(60)); } @Test public void prepareAndActivate() throws IOException { PrepareResult result = prepareAndActivateApp(testApp); assertTrue(result.configChangeActions().getRefeedActions().isEmpty()); assertTrue(result.configChangeActions().getRestartActions().isEmpty()); } @Test public void prepareAndActivateWithRestart() throws IOException { prepareAndActivateApp(testAppJdiscOnly); PrepareResult result = prepareAndActivateApp(testAppJdiscOnlyRestart); 
assertTrue(result.configChangeActions().getRefeedActions().isEmpty()); assertFalse(result.configChangeActions().getRestartActions().isEmpty()); } @Test public void createAndPrepareAndActivate() { PrepareResult result = deployApp(testApp); assertTrue(result.configChangeActions().getRefeedActions().isEmpty()); assertTrue(result.configChangeActions().getRestartActions().isEmpty()); } @Test @Test public void getLogs() { WireMockServer wireMock = new WireMockServer(wireMockConfig().port(8080)); wireMock.start(); WireMock.configureFor("localhost", wireMock.port()); stubFor(get(urlEqualTo("/logs")) .willReturn(aResponse() .withStatus(200))); wireMock.start(); deployApp(testAppLogServerWithContainer); HttpResponse response = applicationRepository.getLogs(applicationId(), ""); assertEquals(200, response.getStatus()); wireMock.stop(); } @Test(expected = IllegalArgumentException.class) public void getLogsNoContainerOnLogServerHostShouldThrowException() { deployApp(testApp); applicationRepository.getLogs(applicationId(), ""); } @Test public void deleteUnusedTenants() { Instant now = ManualClock.at("1970-01-01T01:00:00"); deployApp(testApp); deployApp(testApp, new PrepareParams.Builder().applicationId(applicationId(tenant2)).build()); Duration ttlForUnusedTenant = Duration.ofHours(1); assertTrue(applicationRepository.deleteUnusedTenants(ttlForUnusedTenant, now).isEmpty()); ttlForUnusedTenant = Duration.ofMillis(1); assertEquals(tenant3, applicationRepository.deleteUnusedTenants(ttlForUnusedTenant, now).iterator().next()); applicationRepository.delete(applicationId()); Set<TenantName> tenantsDeleted = applicationRepository.deleteUnusedTenants(Duration.ofMillis(1), now); assertTrue(tenantsDeleted.contains(tenant1)); assertFalse(tenantsDeleted.contains(tenant2)); } @Test public void decideVersion() { ApplicationId regularApp = ApplicationId.from("tenant1", "application1", "default"); ApplicationId systemApp = ApplicationId.from("hosted-vespa", "routing", "default"); ApplicationId 
testerApp = ApplicationId.from("tenant1", "application1", "default-t"); Version sessionVersion = Version.fromString("5.0"); assertEquals(sessionVersion, ApplicationRepository.decideVersion(systemApp, Environment.prod, sessionVersion, false)); assertEquals(sessionVersion, ApplicationRepository.decideVersion(systemApp, Environment.dev, sessionVersion, false)); assertEquals(sessionVersion, ApplicationRepository.decideVersion(systemApp, Environment.perf, sessionVersion, false)); assertEquals(sessionVersion, ApplicationRepository.decideVersion(testerApp, Environment.prod, sessionVersion, false)); assertEquals(sessionVersion, ApplicationRepository.decideVersion(testerApp, Environment.dev, sessionVersion, false)); assertEquals(sessionVersion, ApplicationRepository.decideVersion(testerApp, Environment.perf, sessionVersion, false)); assertEquals(sessionVersion, ApplicationRepository.decideVersion(regularApp, Environment.prod, sessionVersion, false)); assertEquals(Vtag.currentVersion, ApplicationRepository.decideVersion(regularApp, Environment.dev, sessionVersion, false)); assertEquals(sessionVersion, ApplicationRepository.decideVersion(regularApp, Environment.dev, sessionVersion, true)); assertEquals(Vtag.currentVersion, ApplicationRepository.decideVersion(regularApp, Environment.perf, sessionVersion, false)); } @Test public void deleteUnusedFileReferences() throws IOException { File fileReferencesDir = temporaryFolder.newFolder(); File filereferenceDir = createFilereferenceOnDisk(new File(fileReferencesDir, "foo"), Instant.now().minus(Duration.ofDays(15))); File filereferenceDir2 = createFilereferenceOnDisk(new File(fileReferencesDir, "baz"), Instant.now()); tenantRepository.addTenant(tenant1); Provisioner provisioner = new SessionHandlerTest.MockProvisioner(); applicationRepository = new ApplicationRepository(tenantRepository, provisioner, orchestrator, clock); timeoutBudget = new TimeoutBudget(clock, Duration.ofSeconds(60)); PrepareParams prepareParams = new 
PrepareParams.Builder().applicationId(applicationId()).ignoreValidationErrors(true).build(); deployApp(new File("src/test/apps/app"), prepareParams); Set<String> toBeDeleted = applicationRepository.deleteUnusedFiledistributionReferences(fileReferencesDir); assertEquals(Collections.singleton("foo"), toBeDeleted); assertFalse(filereferenceDir.exists()); assertTrue(filereferenceDir2.exists()); } private File createFilereferenceOnDisk(File filereferenceDir, Instant lastModifiedTime) { assertTrue(filereferenceDir.mkdir()); File bar = new File(filereferenceDir, "file"); IOUtils.writeFile(bar, Utf8.toBytes("test")); assertTrue(filereferenceDir.setLastModified(lastModifiedTime.toEpochMilli())); return filereferenceDir; } @Test public void delete() { { PrepareResult result = deployApp(testApp); long sessionId = result.sessionId(); Tenant tenant = tenantRepository.getTenant(applicationId().tenant()); LocalSession applicationData = tenant.getLocalSessionRepo().getSession(sessionId); assertNotNull(applicationData); assertNotNull(applicationData.getApplicationId()); assertNotNull(tenant.getRemoteSessionRepo().getSession(sessionId)); assertNotNull(applicationRepository.getActiveSession(applicationId())); assertTrue(applicationRepository.deleteApplication(applicationId())); assertNull(applicationRepository.getActiveSession(applicationId())); assertNull(tenant.getLocalSessionRepo().getSession(sessionId)); assertNull(tenant.getRemoteSessionRepo().getSession(sessionId)); assertTrue(provisioner.removed); assertThat(provisioner.lastApplicationId.tenant(), is(tenant.getName())); assertThat(provisioner.lastApplicationId, is(applicationId())); assertFalse(applicationRepository.deleteApplication(applicationId())); } { deployApp(testApp); assertTrue(applicationRepository.deleteApplication(applicationId())); deployApp(testApp); ApplicationId fooId = applicationId(tenant2); PrepareParams prepareParams2 = new PrepareParams.Builder().applicationId(fooId).build(); deployApp(testApp, 
prepareParams2); assertNotNull(applicationRepository.getActiveSession(fooId)); assertTrue(applicationRepository.deleteApplication(fooId)); assertThat(provisioner.lastApplicationId, is(fooId)); assertNotNull(applicationRepository.getActiveSession(applicationId())); assertTrue(applicationRepository.deleteApplication(applicationId())); } } @Test public void deleteLegacy() { deployApp(testApp); assertNotNull(applicationRepository.getActiveSession(applicationId())); assertTrue(applicationRepository.deleteApplicationLegacy(applicationId())); assertNull(applicationRepository.getActiveSession(applicationId())); assertFalse(applicationRepository.deleteApplicationLegacy(applicationId())); } @Test public void testDeletingInactiveSessions() { ManualClock clock = new ManualClock(Instant.now()); ConfigserverConfig configserverConfig = new ConfigserverConfig(new ConfigserverConfig.Builder() .configServerDBDir(Files.createTempDir().getAbsolutePath()) .configDefinitionsDir(Files.createTempDir().getAbsolutePath()) .sessionLifetime(60)); DeployTester tester = new DeployTester(configserverConfig, clock); tester.deployApp("src/test/apps/app", clock.instant()); clock.advance(Duration.ofSeconds(10)); Optional<Deployment> deployment2 = tester.redeployFromLocalActive(); assertTrue(deployment2.isPresent()); deployment2.get().activate(); long activeSessionId = tester.tenant().getApplicationRepo().getSessionIdForApplication(tester.applicationId()); clock.advance(Duration.ofSeconds(10)); Optional<com.yahoo.config.provision.Deployment> deployment3 = tester.redeployFromLocalActive(); assertTrue(deployment3.isPresent()); deployment3.get().prepare(); LocalSession deployment3session = ((com.yahoo.vespa.config.server.deploy.Deployment) deployment3.get()).session(); assertNotEquals(activeSessionId, deployment3session); assertEquals(activeSessionId, tester.tenant().getApplicationRepo().getSessionIdForApplication(tester.applicationId())); assertEquals(3, 
tester.tenant().getLocalSessionRepo().listSessions().size()); clock.advance(Duration.ofHours(1)); tester.applicationRepository().deleteExpiredLocalSessions(); final Collection<LocalSession> sessions = tester.tenant().getLocalSessionRepo().listSessions(); assertEquals(1, sessions.size()); assertEquals(3, new ArrayList<>(sessions).get(0).getSessionId()); assertEquals(0, applicationRepository.deleteExpiredRemoteSessions(Duration.ofSeconds(0))); } private PrepareResult prepareAndActivateApp(File application) throws IOException { FilesApplicationPackage appDir = FilesApplicationPackage.fromFile(application); ApplicationId applicationId = applicationId(); long sessionId = applicationRepository.createSession(applicationId, timeoutBudget, appDir.getAppDir()); return applicationRepository.prepareAndActivate(tenantRepository.getTenant(applicationId.tenant()), sessionId, prepareParams(), false, false, Instant.now()); } private PrepareResult deployApp(File applicationPackage) { return deployApp(applicationPackage, prepareParams()); } private PrepareResult deployApp(File applicationPackage, PrepareParams prepareParams) { return applicationRepository.deploy(applicationPackage, prepareParams); } private PrepareParams prepareParams() { return new PrepareParams.Builder().applicationId(applicationId()).build(); } private ApplicationId applicationId() { return ApplicationId.from(tenant1, ApplicationName.from("testapp"), InstanceName.defaultName()); } private ApplicationId applicationId(TenantName tenantName) { return ApplicationId.from(tenantName, ApplicationName.from("testapp"), InstanceName.defaultName()); } }
class ApplicationRepositoryTest { private final static File testApp = new File("src/test/apps/app"); private final static File testAppJdiscOnly = new File("src/test/apps/app-jdisc-only"); private final static File testAppJdiscOnlyRestart = new File("src/test/apps/app-jdisc-only-restart"); private final static File testAppLogServerWithContainer = new File("src/test/apps/app-logserver-with-container"); private final static TenantName tenant1 = TenantName.from("test1"); private final static TenantName tenant2 = TenantName.from("test2"); private final static TenantName tenant3 = TenantName.from("test3"); private final static Clock clock = Clock.systemUTC(); private ApplicationRepository applicationRepository; private TenantRepository tenantRepository; private SessionHandlerTest.MockProvisioner provisioner; private OrchestratorMock orchestrator; private TimeoutBudget timeoutBudget; @Rule public TemporaryFolder temporaryFolder = new TemporaryFolder(); @Before public void setup() { Curator curator = new MockCurator(); tenantRepository = new TenantRepository(new TestComponentRegistry.Builder() .curator(curator) .build()); tenantRepository.addTenant(tenant1); tenantRepository.addTenant(tenant2); tenantRepository.addTenant(tenant3); orchestrator = new OrchestratorMock(); provisioner = new SessionHandlerTest.MockProvisioner(); applicationRepository = new ApplicationRepository(tenantRepository, provisioner, orchestrator, clock); timeoutBudget = new TimeoutBudget(clock, Duration.ofSeconds(60)); } @Test public void prepareAndActivate() throws IOException { PrepareResult result = prepareAndActivateApp(testApp); assertTrue(result.configChangeActions().getRefeedActions().isEmpty()); assertTrue(result.configChangeActions().getRestartActions().isEmpty()); } @Test public void prepareAndActivateWithRestart() throws IOException { prepareAndActivateApp(testAppJdiscOnly); PrepareResult result = prepareAndActivateApp(testAppJdiscOnlyRestart); 
assertTrue(result.configChangeActions().getRefeedActions().isEmpty()); assertFalse(result.configChangeActions().getRestartActions().isEmpty()); } @Test public void createAndPrepareAndActivate() { PrepareResult result = deployApp(testApp); assertTrue(result.configChangeActions().getRefeedActions().isEmpty()); assertTrue(result.configChangeActions().getRestartActions().isEmpty()); } @Test @Test public void getLogs() { WireMockServer wireMock = new WireMockServer(wireMockConfig().port(8080)); wireMock.start(); WireMock.configureFor("localhost", wireMock.port()); stubFor(get(urlEqualTo("/logs")) .willReturn(aResponse() .withStatus(200))); wireMock.start(); deployApp(testAppLogServerWithContainer); HttpResponse response = applicationRepository.getLogs(applicationId(), ""); assertEquals(200, response.getStatus()); wireMock.stop(); } @Test(expected = IllegalArgumentException.class) public void getLogsNoContainerOnLogServerHostShouldThrowException() { deployApp(testApp); applicationRepository.getLogs(applicationId(), ""); } @Test public void deleteUnusedTenants() { Instant now = ManualClock.at("1970-01-01T01:00:00"); deployApp(testApp); deployApp(testApp, new PrepareParams.Builder().applicationId(applicationId(tenant2)).build()); Duration ttlForUnusedTenant = Duration.ofHours(1); assertTrue(applicationRepository.deleteUnusedTenants(ttlForUnusedTenant, now).isEmpty()); ttlForUnusedTenant = Duration.ofMillis(1); assertEquals(tenant3, applicationRepository.deleteUnusedTenants(ttlForUnusedTenant, now).iterator().next()); applicationRepository.delete(applicationId()); Set<TenantName> tenantsDeleted = applicationRepository.deleteUnusedTenants(Duration.ofMillis(1), now); assertTrue(tenantsDeleted.contains(tenant1)); assertFalse(tenantsDeleted.contains(tenant2)); } @Test public void decideVersion() { ApplicationId regularApp = ApplicationId.from("tenant1", "application1", "default"); ApplicationId systemApp = ApplicationId.from("hosted-vespa", "routing", "default"); ApplicationId 
testerApp = ApplicationId.from("tenant1", "application1", "default-t"); Version sessionVersion = Version.fromString("5.0"); assertEquals(sessionVersion, ApplicationRepository.decideVersion(systemApp, Environment.prod, sessionVersion, false)); assertEquals(sessionVersion, ApplicationRepository.decideVersion(systemApp, Environment.dev, sessionVersion, false)); assertEquals(sessionVersion, ApplicationRepository.decideVersion(systemApp, Environment.perf, sessionVersion, false)); assertEquals(sessionVersion, ApplicationRepository.decideVersion(testerApp, Environment.prod, sessionVersion, false)); assertEquals(sessionVersion, ApplicationRepository.decideVersion(testerApp, Environment.dev, sessionVersion, false)); assertEquals(sessionVersion, ApplicationRepository.decideVersion(testerApp, Environment.perf, sessionVersion, false)); assertEquals(sessionVersion, ApplicationRepository.decideVersion(regularApp, Environment.prod, sessionVersion, false)); assertEquals(Vtag.currentVersion, ApplicationRepository.decideVersion(regularApp, Environment.dev, sessionVersion, false)); assertEquals(sessionVersion, ApplicationRepository.decideVersion(regularApp, Environment.dev, sessionVersion, true)); assertEquals(Vtag.currentVersion, ApplicationRepository.decideVersion(regularApp, Environment.perf, sessionVersion, false)); } @Test public void deleteUnusedFileReferences() throws IOException { File fileReferencesDir = temporaryFolder.newFolder(); File filereferenceDir = createFilereferenceOnDisk(new File(fileReferencesDir, "foo"), Instant.now().minus(Duration.ofDays(15))); File filereferenceDir2 = createFilereferenceOnDisk(new File(fileReferencesDir, "baz"), Instant.now()); tenantRepository.addTenant(tenant1); Provisioner provisioner = new SessionHandlerTest.MockProvisioner(); applicationRepository = new ApplicationRepository(tenantRepository, provisioner, orchestrator, clock); timeoutBudget = new TimeoutBudget(clock, Duration.ofSeconds(60)); PrepareParams prepareParams = new 
PrepareParams.Builder().applicationId(applicationId()).ignoreValidationErrors(true).build(); deployApp(new File("src/test/apps/app"), prepareParams); Set<String> toBeDeleted = applicationRepository.deleteUnusedFiledistributionReferences(fileReferencesDir); assertEquals(Collections.singleton("foo"), toBeDeleted); assertFalse(filereferenceDir.exists()); assertTrue(filereferenceDir2.exists()); } private File createFilereferenceOnDisk(File filereferenceDir, Instant lastModifiedTime) { assertTrue(filereferenceDir.mkdir()); File bar = new File(filereferenceDir, "file"); IOUtils.writeFile(bar, Utf8.toBytes("test")); assertTrue(filereferenceDir.setLastModified(lastModifiedTime.toEpochMilli())); return filereferenceDir; } @Test public void delete() { { PrepareResult result = deployApp(testApp); long sessionId = result.sessionId(); Tenant tenant = tenantRepository.getTenant(applicationId().tenant()); LocalSession applicationData = tenant.getLocalSessionRepo().getSession(sessionId); assertNotNull(applicationData); assertNotNull(applicationData.getApplicationId()); assertNotNull(tenant.getRemoteSessionRepo().getSession(sessionId)); assertNotNull(applicationRepository.getActiveSession(applicationId())); assertTrue(applicationRepository.deleteApplication(applicationId())); assertNull(applicationRepository.getActiveSession(applicationId())); assertNull(tenant.getLocalSessionRepo().getSession(sessionId)); assertNull(tenant.getRemoteSessionRepo().getSession(sessionId)); assertTrue(provisioner.removed); assertThat(provisioner.lastApplicationId.tenant(), is(tenant.getName())); assertThat(provisioner.lastApplicationId, is(applicationId())); assertFalse(applicationRepository.deleteApplication(applicationId())); } { deployApp(testApp); assertTrue(applicationRepository.deleteApplication(applicationId())); deployApp(testApp); ApplicationId fooId = applicationId(tenant2); PrepareParams prepareParams2 = new PrepareParams.Builder().applicationId(fooId).build(); deployApp(testApp, 
prepareParams2); assertNotNull(applicationRepository.getActiveSession(fooId)); assertTrue(applicationRepository.deleteApplication(fooId)); assertThat(provisioner.lastApplicationId, is(fooId)); assertNotNull(applicationRepository.getActiveSession(applicationId())); assertTrue(applicationRepository.deleteApplication(applicationId())); } } @Test public void deleteLegacy() { deployApp(testApp); assertNotNull(applicationRepository.getActiveSession(applicationId())); assertTrue(applicationRepository.deleteApplicationLegacy(applicationId())); assertNull(applicationRepository.getActiveSession(applicationId())); assertFalse(applicationRepository.deleteApplicationLegacy(applicationId())); } @Test public void testDeletingInactiveSessions() { ManualClock clock = new ManualClock(Instant.now()); ConfigserverConfig configserverConfig = new ConfigserverConfig(new ConfigserverConfig.Builder() .configServerDBDir(Files.createTempDir().getAbsolutePath()) .configDefinitionsDir(Files.createTempDir().getAbsolutePath()) .sessionLifetime(60)); DeployTester tester = new DeployTester(configserverConfig, clock); tester.deployApp("src/test/apps/app", clock.instant()); clock.advance(Duration.ofSeconds(10)); Optional<Deployment> deployment2 = tester.redeployFromLocalActive(); assertTrue(deployment2.isPresent()); deployment2.get().activate(); long activeSessionId = tester.tenant().getApplicationRepo().getSessionIdForApplication(tester.applicationId()); clock.advance(Duration.ofSeconds(10)); Optional<com.yahoo.config.provision.Deployment> deployment3 = tester.redeployFromLocalActive(); assertTrue(deployment3.isPresent()); deployment3.get().prepare(); LocalSession deployment3session = ((com.yahoo.vespa.config.server.deploy.Deployment) deployment3.get()).session(); assertNotEquals(activeSessionId, deployment3session); assertEquals(activeSessionId, tester.tenant().getApplicationRepo().getSessionIdForApplication(tester.applicationId())); assertEquals(3, 
tester.tenant().getLocalSessionRepo().listSessions().size()); clock.advance(Duration.ofHours(1)); tester.applicationRepository().deleteExpiredLocalSessions(); final Collection<LocalSession> sessions = tester.tenant().getLocalSessionRepo().listSessions(); assertEquals(1, sessions.size()); assertEquals(3, new ArrayList<>(sessions).get(0).getSessionId()); assertEquals(0, applicationRepository.deleteExpiredRemoteSessions(Duration.ofSeconds(0))); } private PrepareResult prepareAndActivateApp(File application) throws IOException { FilesApplicationPackage appDir = FilesApplicationPackage.fromFile(application); ApplicationId applicationId = applicationId(); long sessionId = applicationRepository.createSession(applicationId, timeoutBudget, appDir.getAppDir()); return applicationRepository.prepareAndActivate(tenantRepository.getTenant(applicationId.tenant()), sessionId, prepareParams(), false, false, Instant.now()); } private PrepareResult deployApp(File applicationPackage) { return deployApp(applicationPackage, prepareParams()); } private PrepareResult deployApp(File applicationPackage, PrepareParams prepareParams) { return applicationRepository.deploy(applicationPackage, prepareParams); } private PrepareParams prepareParams() { return new PrepareParams.Builder().applicationId(applicationId()).build(); } private ApplicationId applicationId() { return ApplicationId.from(tenant1, ApplicationName.from("testapp"), InstanceName.defaultName()); } private ApplicationId applicationId(TenantName tenantName) { return ApplicationId.from(tenantName, ApplicationName.from("testapp"), InstanceName.defaultName()); } }
Well ... nevermind. It's just backed by a mock, huh?
public void testSuspension() { deployApp(testApp); assertFalse(applicationRepository.isSuspended(applicationId())); orchestrator.suspend(applicationId()); assertTrue(applicationRepository.isSuspended(applicationId())); }
assertTrue(applicationRepository.isSuspended(applicationId()));
public void testSuspension() { deployApp(testApp); assertFalse(applicationRepository.isSuspended(applicationId())); orchestrator.suspend(applicationId()); assertTrue(applicationRepository.isSuspended(applicationId())); }
class ApplicationRepositoryTest { private final static File testApp = new File("src/test/apps/app"); private final static File testAppJdiscOnly = new File("src/test/apps/app-jdisc-only"); private final static File testAppJdiscOnlyRestart = new File("src/test/apps/app-jdisc-only-restart"); private final static File testAppLogServerWithContainer = new File("src/test/apps/app-logserver-with-container"); private final static TenantName tenant1 = TenantName.from("test1"); private final static TenantName tenant2 = TenantName.from("test2"); private final static TenantName tenant3 = TenantName.from("test3"); private final static Clock clock = Clock.systemUTC(); private ApplicationRepository applicationRepository; private TenantRepository tenantRepository; private SessionHandlerTest.MockProvisioner provisioner; private OrchestratorMock orchestrator; private TimeoutBudget timeoutBudget; @Rule public TemporaryFolder temporaryFolder = new TemporaryFolder(); @Before public void setup() { Curator curator = new MockCurator(); tenantRepository = new TenantRepository(new TestComponentRegistry.Builder() .curator(curator) .build()); tenantRepository.addTenant(tenant1); tenantRepository.addTenant(tenant2); tenantRepository.addTenant(tenant3); orchestrator = new OrchestratorMock(); provisioner = new SessionHandlerTest.MockProvisioner(); applicationRepository = new ApplicationRepository(tenantRepository, provisioner, orchestrator, clock); timeoutBudget = new TimeoutBudget(clock, Duration.ofSeconds(60)); } @Test public void prepareAndActivate() throws IOException { PrepareResult result = prepareAndActivateApp(testApp); assertTrue(result.configChangeActions().getRefeedActions().isEmpty()); assertTrue(result.configChangeActions().getRestartActions().isEmpty()); } @Test public void prepareAndActivateWithRestart() throws IOException { prepareAndActivateApp(testAppJdiscOnly); PrepareResult result = prepareAndActivateApp(testAppJdiscOnlyRestart); 
assertTrue(result.configChangeActions().getRefeedActions().isEmpty()); assertFalse(result.configChangeActions().getRestartActions().isEmpty()); } @Test public void createAndPrepareAndActivate() { PrepareResult result = deployApp(testApp); assertTrue(result.configChangeActions().getRefeedActions().isEmpty()); assertTrue(result.configChangeActions().getRestartActions().isEmpty()); } @Test @Test public void getLogs() { WireMockServer wireMock = new WireMockServer(wireMockConfig().port(8080)); wireMock.start(); WireMock.configureFor("localhost", wireMock.port()); stubFor(get(urlEqualTo("/logs")) .willReturn(aResponse() .withStatus(200))); wireMock.start(); deployApp(testAppLogServerWithContainer); HttpResponse response = applicationRepository.getLogs(applicationId(), ""); assertEquals(200, response.getStatus()); wireMock.stop(); } @Test(expected = IllegalArgumentException.class) public void getLogsNoContainerOnLogServerHostShouldThrowException() { deployApp(testApp); applicationRepository.getLogs(applicationId(), ""); } @Test public void deleteUnusedTenants() { Instant now = ManualClock.at("1970-01-01T01:00:00"); deployApp(testApp); deployApp(testApp, new PrepareParams.Builder().applicationId(applicationId(tenant2)).build()); Duration ttlForUnusedTenant = Duration.ofHours(1); assertTrue(applicationRepository.deleteUnusedTenants(ttlForUnusedTenant, now).isEmpty()); ttlForUnusedTenant = Duration.ofMillis(1); assertEquals(tenant3, applicationRepository.deleteUnusedTenants(ttlForUnusedTenant, now).iterator().next()); applicationRepository.delete(applicationId()); Set<TenantName> tenantsDeleted = applicationRepository.deleteUnusedTenants(Duration.ofMillis(1), now); assertTrue(tenantsDeleted.contains(tenant1)); assertFalse(tenantsDeleted.contains(tenant2)); } @Test public void decideVersion() { ApplicationId regularApp = ApplicationId.from("tenant1", "application1", "default"); ApplicationId systemApp = ApplicationId.from("hosted-vespa", "routing", "default"); ApplicationId 
testerApp = ApplicationId.from("tenant1", "application1", "default-t"); Version sessionVersion = Version.fromString("5.0"); assertEquals(sessionVersion, ApplicationRepository.decideVersion(systemApp, Environment.prod, sessionVersion, false)); assertEquals(sessionVersion, ApplicationRepository.decideVersion(systemApp, Environment.dev, sessionVersion, false)); assertEquals(sessionVersion, ApplicationRepository.decideVersion(systemApp, Environment.perf, sessionVersion, false)); assertEquals(sessionVersion, ApplicationRepository.decideVersion(testerApp, Environment.prod, sessionVersion, false)); assertEquals(sessionVersion, ApplicationRepository.decideVersion(testerApp, Environment.dev, sessionVersion, false)); assertEquals(sessionVersion, ApplicationRepository.decideVersion(testerApp, Environment.perf, sessionVersion, false)); assertEquals(sessionVersion, ApplicationRepository.decideVersion(regularApp, Environment.prod, sessionVersion, false)); assertEquals(Vtag.currentVersion, ApplicationRepository.decideVersion(regularApp, Environment.dev, sessionVersion, false)); assertEquals(sessionVersion, ApplicationRepository.decideVersion(regularApp, Environment.dev, sessionVersion, true)); assertEquals(Vtag.currentVersion, ApplicationRepository.decideVersion(regularApp, Environment.perf, sessionVersion, false)); } @Test public void deleteUnusedFileReferences() throws IOException { File fileReferencesDir = temporaryFolder.newFolder(); File filereferenceDir = createFilereferenceOnDisk(new File(fileReferencesDir, "foo"), Instant.now().minus(Duration.ofDays(15))); File filereferenceDir2 = createFilereferenceOnDisk(new File(fileReferencesDir, "baz"), Instant.now()); tenantRepository.addTenant(tenant1); Provisioner provisioner = new SessionHandlerTest.MockProvisioner(); applicationRepository = new ApplicationRepository(tenantRepository, provisioner, orchestrator, clock); timeoutBudget = new TimeoutBudget(clock, Duration.ofSeconds(60)); PrepareParams prepareParams = new 
PrepareParams.Builder().applicationId(applicationId()).ignoreValidationErrors(true).build(); deployApp(new File("src/test/apps/app"), prepareParams); Set<String> toBeDeleted = applicationRepository.deleteUnusedFiledistributionReferences(fileReferencesDir); assertEquals(Collections.singleton("foo"), toBeDeleted); assertFalse(filereferenceDir.exists()); assertTrue(filereferenceDir2.exists()); } private File createFilereferenceOnDisk(File filereferenceDir, Instant lastModifiedTime) { assertTrue(filereferenceDir.mkdir()); File bar = new File(filereferenceDir, "file"); IOUtils.writeFile(bar, Utf8.toBytes("test")); assertTrue(filereferenceDir.setLastModified(lastModifiedTime.toEpochMilli())); return filereferenceDir; } @Test public void delete() { { PrepareResult result = deployApp(testApp); long sessionId = result.sessionId(); Tenant tenant = tenantRepository.getTenant(applicationId().tenant()); LocalSession applicationData = tenant.getLocalSessionRepo().getSession(sessionId); assertNotNull(applicationData); assertNotNull(applicationData.getApplicationId()); assertNotNull(tenant.getRemoteSessionRepo().getSession(sessionId)); assertNotNull(applicationRepository.getActiveSession(applicationId())); assertTrue(applicationRepository.deleteApplication(applicationId())); assertNull(applicationRepository.getActiveSession(applicationId())); assertNull(tenant.getLocalSessionRepo().getSession(sessionId)); assertNull(tenant.getRemoteSessionRepo().getSession(sessionId)); assertTrue(provisioner.removed); assertThat(provisioner.lastApplicationId.tenant(), is(tenant.getName())); assertThat(provisioner.lastApplicationId, is(applicationId())); assertFalse(applicationRepository.deleteApplication(applicationId())); } { deployApp(testApp); assertTrue(applicationRepository.deleteApplication(applicationId())); deployApp(testApp); ApplicationId fooId = applicationId(tenant2); PrepareParams prepareParams2 = new PrepareParams.Builder().applicationId(fooId).build(); deployApp(testApp, 
prepareParams2); assertNotNull(applicationRepository.getActiveSession(fooId)); assertTrue(applicationRepository.deleteApplication(fooId)); assertThat(provisioner.lastApplicationId, is(fooId)); assertNotNull(applicationRepository.getActiveSession(applicationId())); assertTrue(applicationRepository.deleteApplication(applicationId())); } } @Test public void deleteLegacy() { deployApp(testApp); assertNotNull(applicationRepository.getActiveSession(applicationId())); assertTrue(applicationRepository.deleteApplicationLegacy(applicationId())); assertNull(applicationRepository.getActiveSession(applicationId())); assertFalse(applicationRepository.deleteApplicationLegacy(applicationId())); } @Test public void testDeletingInactiveSessions() { ManualClock clock = new ManualClock(Instant.now()); ConfigserverConfig configserverConfig = new ConfigserverConfig(new ConfigserverConfig.Builder() .configServerDBDir(Files.createTempDir().getAbsolutePath()) .configDefinitionsDir(Files.createTempDir().getAbsolutePath()) .sessionLifetime(60)); DeployTester tester = new DeployTester(configserverConfig, clock); tester.deployApp("src/test/apps/app", clock.instant()); clock.advance(Duration.ofSeconds(10)); Optional<Deployment> deployment2 = tester.redeployFromLocalActive(); assertTrue(deployment2.isPresent()); deployment2.get().activate(); long activeSessionId = tester.tenant().getApplicationRepo().getSessionIdForApplication(tester.applicationId()); clock.advance(Duration.ofSeconds(10)); Optional<com.yahoo.config.provision.Deployment> deployment3 = tester.redeployFromLocalActive(); assertTrue(deployment3.isPresent()); deployment3.get().prepare(); LocalSession deployment3session = ((com.yahoo.vespa.config.server.deploy.Deployment) deployment3.get()).session(); assertNotEquals(activeSessionId, deployment3session); assertEquals(activeSessionId, tester.tenant().getApplicationRepo().getSessionIdForApplication(tester.applicationId())); assertEquals(3, 
tester.tenant().getLocalSessionRepo().listSessions().size()); clock.advance(Duration.ofHours(1)); tester.applicationRepository().deleteExpiredLocalSessions(); final Collection<LocalSession> sessions = tester.tenant().getLocalSessionRepo().listSessions(); assertEquals(1, sessions.size()); assertEquals(3, new ArrayList<>(sessions).get(0).getSessionId()); assertEquals(0, applicationRepository.deleteExpiredRemoteSessions(Duration.ofSeconds(0))); } private PrepareResult prepareAndActivateApp(File application) throws IOException { FilesApplicationPackage appDir = FilesApplicationPackage.fromFile(application); ApplicationId applicationId = applicationId(); long sessionId = applicationRepository.createSession(applicationId, timeoutBudget, appDir.getAppDir()); return applicationRepository.prepareAndActivate(tenantRepository.getTenant(applicationId.tenant()), sessionId, prepareParams(), false, false, Instant.now()); } private PrepareResult deployApp(File applicationPackage) { return deployApp(applicationPackage, prepareParams()); } private PrepareResult deployApp(File applicationPackage, PrepareParams prepareParams) { return applicationRepository.deploy(applicationPackage, prepareParams); } private PrepareParams prepareParams() { return new PrepareParams.Builder().applicationId(applicationId()).build(); } private ApplicationId applicationId() { return ApplicationId.from(tenant1, ApplicationName.from("testapp"), InstanceName.defaultName()); } private ApplicationId applicationId(TenantName tenantName) { return ApplicationId.from(tenantName, ApplicationName.from("testapp"), InstanceName.defaultName()); } }
class ApplicationRepositoryTest { private final static File testApp = new File("src/test/apps/app"); private final static File testAppJdiscOnly = new File("src/test/apps/app-jdisc-only"); private final static File testAppJdiscOnlyRestart = new File("src/test/apps/app-jdisc-only-restart"); private final static File testAppLogServerWithContainer = new File("src/test/apps/app-logserver-with-container"); private final static TenantName tenant1 = TenantName.from("test1"); private final static TenantName tenant2 = TenantName.from("test2"); private final static TenantName tenant3 = TenantName.from("test3"); private final static Clock clock = Clock.systemUTC(); private ApplicationRepository applicationRepository; private TenantRepository tenantRepository; private SessionHandlerTest.MockProvisioner provisioner; private OrchestratorMock orchestrator; private TimeoutBudget timeoutBudget; @Rule public TemporaryFolder temporaryFolder = new TemporaryFolder(); @Before public void setup() { Curator curator = new MockCurator(); tenantRepository = new TenantRepository(new TestComponentRegistry.Builder() .curator(curator) .build()); tenantRepository.addTenant(tenant1); tenantRepository.addTenant(tenant2); tenantRepository.addTenant(tenant3); orchestrator = new OrchestratorMock(); provisioner = new SessionHandlerTest.MockProvisioner(); applicationRepository = new ApplicationRepository(tenantRepository, provisioner, orchestrator, clock); timeoutBudget = new TimeoutBudget(clock, Duration.ofSeconds(60)); } @Test public void prepareAndActivate() throws IOException { PrepareResult result = prepareAndActivateApp(testApp); assertTrue(result.configChangeActions().getRefeedActions().isEmpty()); assertTrue(result.configChangeActions().getRestartActions().isEmpty()); } @Test public void prepareAndActivateWithRestart() throws IOException { prepareAndActivateApp(testAppJdiscOnly); PrepareResult result = prepareAndActivateApp(testAppJdiscOnlyRestart); 
assertTrue(result.configChangeActions().getRefeedActions().isEmpty()); assertFalse(result.configChangeActions().getRestartActions().isEmpty()); } @Test public void createAndPrepareAndActivate() { PrepareResult result = deployApp(testApp); assertTrue(result.configChangeActions().getRefeedActions().isEmpty()); assertTrue(result.configChangeActions().getRestartActions().isEmpty()); } @Test @Test public void getLogs() { WireMockServer wireMock = new WireMockServer(wireMockConfig().port(8080)); wireMock.start(); WireMock.configureFor("localhost", wireMock.port()); stubFor(get(urlEqualTo("/logs")) .willReturn(aResponse() .withStatus(200))); wireMock.start(); deployApp(testAppLogServerWithContainer); HttpResponse response = applicationRepository.getLogs(applicationId(), ""); assertEquals(200, response.getStatus()); wireMock.stop(); } @Test(expected = IllegalArgumentException.class) public void getLogsNoContainerOnLogServerHostShouldThrowException() { deployApp(testApp); applicationRepository.getLogs(applicationId(), ""); } @Test public void deleteUnusedTenants() { Instant now = ManualClock.at("1970-01-01T01:00:00"); deployApp(testApp); deployApp(testApp, new PrepareParams.Builder().applicationId(applicationId(tenant2)).build()); Duration ttlForUnusedTenant = Duration.ofHours(1); assertTrue(applicationRepository.deleteUnusedTenants(ttlForUnusedTenant, now).isEmpty()); ttlForUnusedTenant = Duration.ofMillis(1); assertEquals(tenant3, applicationRepository.deleteUnusedTenants(ttlForUnusedTenant, now).iterator().next()); applicationRepository.delete(applicationId()); Set<TenantName> tenantsDeleted = applicationRepository.deleteUnusedTenants(Duration.ofMillis(1), now); assertTrue(tenantsDeleted.contains(tenant1)); assertFalse(tenantsDeleted.contains(tenant2)); } @Test public void decideVersion() { ApplicationId regularApp = ApplicationId.from("tenant1", "application1", "default"); ApplicationId systemApp = ApplicationId.from("hosted-vespa", "routing", "default"); ApplicationId 
testerApp = ApplicationId.from("tenant1", "application1", "default-t"); Version sessionVersion = Version.fromString("5.0"); assertEquals(sessionVersion, ApplicationRepository.decideVersion(systemApp, Environment.prod, sessionVersion, false)); assertEquals(sessionVersion, ApplicationRepository.decideVersion(systemApp, Environment.dev, sessionVersion, false)); assertEquals(sessionVersion, ApplicationRepository.decideVersion(systemApp, Environment.perf, sessionVersion, false)); assertEquals(sessionVersion, ApplicationRepository.decideVersion(testerApp, Environment.prod, sessionVersion, false)); assertEquals(sessionVersion, ApplicationRepository.decideVersion(testerApp, Environment.dev, sessionVersion, false)); assertEquals(sessionVersion, ApplicationRepository.decideVersion(testerApp, Environment.perf, sessionVersion, false)); assertEquals(sessionVersion, ApplicationRepository.decideVersion(regularApp, Environment.prod, sessionVersion, false)); assertEquals(Vtag.currentVersion, ApplicationRepository.decideVersion(regularApp, Environment.dev, sessionVersion, false)); assertEquals(sessionVersion, ApplicationRepository.decideVersion(regularApp, Environment.dev, sessionVersion, true)); assertEquals(Vtag.currentVersion, ApplicationRepository.decideVersion(regularApp, Environment.perf, sessionVersion, false)); } @Test public void deleteUnusedFileReferences() throws IOException { File fileReferencesDir = temporaryFolder.newFolder(); File filereferenceDir = createFilereferenceOnDisk(new File(fileReferencesDir, "foo"), Instant.now().minus(Duration.ofDays(15))); File filereferenceDir2 = createFilereferenceOnDisk(new File(fileReferencesDir, "baz"), Instant.now()); tenantRepository.addTenant(tenant1); Provisioner provisioner = new SessionHandlerTest.MockProvisioner(); applicationRepository = new ApplicationRepository(tenantRepository, provisioner, orchestrator, clock); timeoutBudget = new TimeoutBudget(clock, Duration.ofSeconds(60)); PrepareParams prepareParams = new 
PrepareParams.Builder().applicationId(applicationId()).ignoreValidationErrors(true).build(); deployApp(new File("src/test/apps/app"), prepareParams); Set<String> toBeDeleted = applicationRepository.deleteUnusedFiledistributionReferences(fileReferencesDir); assertEquals(Collections.singleton("foo"), toBeDeleted); assertFalse(filereferenceDir.exists()); assertTrue(filereferenceDir2.exists()); } private File createFilereferenceOnDisk(File filereferenceDir, Instant lastModifiedTime) { assertTrue(filereferenceDir.mkdir()); File bar = new File(filereferenceDir, "file"); IOUtils.writeFile(bar, Utf8.toBytes("test")); assertTrue(filereferenceDir.setLastModified(lastModifiedTime.toEpochMilli())); return filereferenceDir; } @Test public void delete() { { PrepareResult result = deployApp(testApp); long sessionId = result.sessionId(); Tenant tenant = tenantRepository.getTenant(applicationId().tenant()); LocalSession applicationData = tenant.getLocalSessionRepo().getSession(sessionId); assertNotNull(applicationData); assertNotNull(applicationData.getApplicationId()); assertNotNull(tenant.getRemoteSessionRepo().getSession(sessionId)); assertNotNull(applicationRepository.getActiveSession(applicationId())); assertTrue(applicationRepository.deleteApplication(applicationId())); assertNull(applicationRepository.getActiveSession(applicationId())); assertNull(tenant.getLocalSessionRepo().getSession(sessionId)); assertNull(tenant.getRemoteSessionRepo().getSession(sessionId)); assertTrue(provisioner.removed); assertThat(provisioner.lastApplicationId.tenant(), is(tenant.getName())); assertThat(provisioner.lastApplicationId, is(applicationId())); assertFalse(applicationRepository.deleteApplication(applicationId())); } { deployApp(testApp); assertTrue(applicationRepository.deleteApplication(applicationId())); deployApp(testApp); ApplicationId fooId = applicationId(tenant2); PrepareParams prepareParams2 = new PrepareParams.Builder().applicationId(fooId).build(); deployApp(testApp, 
prepareParams2); assertNotNull(applicationRepository.getActiveSession(fooId)); assertTrue(applicationRepository.deleteApplication(fooId)); assertThat(provisioner.lastApplicationId, is(fooId)); assertNotNull(applicationRepository.getActiveSession(applicationId())); assertTrue(applicationRepository.deleteApplication(applicationId())); } } @Test public void deleteLegacy() { deployApp(testApp); assertNotNull(applicationRepository.getActiveSession(applicationId())); assertTrue(applicationRepository.deleteApplicationLegacy(applicationId())); assertNull(applicationRepository.getActiveSession(applicationId())); assertFalse(applicationRepository.deleteApplicationLegacy(applicationId())); } @Test public void testDeletingInactiveSessions() { ManualClock clock = new ManualClock(Instant.now()); ConfigserverConfig configserverConfig = new ConfigserverConfig(new ConfigserverConfig.Builder() .configServerDBDir(Files.createTempDir().getAbsolutePath()) .configDefinitionsDir(Files.createTempDir().getAbsolutePath()) .sessionLifetime(60)); DeployTester tester = new DeployTester(configserverConfig, clock); tester.deployApp("src/test/apps/app", clock.instant()); clock.advance(Duration.ofSeconds(10)); Optional<Deployment> deployment2 = tester.redeployFromLocalActive(); assertTrue(deployment2.isPresent()); deployment2.get().activate(); long activeSessionId = tester.tenant().getApplicationRepo().getSessionIdForApplication(tester.applicationId()); clock.advance(Duration.ofSeconds(10)); Optional<com.yahoo.config.provision.Deployment> deployment3 = tester.redeployFromLocalActive(); assertTrue(deployment3.isPresent()); deployment3.get().prepare(); LocalSession deployment3session = ((com.yahoo.vespa.config.server.deploy.Deployment) deployment3.get()).session(); assertNotEquals(activeSessionId, deployment3session); assertEquals(activeSessionId, tester.tenant().getApplicationRepo().getSessionIdForApplication(tester.applicationId())); assertEquals(3, 
tester.tenant().getLocalSessionRepo().listSessions().size()); clock.advance(Duration.ofHours(1)); tester.applicationRepository().deleteExpiredLocalSessions(); final Collection<LocalSession> sessions = tester.tenant().getLocalSessionRepo().listSessions(); assertEquals(1, sessions.size()); assertEquals(3, new ArrayList<>(sessions).get(0).getSessionId()); assertEquals(0, applicationRepository.deleteExpiredRemoteSessions(Duration.ofSeconds(0))); } private PrepareResult prepareAndActivateApp(File application) throws IOException { FilesApplicationPackage appDir = FilesApplicationPackage.fromFile(application); ApplicationId applicationId = applicationId(); long sessionId = applicationRepository.createSession(applicationId, timeoutBudget, appDir.getAppDir()); return applicationRepository.prepareAndActivate(tenantRepository.getTenant(applicationId.tenant()), sessionId, prepareParams(), false, false, Instant.now()); } private PrepareResult deployApp(File applicationPackage) { return deployApp(applicationPackage, prepareParams()); } private PrepareResult deployApp(File applicationPackage, PrepareParams prepareParams) { return applicationRepository.deploy(applicationPackage, prepareParams); } private PrepareParams prepareParams() { return new PrepareParams.Builder().applicationId(applicationId()).build(); } private ApplicationId applicationId() { return ApplicationId.from(tenant1, ApplicationName.from("testapp"), InstanceName.defaultName()); } private ApplicationId applicationId(TenantName tenantName) { return ApplicationId.from(tenantName, ApplicationName.from("testapp"), InstanceName.defaultName()); } }
Perhaps this could be expressed more concisely with a stream, e.g.:
```java
return application.deployments().keySet().stream()
                  .anyMatch(other -> ! other.equals(zone)
                                     && controller.applications().isSuspended(new DeploymentId(application.id(), other)));
```
/** Returns whether the given application is suspended in at least one deployment zone other than the given one. */
private boolean isSuspendedInAnotherZone(Application application, ZoneId zone) {
    return application.deployments().values().stream()
                      .map(Deployment::zone)
                      .filter(other -> ! other.equals(zone))
                      .anyMatch(other -> controller.applications().isSuspended(new DeploymentId(application.id(), other)));
}
return false;
/**
 * Returns whether the given application is suspended in at least one deployment zone
 * other than the given one, as reported by the application controller.
 */
private boolean isSuspendedInAnotherZone(Application application, ZoneId zone) {
    for (Deployment deployment : application.deployments().values()) {
        // Skip the zone we are asking about; check suspension status of every other deployment
        if ( ! deployment.zone().equals(zone)
             && controller.applications().isSuspended(new DeploymentId(application.id(), deployment.zone())))
            return true;
    }
    return false;
}
/**
 * Computes which deployment jobs should run for each application, triggers them through
 * the external build service (or the internal job controller for internally built
 * applications), and keeps each application's pending {@code Change} in sync with job
 * completions reported back to it.
 */
class DeploymentTrigger {

    private final static Logger log = Logger.getLogger(DeploymentTrigger.class.getName());

    private final Controller controller;
    private final Clock clock;
    private final BuildService buildService;
    private final JobController jobs;

    public DeploymentTrigger(Controller controller, BuildService buildService, Clock clock) {
        this.controller = Objects.requireNonNull(controller, "controller cannot be null");
        this.clock = Objects.requireNonNull(clock, "clock cannot be null");
        this.buildService = Objects.requireNonNull(buildService, "buildService cannot be null");
        this.jobs = controller.jobController();
    }

    /** Returns the deployment steps of the given spec, resolved for this controller's system. */
    public DeploymentSteps steps(DeploymentSpec spec) {
        return new DeploymentSteps(spec, controller::system);
    }

    /**
     * Records information when a job completes (successfully or not). This information is used when deciding what to
     * trigger next.
     */
    public void notifyOfCompletion(JobReport report) {
        log.log(LogLevel.INFO, String.format("Notified of %s for %s of %s (%d)",
                                             report.jobError().map(e -> e.toString() + " error")
                                                   .orElse("success"),
                                             report.jobType(),
                                             report.applicationId(),
                                             report.projectId()));
        if ( ! applications().get(report.applicationId()).isPresent()) {
            log.log(LogLevel.WARNING, "Ignoring completion of job of project '" + report.projectId() +
                                      "': Unknown application '" + report.applicationId() + "'");
            return;
        }

        applications().lockOrThrow(report.applicationId(), application -> {
            JobRun triggering;
            if (report.jobType() == component) {
                // A component (build) job completion carries a new application version
                ApplicationVersion applicationVersion = ApplicationVersion.from(report.sourceRevision().get(), report.buildNumber());
                triggering = JobRun.triggering(application.get().oldestDeployedPlatform().orElse(controller.systemVersion()),
                                               applicationVersion, Optional.empty(), Optional.empty(),
                                               "Application commit", clock.instant());
                if (report.success()) {
                    if (acceptNewApplicationVersion(application.get()))
                        application = application.withChange(application.get().change().with(applicationVersion))
                                                 .withOutstandingChange(Change.empty());
                    else
                        // Not accepted now; remember it as the outstanding change instead
                        application = application.withOutstandingChange(Change.of(applicationVersion));
                }
            }
            else {
                // For other jobs, find the job run which triggered this completion, requiring
                // that the job has been triggered and not completed again since then
                triggering = application.get().deploymentJobs().statusOf(report.jobType())
                                        .filter(job ->    job.lastTriggered().isPresent()
                                                       && job.lastCompleted()
                                                             .map(completion -> ! completion.at().isAfter(job.lastTriggered().get().at()))
                                                             .orElse(true))
                                        .orElseThrow(() -> new IllegalStateException("Notified of completion of " + report.jobType().jobName() + " for " +
                                                                                     report.applicationId() + ", but that has neither been triggered nor deployed"))
                                        .lastTriggered().get();
            }
            application = application.withJobCompletion(report.projectId(),
                                                        report.jobType(),
                                                        triggering.completion(report.buildNumber(), clock.instant()),
                                                        report.jobError());
            application = application.withChange(remainingChange(application.get()));
            applications().store(application);
        });
    }

    /** Returns a map of jobs that are scheduled to be run, grouped by the job type */
    public Map<JobType, ? extends List<? extends BuildJob>> jobsToRun() {
        return computeReadyJobs().stream().collect(groupingBy(Job::jobType));
    }

    /**
     * Finds and triggers jobs that can and should run but are currently not, and returns the number of triggered jobs.
     *
     * Only one job is triggered each run for test jobs, since their environments have limited capacity.
     */
    public long triggerReadyJobs() {
        return computeReadyJobs().stream()
                                 .collect(partitioningBy(job -> job.jobType().isTest()))
                                 .entrySet().stream()
                                 .flatMap(entry -> (entry.getKey()
                                         // Test jobs: sort by retry/upgrade priority, then by age, and group per job type
                                         ? entry.getValue().stream()
                                                .sorted(comparing(Job::isRetry)
                                                                .thenComparing(Job::applicationUpgrade)
                                                                .reversed()
                                                                .thenComparing(Job::availableSince))
                                                .collect(groupingBy(Job::jobType))
                                         // Other (production) jobs: group per application
                                         : entry.getValue().stream()
                                                .collect(groupingBy(Job::applicationId)))
                                         .values().stream()
                                         // Trigger at most one job per group for test jobs, all for the rest
                                         .map(jobs -> (Supplier<Long>) jobs.stream()
                                                                           .filter(this::trigger)
                                                                           .limit(entry.getKey() ? 1 : Long.MAX_VALUE)::count))
                                 .parallel().map(Supplier::get).reduce(0L, Long::sum);
    }

    /**
     * Attempts to trigger the given job for the given application and returns the outcome.
     *
     * If the build service can not find the given job, or claims it is illegal to trigger it,
     * the project id is removed from the application owning the job, to prevent further trigger attempts.
     */
    public boolean trigger(Job job) {
        log.log(LogLevel.INFO, String.format("Triggering %s: %s", job, job.triggering));
        try {
            applications().lockOrThrow(job.applicationId(), application -> {
                if (application.get().deploymentJobs().builtInternally())
                    jobs.start(job.applicationId(), job.jobType, new Versions(job.triggering.platform(),
                                                                              job.triggering.application(),
                                                                              job.triggering.sourcePlatform(),
                                                                              job.triggering.sourceApplication()));
                else
                    buildService.trigger(job);
                applications().store(application.withJobTriggering(job.jobType, job.triggering));
            });
            return true;
        }
        catch (RuntimeException e) {
            log.log(LogLevel.WARNING, "Exception triggering " + job + ": " + e);
            // Unknown/illegal job: drop the project id so we stop trying to trigger it
            if (e instanceof NoSuchElementException || e instanceof IllegalArgumentException)
                applications().lockOrThrow(job.applicationId(), application ->
                        applications().store(application.withProjectId(OptionalLong.empty())));
            return false;
        }
    }

    /** Force triggering of a job for given application. */
    public List<JobType> forceTrigger(ApplicationId applicationId, JobType jobType, String user) {
        Application application = applications().require(applicationId);
        if (jobType == component) {
            if (application.deploymentJobs().builtInternally())
                throw new IllegalArgumentException(applicationId + " has no component job we can trigger.");
            buildService.trigger(BuildJob.of(applicationId, application.deploymentJobs().projectId().getAsLong(), jobType.jobName()));
            return singletonList(component);
        }
        Versions versions = Versions.from(application.change(), application, deploymentFor(application, jobType),
                                          controller.systemVersion());
        String reason = "Job triggered manually by " + user;
        // Untested production jobs trigger the required test jobs instead of the job itself
        return (jobType.isProduction() && ! isTested(application, versions)
                ? testJobs(application, versions, reason, clock.instant()).stream()
                : Stream.of(deploymentJob(application, versions, application.change(), jobType, reason, clock.instant())))
                .peek(this::trigger)
                .map(Job::jobType).collect(toList());
    }

    /**
     * Triggers a change of this application
     *
     * @param applicationId the application to trigger
     * @throws IllegalArgumentException if this application already has an ongoing change
     */
    public void triggerChange(ApplicationId applicationId, Change change) {
        applications().lockOrThrow(applicationId, application -> {
            if (application.get().changeAt(controller.clock().instant()).isPresent() && ! application.get().deploymentJobs().hasFailures())
                throw new IllegalArgumentException("Could not start " + change + " on " + application + ": " +
                                                   application.get().change() + " is already in progress");
            application = application.withChange(change);
            if (change.application().isPresent())
                application = application.withOutstandingChange(Change.empty());
            applications().store(application);
        });
    }

    /** Cancels a platform upgrade of the given application, and an application upgrade as well if {@code keepApplicationChange}. */
    public void cancelChange(ApplicationId applicationId, boolean keepApplicationChange) {
        applications().lockOrThrow(applicationId, application -> {
            applications().store(application.withChange(application.get().change().application()
                                                                   .filter(__ -> keepApplicationChange)
                                                                   .map(Change::of)
                                                                   .orElse(Change.empty())));
        });
    }

    private ApplicationController applications() {
        return controller.applications();
    }

    /** Returns the last success of the given job, if its versions match the given target versions. */
    private Optional<JobRun> successOn(Application application, JobType jobType, Versions versions) {
        return application.deploymentJobs().statusOf(jobType).flatMap(JobStatus::lastSuccess)
                          .filter(versions::targetsMatch);
    }

    /** Returns the existing deployment, if any, in the zone of the given job. */
    private Optional<Deployment> deploymentFor(Application application, JobType jobType) {
        return Optional.ofNullable(application.deployments().get(jobType.zone(controller.system())));
    }

    /** Returns the larger of two optionals; an empty optional loses to a present one. */
    private static <T extends Comparable<T>> Optional<T> max(Optional<T> o1, Optional<T> o2) {
        return ! o1.isPresent() ? o2 : ! o2.isPresent() ? o1 : o1.get().compareTo(o2.get()) >= 0 ? o1 : o2;
    }

    /** Returns the set of all jobs which have changes to propagate from the upstream steps. */
    private List<Job> computeReadyJobs() {
        return ApplicationList.from(applications().asList())
                              .notPullRequest()
                              .withProjectId()
                              .deploying()
                              .idList().stream()
                              .map(this::computeReadyJobs)
                              .flatMap(Collection::stream)
                              .collect(toList());
    }

    /**
     * Finds the next step to trigger for the given application, if any, and returns these as a list.
     */
    private List<Job> computeReadyJobs(ApplicationId id) {
        List<Job> jobs = new ArrayList<>();
        applications().get(id).ifPresent(application -> {
            Change change = application.changeAt(clock.instant());
            // The change is "available" from the latest of the two test jobs' last successes
            Optional<Instant> completedAt = max(application.deploymentJobs().statusOf(systemTest)
                                                           .<Instant>flatMap(job -> job.lastSuccess().map(JobRun::at)),
                                                application.deploymentJobs().statusOf(stagingTest)
                                                           .<Instant>flatMap(job -> job.lastSuccess().map(JobRun::at)));
            String reason = "New change available";
            List<Job> testJobs = null; // null means "not yet decided" -- set at most once below
            DeploymentSteps steps = steps(application.deploymentSpec());

            if (change.isPresent()) {
                for (Step step : steps.production()) {
                    List<JobType> stepJobs = steps.toJobs(step);
                    List<JobType> remainingJobs = stepJobs.stream().filter(job -> !isComplete(change, application, job)).collect(toList());
                    if (!remainingJobs.isEmpty()) { // Step not complete: trigger what is ready, or test what is not yet tested
                        for (JobType job : remainingJobs) {
                            Versions versions = Versions.from(change, application, deploymentFor(application, job),
                                                              controller.systemVersion());
                            if (isTested(application, versions)) {
                                if (completedAt.isPresent() && canTrigger(job, versions, application, stepJobs)) {
                                    jobs.add(deploymentJob(application, versions, change, job, reason, completedAt.get()));
                                }
                                if (!alreadyTriggered(application, versions)) {
                                    testJobs = emptyList();
                                }
                            }
                            else if (testJobs == null) {
                                testJobs = testJobs(application, versions,
                                                    String.format("Testing deployment for %s (%s)",
                                                                  job.jobName(), versions.toString()),
                                                    completedAt.orElseGet(clock::instant));
                            }
                        }
                        completedAt = Optional.empty(); // Later steps must wait for this one
                    }
                    else { // Step complete: advance the completion timestamp past this step
                        if (stepJobs.isEmpty()) { // A delay step; hold the change until the delay has passed
                            Duration delay = ((DeploymentSpec.Delay) step).duration();
                            completedAt = completedAt.map(at -> at.plus(delay)).filter(at -> !at.isAfter(clock.instant()));
                            reason += " after a delay of " + delay;
                        }
                        else {
                            completedAt = stepJobs.stream().map(job -> application.deploymentJobs().statusOf(job).get().lastCompleted().get().at()).max(naturalOrder());
                            reason = "Available change in " + stepJobs.stream().map(JobType::jobName).collect(joining(", "));
                        }
                    }
                }
            }
            if (testJobs == null) { // No test jobs were decided above; test the current state
                testJobs = testJobs(application, Versions.from(application, controller.systemVersion()),
                                    "Testing last changes outside prod", clock.instant());
            }
            jobs.addAll(testJobs);
        });
        return Collections.unmodifiableList(jobs);
    }

    /** Returns whether given job should be triggered */
    private boolean canTrigger(JobType job, Versions versions, Application application, List<JobType> parallelJobs) {
        if (jobStateOf(application, job) != idle) return false;
        // Don't trigger if other production jobs outside this parallel step are running
        if (parallelJobs != null && ! parallelJobs.containsAll(runningProductionJobs(application))) return false;
        if (job.isProduction() && isSuspendedInAnotherZone(application, job.zone(controller.system()))) return false;

        return triggerAt(clock.instant(), job, versions, application);
    }

    /** Returns whether given job should be triggered */
    private boolean canTrigger(JobType job, Versions versions, Application application) {
        return canTrigger(job, versions, application, null);
    }

    /** Returns whether job can trigger at given instant */
    public boolean triggerAt(Instant instant, JobType job, Versions versions, Application application) {
        Optional<JobStatus> jobStatus = application.deploymentJobs().statusOf(job);
        if (!jobStatus.isPresent()) return true;
        if (jobStatus.get().isSuccess()) return true; // Success
        if (!jobStatus.get().lastCompleted().isPresent()) return true; // Never completed
        if (!jobStatus.get().firstFailing().isPresent()) return true; // Has completed, but no failure recorded
        if (!versions.targetsMatch(jobStatus.get().lastCompleted().get())) return true; // Targets have changed
        if (application.deploymentSpec().upgradePolicy() == DeploymentSpec.UpgradePolicy.canary) return true; // Don't throttle canaries

        Instant firstFailing = jobStatus.get().firstFailing().get().at();
        Instant lastCompleted = jobStatus.get().lastCompleted().get().at();

        // Retry immediately within the first minute of failing
        if (firstFailing.isAfter(instant.minus(Duration.ofMinutes(1)))) return true;

        // Out-of-capacity test jobs: retry once a minute
        if (job.isTest() && jobStatus.get().isOutOfCapacity()) {
            return lastCompleted.isBefore(instant.minus(Duration.ofMinutes(1)));
        }

        // Within the first hour of failing: retry every 10 minutes; after that: every 2 hours
        if (firstFailing.isAfter(instant.minus(Duration.ofHours(1)))) {
            return lastCompleted.isBefore(instant.minus(Duration.ofMinutes(10)));
        }
        return lastCompleted.isBefore(instant.minus(Duration.ofHours(2)));
    }

    /** Returns the production jobs of the given application which are currently running. */
    private List<JobType> runningProductionJobs(Application application) {
        return application.deploymentJobs().jobStatus().keySet().parallelStream()
                          .filter(JobType::isProduction)
                          .filter(job -> isRunning(application, job))
                          .collect(toList());
    }

    /** Returns whether the given job is currently running; false if completed since last triggered, asking the build service otherwise. */
    private boolean isRunning(Application application, JobType jobType) {
        return    ! application.deploymentJobs().statusOf(jobType)
                               .flatMap(job -> job.lastCompleted().map(run -> run.at().isAfter(job.lastTriggered().get().at())))
                               .orElse(false)
               && EnumSet.of(running, queued).contains(jobStateOf(application, jobType));
    }

    /** Returns the state of the given job, from the internal job controller or the external build service. */
    private JobState jobStateOf(Application application, JobType jobType) {
        if (application.deploymentJobs().builtInternally()) {
            Optional<Run> run = controller.jobController().last(application.id(), jobType);
            return run.isPresent() && ! run.get().hasEnded() ? JobState.running : JobState.idle;
        }
        return buildService.stateOf(BuildJob.of(application.id(),
                                                application.deploymentJobs().projectId().getAsLong(),
                                                jobType.jobName()));
    }

    /**
     * Returns whether the given change is complete for the given application for the given job.
     *
     * Any job is complete if the given change is already successful on that job.
     * A production job is also considered complete if its current change is strictly dominated by what
     * is already deployed in its zone, i.e., no parts of the change are upgrades, and the full current
     * change for the application downgrades the deployment, which is an acknowledgement that the deployed
     * version is broken somehow, such that the job may be locked in failure until a new version is released.
     */
    public boolean isComplete(Change change, Application application, JobType jobType) {
        Optional<Deployment> existingDeployment = deploymentFor(application, jobType);
        return    application.deploymentJobs().statusOf(jobType).flatMap(JobStatus::lastSuccess)
                             .map(job ->    change.platform().map(job.platform()::equals).orElse(true)
                                         && change.application().map(job.application()::equals).orElse(true))
                             .orElse(false)
               ||    jobType.isProduction()
                  && existingDeployment.map(deployment ->    ! isUpgrade(change, deployment)
                                                          && isDowngrade(application.change(), deployment))
                                       .orElse(false);
    }

    private static boolean isUpgrade(Change change, Deployment deployment) {
        return change.upgrades(deployment.version()) || change.upgrades(deployment.applicationVersion());
    }

    private static boolean isDowngrade(Change change, Deployment deployment) {
        return change.downgrades(deployment.version()) || change.downgrades(deployment.applicationVersion());
    }

    /** Returns whether the given versions are tested in both test jobs, or were already triggered in production. */
    private boolean isTested(Application application, Versions versions) {
        return    testedIn(application, systemTest, versions)
               && testedIn(application, stagingTest, versions)
               || alreadyTriggered(application, versions);
    }

    public boolean testedIn(Application application, JobType testType, Versions versions) {
        if (testType == systemTest)
            return successOn(application, systemTest, versions).isPresent();
        if (testType == stagingTest) // Staging tests must also match on source versions
            return successOn(application, stagingTest, versions).filter(versions::sourcesMatchIfPresent).isPresent();
        throw new IllegalArgumentException(testType + " is not a test job!");
    }

    public boolean alreadyTriggered(Application application, Versions versions) {
        return application.deploymentJobs().jobStatus().values().stream()
                          .filter(job -> job.type().isProduction())
                          .anyMatch(job -> job.lastTriggered()
                                              .filter(versions::targetsMatch)
                                              .filter(versions::sourcesMatchIfPresent)
                                              .isPresent());
    }

    /** Returns whether a new application version should be accepted as the current change right away. */
    private boolean acceptNewApplicationVersion(Application application) {
        // Reject if revision changes are blocked now, an application change is already in progress, and nothing is failing
        if ( ! application.deploymentSpec().canChangeRevisionAt(clock.instant())
             && application.changeAt(clock.instant()).application().isPresent()
             && ! application.deploymentJobs().hasFailures()) return false;
        if (application.change().application().isPresent()) return true;
        if (application.deploymentJobs().hasFailures()) return true;
        return ! application.changeAt(clock.instant()).platform().isPresent();
    }

    /** Returns the part of the application's change which is not yet complete for all relevant jobs. */
    private Change remainingChange(Application application) {
        DeploymentSteps steps = steps(application.deploymentSpec());
        List<JobType> jobs = steps.production().isEmpty()
                ? steps.testJobs()
                : steps.productionJobs();

        Change change = application.change();
        // Drop the platform part if complete everywhere without it; likewise for the application part
        if (jobs.stream().allMatch(job -> isComplete(application.change().withoutApplication(), application, job)))
            change = change.withoutPlatform();
        if (jobs.stream().allMatch(job -> isComplete(application.change().withoutPlatform(), application, job)))
            change = change.withoutApplication();
        return change;
    }

    /**
     * Returns the list of test jobs that should run now, and that need to succeed on the given versions for it to be considered tested.
     */
    private List<Job> testJobs(Application application, Versions versions, String reason, Instant availableSince) {
        List<Job> jobs = new ArrayList<>();
        for (JobType jobType : steps(application.deploymentSpec()).testJobs()) {
            Optional<JobRun> completion = successOn(application, jobType, versions)
                    .filter(run -> versions.sourcesMatchIfPresent(run) || jobType == systemTest);
            if (!completion.isPresent() && canTrigger(jobType, versions, application)) {
                jobs.add(deploymentJob(application, versions, application.change(), jobType, reason, availableSince));
            }
        }
        return jobs;
    }

    /** Creates a Job for the given job type, marking it a retry if the job last failed on out of capacity. */
    private Job deploymentJob(Application application, Versions versions, Change change, JobType jobType, String reason, Instant availableSince) {
        boolean isRetry = application.deploymentJobs().statusOf(jobType)
                                     .map(JobStatus::isOutOfCapacity)
                                     .orElse(false);
        if (isRetry) reason += "; retrying on out of capacity";

        JobRun triggering = JobRun.triggering(versions.targetPlatform(), versions.targetApplication(),
                                              versions.sourcePlatform(), versions.sourceApplication(),
                                              reason, clock.instant());
        return new Job(application, triggering, jobType, availableSince, isRetry, change.application().isPresent());
    }

    /** A triggerable build job together with the job run information to record when triggering it. */
    private static class Job extends BuildJob {

        private final JobType jobType;
        private final JobRun triggering;
        private final Instant availableSince; // When the change this job carries became available to it
        private final boolean isRetry;
        private final boolean isApplicationUpgrade;

        private Job(Application application, JobRun triggering, JobType jobType, Instant availableSince,
                    boolean isRetry, boolean isApplicationUpgrade) {
            super(application.id(), application.deploymentJobs().projectId().getAsLong(), jobType.jobName());
            this.jobType = jobType;
            this.triggering = triggering;
            this.availableSince = availableSince;
            this.isRetry = isRetry;
            this.isApplicationUpgrade = isApplicationUpgrade;
        }

        JobType jobType() { return jobType; }
        Instant availableSince() { return availableSince; }
        boolean isRetry() { return isRetry; }
        boolean applicationUpgrade() { return isApplicationUpgrade; }

    }

}
/**
 * Decides which deployment jobs should run for each application, triggers them (through the
 * internal {@code JobController} for internally deployed applications, otherwise through the
 * external {@code BuildService}), and records job completions to compute each application's
 * remaining change.
 *
 * NOTE(review): comments added only; the code itself is unchanged.
 */
class DeploymentTrigger {

    private final static Logger log = Logger.getLogger(DeploymentTrigger.class.getName());

    private final Controller controller;
    private final Clock clock;
    private final BuildService buildService;
    private final JobController jobs;

    public DeploymentTrigger(Controller controller, BuildService buildService, Clock clock) {
        this.controller = Objects.requireNonNull(controller, "controller cannot be null");
        this.clock = Objects.requireNonNull(clock, "clock cannot be null");
        this.buildService = Objects.requireNonNull(buildService, "buildService cannot be null");
        this.jobs = controller.jobController();
    }

    // Deployment steps of the given spec, resolved against this controller's system.
    public DeploymentSteps steps(DeploymentSpec spec) {
        return new DeploymentSteps(spec, controller::system);
    }

    /** * Records information when a job completes (successfully or not). This information is used when deciding what to * trigger next. */
    public void notifyOfCompletion(JobReport report) {
        log.log(LogLevel.INFO, String.format("Notified of %s for %s of %s (%d)", report.jobError().map(e -> e.toString() + " error") .orElse("success"), report.jobType(), report.applicationId(), report.projectId()));
        if ( ! applications().get(report.applicationId()).isPresent()) {
            log.log(LogLevel.WARNING, "Ignoring completion of job of project '" + report.projectId() + "': Unknown application '" + report.applicationId() + "'");
            return;
        }
        applications().lockOrThrow(report.applicationId(), application -> {
            JobRun triggering;
            if (report.jobType() == component) {
                // Completion of the component (build) job: a new application version exists.
                ApplicationVersion applicationVersion = ApplicationVersion.from(report.sourceRevision().get(), report.buildNumber());
                triggering = JobRun.triggering(application.get().oldestDeployedPlatform().orElse(controller.systemVersion()), applicationVersion, Optional.empty(), Optional.empty(), "Application commit", clock.instant());
                if (report.success()) {
                    if (acceptNewApplicationVersion(application.get())) {
                        application = application.withChange(application.get().change().with(applicationVersion)) .withOutstandingChange(Change.empty());
                        // Abort ongoing internal runs for this application, as they are now out of date.
                        if (application.get().deploymentJobs().deployedInternally()) for (Run run : jobs.active()) if (run.id().application().equals(report.applicationId())) jobs.abort(run.id());
                    }
                    else
                        // Not accepted now; park the new version as the outstanding change.
                        application = application.withOutstandingChange(Change.of(applicationVersion));
                }
            }
            else {
                // Completion of a deployment/test job: it must match a previously recorded trigger.
                triggering = application.get().deploymentJobs().statusOf(report.jobType()) .filter(job -> job.lastTriggered().isPresent() && job.lastCompleted() .map(completion -> ! completion.at().isAfter(job.lastTriggered().get().at())) .orElse(true)) .orElseThrow(() -> new IllegalStateException("Notified of completion of " + report.jobType().jobName() + " for " + report.applicationId() + ", but that has neither been triggered nor deployed")) .lastTriggered().get();
            }
            application = application.withJobCompletion(report.projectId(), report.jobType(), triggering.completion(report.buildNumber(), clock.instant()), report.jobError());
            application = application.withChange(remainingChange(application.get()));
            applications().store(application);
        });
    }

    /** Returns a map of jobs that are scheduled to be run, grouped by the job type */
    public Map<JobType, ? extends List<? extends BuildJob>> jobsToRun() {
        return computeReadyJobs().stream().collect(groupingBy(Job::jobType));
    }

    /** * Finds and triggers jobs that can and should run but are currently not, and returns the number of triggered jobs. * * Only one job is triggered each run for test jobs, since their environments have limited capacity. */
    public long triggerReadyJobs() {
        // Partition into test jobs (prioritised, one per type) and production jobs (grouped per application).
        return computeReadyJobs().stream() .collect(partitioningBy(job -> job.jobType().isTest())) .entrySet().stream() .flatMap(entry -> (entry.getKey() ? entry.getValue().stream() .sorted(comparing(Job::isRetry) .thenComparing(Job::applicationUpgrade) .reversed() .thenComparing(Job::availableSince)) .collect(groupingBy(Job::jobType)) : entry.getValue().stream() .collect(groupingBy(Job::applicationId))) .values().stream() .map(jobs -> (Supplier<Long>) jobs.stream() .filter(this::trigger) .limit(entry.getKey() ? 1 : Long.MAX_VALUE)::count)) .parallel().map(Supplier::get).reduce(0L, Long::sum);
    }

    /** * Attempts to trigger the given job for the given application and returns the outcome. * * If the build service can not find the given job, or claims it is illegal to trigger it, * the project id is removed from the application owning the job, to prevent further trigger attempts. */
    public boolean trigger(Job job) {
        log.log(LogLevel.INFO, String.format("Triggering %s: %s", job, job.triggering));
        try {
            applications().lockOrThrow(job.applicationId(), application -> {
                if (application.get().deploymentJobs().deployedInternally()) jobs.start(job.applicationId(), job.jobType, new Versions(job.triggering.platform(), job.triggering.application(), job.triggering.sourcePlatform(), job.triggering.sourceApplication()));
                else buildService.trigger(job);
                applications().store(application.withJobTriggering(job.jobType, job.triggering));
            });
            return true;
        }
        catch (RuntimeException e) {
            log.log(LogLevel.WARNING, "Exception triggering " + job + ": " + e);
            if (e instanceof NoSuchElementException || e instanceof IllegalArgumentException) applications().lockOrThrow(job.applicationId(), application -> applications().store(application.withProjectId(OptionalLong.empty())));
            return false;
        }
    }

    /** Force triggering of a job for given application. */
    public List<JobType> forceTrigger(ApplicationId applicationId, JobType jobType, String user) {
        Application application = applications().require(applicationId);
        if (jobType == component) {
            if (application.deploymentJobs().deployedInternally()) throw new IllegalArgumentException(applicationId + " has no component job we can trigger.");
            buildService.trigger(BuildJob.of(applicationId, application.deploymentJobs().projectId().getAsLong(), jobType.jobName()));
            return singletonList(component);
        }
        Versions versions = Versions.from(application.change(), application, deploymentFor(application, jobType), controller.systemVersion());
        String reason = "Job triggered manually by " + user;
        // An untested production job triggers its test jobs first, instead of itself.
        return (jobType.isProduction() && ! isTested(application, versions) ? testJobs(application, versions, reason, clock.instant()).stream() : Stream.of(deploymentJob(application, versions, application.change(), jobType, reason, clock.instant()))) .peek(this::trigger) .map(Job::jobType).collect(toList());
    }

    /** * Triggers a change of this application * * @param applicationId the application to trigger * @throws IllegalArgumentException if this application already has an ongoing change */
    public void triggerChange(ApplicationId applicationId, Change change) {
        applications().lockOrThrow(applicationId, application -> {
            if (application.get().changeAt(controller.clock().instant()).isPresent() && ! application.get().deploymentJobs().hasFailures()) throw new IllegalArgumentException("Could not start " + change + " on " + application + ": " + application.get().change() + " is already in progress");
            application = application.withChange(change);
            if (change.application().isPresent()) application = application.withOutstandingChange(Change.empty());
            applications().store(application);
        });
    }

    /** Cancels a platform upgrade of the given application, and an application upgrade as well if {@code keepApplicationChange}. */
    public void cancelChange(ApplicationId applicationId, boolean keepApplicationChange) {
        applications().lockOrThrow(applicationId, application -> {
            applications().store(application.withChange(application.get().change().application() .filter(__ -> keepApplicationChange) .map(Change::of) .orElse(Change.empty())));
        });
    }

    private ApplicationController applications() { return controller.applications(); }

    // Last successful run of the given job whose target versions match, if any.
    private Optional<JobRun> successOn(Application application, JobType jobType, Versions versions) {
        return application.deploymentJobs().statusOf(jobType).flatMap(JobStatus::lastSuccess) .filter(versions::targetsMatch);
    }

    // Current deployment of the application in the zone of the given job, if any.
    private Optional<Deployment> deploymentFor(Application application, JobType jobType) {
        return Optional.ofNullable(application.deployments().get(jobType.zone(controller.system())));
    }

    // Maximum of two optional values; empty only when both are empty.
    private static <T extends Comparable<T>> Optional<T> max(Optional<T> o1, Optional<T> o2) {
        return ! o1.isPresent() ? o2 : ! o2.isPresent() ? o1 : o1.get().compareTo(o2.get()) >= 0 ? o1 : o2;
    }

    /** Returns the set of all jobs which have changes to propagate from the upstream steps. */
    private List<Job> computeReadyJobs() {
        return ApplicationList.from(applications().asList()) .notPullRequest() .withProjectId() .deploying() .idList().stream() .map(this::computeReadyJobs) .flatMap(Collection::stream) .collect(toList());
    }

    /** * Finds the next step to trigger for the given application, if any, and returns these as a list. */
    private List<Job> computeReadyJobs(ApplicationId id) {
        List<Job> jobs = new ArrayList<>();
        applications().get(id).ifPresent(application -> {
            Change change = application.changeAt(clock.instant());
            // Time at which the change became ready for the first production step: the latest
            // success of the two test jobs.
            Optional<Instant> completedAt = max(application.deploymentJobs().statusOf(systemTest) .<Instant>flatMap(job -> job.lastSuccess().map(JobRun::at)), application.deploymentJobs().statusOf(stagingTest) .<Instant>flatMap(job -> job.lastSuccess().map(JobRun::at)));
            String reason = "New change available";
            List<Job> testJobs = null; // null means "compute from the first production step that needs it"
            DeploymentSteps steps = steps(application.deploymentSpec());
            if (change.isPresent()) {
                for (Step step : steps.production()) {
                    List<JobType> stepJobs = steps.toJobs(step);
                    List<JobType> remainingJobs = stepJobs.stream().filter(job -> !isComplete(change, application, job)).collect(toList());
                    if (!remainingJobs.isEmpty()) {
                        // Step is incomplete; trigger its remaining jobs if the change is tested, or the test jobs if not.
                        for (JobType job : remainingJobs) {
                            Versions versions = Versions.from(change, application, deploymentFor(application, job), controller.systemVersion());
                            if (isTested(application, versions)) {
                                if (completedAt.isPresent() && canTrigger(job, versions, application, stepJobs)) {
                                    jobs.add(deploymentJob(application, versions, change, job, reason, completedAt.get()));
                                }
                                if (!alreadyTriggered(application, versions)) {
                                    testJobs = emptyList();
                                }
                            }
                            else if (testJobs == null) {
                                testJobs = testJobs(application, versions, String.format("Testing deployment for %s (%s)", job.jobName(), versions.toString()), completedAt.orElseGet(clock::instant));
                            }
                        }
                        completedAt = Optional.empty();
                    }
                    else {
                        // Step is complete — or is a delay step (no jobs); propagate the completion time forward.
                        if (stepJobs.isEmpty()) {
                            Duration delay = ((DeploymentSpec.Delay) step).duration();
                            completedAt = completedAt.map(at -> at.plus(delay)).filter(at -> !at.isAfter(clock.instant()));
                            reason += " after a delay of " + delay;
                        }
                        else {
                            completedAt = stepJobs.stream().map(job -> application.deploymentJobs().statusOf(job).get().lastCompleted().get().at()).max(naturalOrder());
                            reason = "Available change in " + stepJobs.stream().map(JobType::jobName).collect(joining(", "));
                        }
                    }
                }
            }
            if (testJobs == null) {
                testJobs = testJobs(application, Versions.from(application, controller.systemVersion()), "Testing last changes outside prod", clock.instant());
            }
            jobs.addAll(testJobs);
        });
        return Collections.unmodifiableList(jobs);
    }

    /** Returns whether given job should be triggered */
    private boolean canTrigger(JobType job, Versions versions, Application application, List<JobType> parallelJobs) {
        if (jobStateOf(application, job) != idle) return false;
        if (parallelJobs != null && ! parallelJobs.containsAll(runningProductionJobs(application))) return false;
        if (job.isProduction() && isSuspendedInAnotherZone(application, job.zone(controller.system()))) return false;
        return triggerAt(clock.instant(), job, versions, application);
    }

    /** Returns whether given job should be triggered */
    private boolean canTrigger(JobType job, Versions versions, Application application) {
        return canTrigger(job, versions, application, null);
    }

    /** Returns whether job can trigger at given instant */
    public boolean triggerAt(Instant instant, JobType job, Versions versions, Application application) {
        Optional<JobStatus> jobStatus = application.deploymentJobs().statusOf(job);
        if (!jobStatus.isPresent()) return true;
        if (jobStatus.get().isSuccess()) return true; // Success
        if (!jobStatus.get().lastCompleted().isPresent()) return true; // Never completed
        if (!jobStatus.get().firstFailing().isPresent()) return true; // Should not happen, but be safe
        if (!versions.targetsMatch(jobStatus.get().lastCompleted().get())) return true; // Always trigger as targets have changed
        if (application.deploymentSpec().upgradePolicy() == DeploymentSpec.UpgradePolicy.canary) return true; // Don't throttle canaries
        Instant firstFailing = jobStatus.get().firstFailing().get().at();
        Instant lastCompleted = jobStatus.get().lastCompleted().get().at();
        // Retry all errors immediately for 1 minute, out-of-capacity test jobs every minute,
        // other recent failures every 10 minutes for an hour, and then every 2 hours.
        if (firstFailing.isAfter(instant.minus(Duration.ofMinutes(1)))) return true;
        if (job.isTest() && jobStatus.get().isOutOfCapacity()) {
            return lastCompleted.isBefore(instant.minus(Duration.ofMinutes(1)));
        }
        if (firstFailing.isAfter(instant.minus(Duration.ofHours(1)))) {
            return lastCompleted.isBefore(instant.minus(Duration.ofMinutes(10)));
        }
        return lastCompleted.isBefore(instant.minus(Duration.ofHours(2)));
    }

    // Production jobs of this application which are currently running.
    private List<JobType> runningProductionJobs(Application application) {
        return application.deploymentJobs().jobStatus().keySet().parallelStream() .filter(JobType::isProduction) .filter(job -> isRunning(application, job)) .collect(toList());
    }

    /** Returns whether the given job is currently running; false if completed since last triggered, asking the build service otherwise. */
    private boolean isRunning(Application application, JobType jobType) {
        return ! application.deploymentJobs().statusOf(jobType) .flatMap(job -> job.lastCompleted().map(run -> run.at().isAfter(job.lastTriggered().get().at()))) .orElse(false) && EnumSet.of(running, queued).contains(jobStateOf(application, jobType));
    }

    // State of the job: from the internal job controller for internal deployments,
    // otherwise from the external build service.
    private JobState jobStateOf(Application application, JobType jobType) {
        if (application.deploymentJobs().deployedInternally()) {
            Optional<Run> run = controller.jobController().last(application.id(), jobType);
            return run.isPresent() && ! run.get().hasEnded() ? JobState.running : JobState.idle;
        }
        return buildService.stateOf(BuildJob.of(application.id(), application.deploymentJobs().projectId().getAsLong(), jobType.jobName()));
    }

    /** * Returns whether the given change is complete for the given application for the given job. * * Any job is complete if the given change is already successful on that job. * A production job is also considered complete if its current change is strictly dominated by what * is already deployed in its zone, i.e., no parts of the change are upgrades, and the full current * change for the application downgrades the deployment, which is an acknowledgement that the deployed * version is broken somehow, such that the job may be locked in failure until a new version is released. */
    public boolean isComplete(Change change, Application application, JobType jobType) {
        Optional<Deployment> existingDeployment = deploymentFor(application, jobType);
        return application.deploymentJobs().statusOf(jobType).flatMap(JobStatus::lastSuccess) .map(job -> change.platform().map(job.platform()::equals).orElse(true) && change.application().map(job.application()::equals).orElse(true)) .orElse(false) || jobType.isProduction() && existingDeployment.map(deployment -> ! isUpgrade(change, deployment) && isDowngrade(application.change(), deployment)) .orElse(false);
    }

    private static boolean isUpgrade(Change change, Deployment deployment) {
        return change.upgrades(deployment.version()) || change.upgrades(deployment.applicationVersion());
    }

    private static boolean isDowngrade(Change change, Deployment deployment) {
        return change.downgrades(deployment.version()) || change.downgrades(deployment.applicationVersion());
    }

    // Tested means both test jobs have succeeded on these versions, or production was
    // already triggered with them (implying testing was deemed sufficient).
    private boolean isTested(Application application, Versions versions) {
        return testedIn(application, systemTest, versions) && testedIn(application, stagingTest, versions) || alreadyTriggered(application, versions);
    }

    public boolean testedIn(Application application, JobType testType, Versions versions) {
        if (testType == systemTest) return successOn(application, systemTest, versions).isPresent();
        if (testType == stagingTest) return successOn(application, stagingTest, versions).filter(versions::sourcesMatchIfPresent).isPresent();
        throw new IllegalArgumentException(testType + " is not a test job!");
    }

    // Whether any production job was already triggered with matching target and source versions.
    public boolean alreadyTriggered(Application application, Versions versions) {
        return application.deploymentJobs().jobStatus().values().stream() .filter(job -> job.type().isProduction()) .anyMatch(job -> job.lastTriggered() .filter(versions::targetsMatch) .filter(versions::sourcesMatchIfPresent) .isPresent());
    }

    // Whether a newly built application version may replace/extend the current change now.
    private boolean acceptNewApplicationVersion(Application application) {
        if ( ! application.deploymentSpec().canChangeRevisionAt(clock.instant()) && application.changeAt(clock.instant()).application().isPresent() && ! application.deploymentJobs().hasFailures()) return false;
        if (application.change().application().isPresent()) return true; // Replacing a previous application change is ok.
        if (application.deploymentJobs().hasFailures()) return true; // Allow changes to fix upgrade problems.
        return ! application.changeAt(clock.instant()).platform().isPresent();
    }

    // Strips the platform and/or application part off the change when all relevant jobs
    // have completed that part.
    private Change remainingChange(Application application) {
        DeploymentSteps steps = steps(application.deploymentSpec());
        List<JobType> jobs = steps.production().isEmpty() ? steps.testJobs() : steps.productionJobs();
        Change change = application.change();
        if (jobs.stream().allMatch(job -> isComplete(application.change().withoutApplication(), application, job))) change = change.withoutPlatform();
        if (jobs.stream().allMatch(job -> isComplete(application.change().withoutPlatform(), application, job))) change = change.withoutApplication();
        return change;
    }

    /** * Returns the list of test jobs that should run now, and that need to succeed on the given versions for it to be considered tested. */
    private List<Job> testJobs(Application application, Versions versions, String reason, Instant availableSince) {
        List<Job> jobs = new ArrayList<>();
        for (JobType jobType : steps(application.deploymentSpec()).testJobs()) {
            Optional<JobRun> completion = successOn(application, jobType, versions) .filter(run -> versions.sourcesMatchIfPresent(run) || jobType == systemTest);
            if (!completion.isPresent() && canTrigger(jobType, versions, application)) {
                jobs.add(deploymentJob(application, versions, application.change(), jobType, reason, availableSince));
            }
        }
        return jobs;
    }

    // Builds a triggerable Job descriptor; marks retries after out-of-capacity failures.
    private Job deploymentJob(Application application, Versions versions, Change change, JobType jobType, String reason, Instant availableSince) {
        boolean isRetry = application.deploymentJobs().statusOf(jobType) .map(JobStatus::isOutOfCapacity) .orElse(false);
        if (isRetry) reason += "; retrying on out of capacity";
        JobRun triggering = JobRun.triggering(versions.targetPlatform(), versions.targetApplication(), versions.sourcePlatform(), versions.sourceApplication(), reason, clock.instant());
        return new Job(application, triggering, jobType, availableSince, isRetry, change.application().isPresent());
    }

    // Immutable description of a job which is ready to run, with prioritisation attributes.
    private static class Job extends BuildJob {

        private final JobType jobType;
        private final JobRun triggering;
        private final Instant availableSince;
        private final boolean isRetry;
        private final boolean isApplicationUpgrade;

        private Job(Application application, JobRun triggering, JobType jobType, Instant availableSince, boolean isRetry, boolean isApplicationUpgrade) {
            super(application.id(), application.deploymentJobs().projectId().getAsLong(), jobType.jobName());
            this.jobType = jobType;
            this.triggering = triggering;
            this.availableSince = availableSince;
            this.isRetry = isRetry;
            this.isApplicationUpgrade = isApplicationUpgrade;
        }

        JobType jobType() { return jobType; }
        Instant availableSince() { return availableSince; }
        boolean isRetry() { return isRetry; }
        boolean applicationUpgrade() { return isApplicationUpgrade; }

    }

}
> The last choice could be used forcibly, without regard for connectivity issues (if some result is better than no result). Yes, that sounds good. I think some result is better than none from the point of view of this code, since the ping logic — taking the coverage config into account — has already determined that the group should be serving.
/**
 * Creates a {@link SearchInvoker} covering the given content nodes.
 * Nodes that are not working are skipped; if any working node fails its
 * connection probe, no invoker is returned at all.
 *
 * @param query the query being executed
 * @param nodes the content nodes to dispatch to
 * @return the invoker, or empty if a working node could not be reached
 */
public Optional<SearchInvoker> getSearchInvoker(Query query, List<SearchCluster.Node> nodes) {
    Map<Integer, SearchInvoker> invokers = new HashMap<>();
    for (SearchCluster.Node node : nodes) {
        if ( ! node.isWorking()) continue;
        Backend backend = fs4ResourcePool.getBackend(node.hostname(), node.fs4port());
        if ( ! backend.probeConnection()) return Optional.empty();
        invokers.put(node.key(), new FS4SearchInvoker(searcher, query, backend.openChannel(), node));
    }
    return Optional.of(invokers.size() == 1 ? invokers.values().iterator().next()
                                            : new InterleavedSearchInvoker(invokers));
}
return Optional.empty();
/**
 * Creates a {@link SearchInvoker} for the given content nodes; non-working nodes are
 * skipped, and a failed connection probe on any working node aborts the whole attempt.
 *
 * @param query the query being executed
 * @param nodes the content nodes to dispatch to
 * @return the invoker, or empty if some working node could not be reached
 */
public Optional<SearchInvoker> getSearchInvoker(Query query, List<SearchCluster.Node> nodes) {
    Map<Integer, SearchInvoker> invokers = new HashMap<>();
    for (SearchCluster.Node node : nodes) {
        if (node.isWorking()) {
            Backend backend = fs4ResourcePool.getBackend(node.hostname(), node.fs4port());
            if (backend.probeConnection()) {
                invokers.put(node.key(), new FS4SearchInvoker(searcher, query, backend.openChannel(), node));
            } else {
                // One unreachable node is enough to give up: the caller may then fall back
                // to another group instead of serving with reduced coverage.
                return Optional.empty();
            }
        }
    }
    if (invokers.size() == 1) {
        return Optional.of(invokers.values().iterator().next());
    } else {
        return Optional.of(new InterleavedSearchInvoker(invokers));
    }
}
/**
 * Creates {@link SearchInvoker} and {@link FillInvoker} instances bound to specific
 * content nodes of a search cluster, using backends from an {@link FS4ResourcePool}.
 *
 * NOTE(review): comments added only; the code itself is unchanged.
 */
class FS4InvokerFactory {

    private final FS4ResourcePool fs4ResourcePool;
    private final VespaBackEndSearcher searcher;
    // All nodes of the cluster, indexed by distribution key.
    private final ImmutableMap<Integer, SearchCluster.Node> nodesByKey;

    public FS4InvokerFactory(FS4ResourcePool fs4ResourcePool, SearchCluster searchCluster, VespaBackEndSearcher searcher) {
        this.fs4ResourcePool = fs4ResourcePool;
        this.searcher = searcher;
        ImmutableMap.Builder<Integer, SearchCluster.Node> builder = ImmutableMap.builder();
        searchCluster.groups().values().forEach(group -> group.nodes().forEach(node -> builder.put(node.key(), node)));
        this.nodesByKey = builder.build();
    }

    /** Creates a search invoker against a single content node. */
    public SearchInvoker getSearchInvoker(Query query, SearchCluster.Node node) {
        Backend backend = fs4ResourcePool.getBackend(node.hostname(), node.fs4port());
        return new FS4SearchInvoker(searcher, query, backend.openChannel(), node);
    }

    /** Creates a fill invoker against a single content node. */
    public FillInvoker getFillInvoker(Query query, SearchCluster.Node node) {
        return new FS4FillInvoker(searcher, query, fs4ResourcePool, node.hostname(), node.fs4port(), node.key());
    }

    /**
     * Creates a {@link FillInvoker} covering all nodes that contributed hits to the result.
     *
     * @param result the result whose hits need to be filled
     * @return the invoker, or empty if some hit came from an unknown content node
     */
    public Optional<FillInvoker> getFillInvoker(Result result) {
        Collection<Integer> requiredNodes = requiredFillNodes(result);
        Query query = result.getQuery();
        Map<Integer, FillInvoker> invokers = new HashMap<>();
        for (Integer distKey : requiredNodes) {
            SearchCluster.Node node = nodesByKey.get(distKey);
            if (node == null) {
                return Optional.empty();
            }
            invokers.put(distKey, getFillInvoker(query, node));
        }
        if (invokers.size() == 1) {
            return Optional.of(invokers.values().iterator().next());
        } else {
            return Optional.of(new InterleavedFillInvoker(invokers));
        }
    }

    /** Distribution keys of all content nodes the result's fast hits originate from. */
    private static Collection<Integer> requiredFillNodes(Result result) {
        Set<Integer> requiredNodes = new HashSet<>();
        for (Iterator<Hit> i = result.hits().unorderedDeepIterator(); i.hasNext();) {
            Hit h = i.next();
            if (h instanceof FastHit) {
                FastHit hit = (FastHit) h;
                requiredNodes.add(hit.getDistributionKey());
            }
        }
        return requiredNodes;
    }

}
/**
 * Creates {@link SearchInvoker} and {@link FillInvoker} instances bound to specific
 * content nodes of a search cluster, using backends from an {@link FS4ResourcePool}.
 *
 * NOTE(review): the original javadoc above getFillInvoker(Query, Node) described a
 * getSearchInvoker(Query, List) overload that is not present in this copy of the class;
 * it has been rewritten to match the method it precedes. Code unchanged.
 */
class FS4InvokerFactory {

    private final FS4ResourcePool fs4ResourcePool;
    private final VespaBackEndSearcher searcher;
    // All nodes of the cluster, indexed by distribution key.
    private final ImmutableMap<Integer, SearchCluster.Node> nodesByKey;

    public FS4InvokerFactory(FS4ResourcePool fs4ResourcePool, SearchCluster searchCluster, VespaBackEndSearcher searcher) {
        this.fs4ResourcePool = fs4ResourcePool;
        this.searcher = searcher;
        ImmutableMap.Builder<Integer, SearchCluster.Node> builder = ImmutableMap.builder();
        searchCluster.groups().values().forEach(group -> group.nodes().forEach(node -> builder.put(node.key(), node)));
        this.nodesByKey = builder.build();
    }

    /** Creates a search invoker against a single content node. */
    public SearchInvoker getSearchInvoker(Query query, SearchCluster.Node node) {
        Backend backend = fs4ResourcePool.getBackend(node.hostname(), node.fs4port());
        return new FS4SearchInvoker(searcher, query, backend.openChannel(), node);
    }

    /**
     * Create a {@link FillInvoker} for a single content node.
     *
     * @param query the search query being processed
     * @param node the content node to fill from
     * @return a FillInvoker against the given node
     */
    public FillInvoker getFillInvoker(Query query, SearchCluster.Node node) {
        return new FS4FillInvoker(searcher, query, fs4ResourcePool, node.hostname(), node.fs4port(), node.key());
    }

    /**
     * Create a {@link FillInvoker} for the hits in a {@link Result}.
     *
     * @param result the Result containing hits that need to be filled
     * @return Optional containing the FillInvoker or <i>empty</i> if some hit is from an unknown content node
     */
    public Optional<FillInvoker> getFillInvoker(Result result) {
        Collection<Integer> requiredNodes = requiredFillNodes(result);
        Query query = result.getQuery();
        Map<Integer, FillInvoker> invokers = new HashMap<>();
        for (Integer distKey : requiredNodes) {
            SearchCluster.Node node = nodesByKey.get(distKey);
            if (node == null) {
                return Optional.empty();
            }
            invokers.put(distKey, getFillInvoker(query, node));
        }
        if (invokers.size() == 1) {
            return Optional.of(invokers.values().iterator().next());
        } else {
            return Optional.of(new InterleavedFillInvoker(invokers));
        }
    }

    /** Distribution keys of all content nodes the result's fast hits originate from. */
    private static Collection<Integer> requiredFillNodes(Result result) {
        Set<Integer> requiredNodes = new HashSet<>();
        for (Iterator<Hit> i = result.hits().unorderedDeepIterator(); i.hasNext();) {
            Hit h = i.next();
            if (h instanceof FastHit) {
                FastHit hit = (FastHit) h;
                requiredNodes.add(hit.getDistributionKey());
            }
        }
        return requiredNodes;
    }

}
Yep.
// Verifies that suspending an application through the orchestrator is reflected
// by ApplicationRepository.isSuspended().
public void testSuspension() {
    deployApp(testApp);
    // A freshly deployed application starts out unsuspended.
    assertFalse(applicationRepository.isSuspended(applicationId()));
    orchestrator.suspend(applicationId());
    assertTrue(applicationRepository.isSuspended(applicationId()));
}
assertTrue(applicationRepository.isSuspended(applicationId()));
// Checks that orchestrator suspension of an application is visible through
// ApplicationRepository.isSuspended().
public void testSuspension() {
    deployApp(testApp);
    // Not suspended right after deployment.
    assertFalse(applicationRepository.isSuspended(applicationId()));
    orchestrator.suspend(applicationId());
    assertTrue(applicationRepository.isSuspended(applicationId()));
}
class ApplicationRepositoryTest { private final static File testApp = new File("src/test/apps/app"); private final static File testAppJdiscOnly = new File("src/test/apps/app-jdisc-only"); private final static File testAppJdiscOnlyRestart = new File("src/test/apps/app-jdisc-only-restart"); private final static File testAppLogServerWithContainer = new File("src/test/apps/app-logserver-with-container"); private final static TenantName tenant1 = TenantName.from("test1"); private final static TenantName tenant2 = TenantName.from("test2"); private final static TenantName tenant3 = TenantName.from("test3"); private final static Clock clock = Clock.systemUTC(); private ApplicationRepository applicationRepository; private TenantRepository tenantRepository; private SessionHandlerTest.MockProvisioner provisioner; private OrchestratorMock orchestrator; private TimeoutBudget timeoutBudget; @Rule public TemporaryFolder temporaryFolder = new TemporaryFolder(); @Before public void setup() { Curator curator = new MockCurator(); tenantRepository = new TenantRepository(new TestComponentRegistry.Builder() .curator(curator) .build()); tenantRepository.addTenant(tenant1); tenantRepository.addTenant(tenant2); tenantRepository.addTenant(tenant3); orchestrator = new OrchestratorMock(); provisioner = new SessionHandlerTest.MockProvisioner(); applicationRepository = new ApplicationRepository(tenantRepository, provisioner, orchestrator, clock); timeoutBudget = new TimeoutBudget(clock, Duration.ofSeconds(60)); } @Test public void prepareAndActivate() throws IOException { PrepareResult result = prepareAndActivateApp(testApp); assertTrue(result.configChangeActions().getRefeedActions().isEmpty()); assertTrue(result.configChangeActions().getRestartActions().isEmpty()); } @Test public void prepareAndActivateWithRestart() throws IOException { prepareAndActivateApp(testAppJdiscOnly); PrepareResult result = prepareAndActivateApp(testAppJdiscOnlyRestart); 
assertTrue(result.configChangeActions().getRefeedActions().isEmpty()); assertFalse(result.configChangeActions().getRestartActions().isEmpty()); } @Test public void createAndPrepareAndActivate() { PrepareResult result = deployApp(testApp); assertTrue(result.configChangeActions().getRefeedActions().isEmpty()); assertTrue(result.configChangeActions().getRestartActions().isEmpty()); } @Test @Test public void getLogs() { WireMockServer wireMock = new WireMockServer(wireMockConfig().port(8080)); wireMock.start(); WireMock.configureFor("localhost", wireMock.port()); stubFor(get(urlEqualTo("/logs")) .willReturn(aResponse() .withStatus(200))); wireMock.start(); deployApp(testAppLogServerWithContainer); HttpResponse response = applicationRepository.getLogs(applicationId(), ""); assertEquals(200, response.getStatus()); wireMock.stop(); } @Test(expected = IllegalArgumentException.class) public void getLogsNoContainerOnLogServerHostShouldThrowException() { deployApp(testApp); applicationRepository.getLogs(applicationId(), ""); } @Test public void deleteUnusedTenants() { Instant now = ManualClock.at("1970-01-01T01:00:00"); deployApp(testApp); deployApp(testApp, new PrepareParams.Builder().applicationId(applicationId(tenant2)).build()); Duration ttlForUnusedTenant = Duration.ofHours(1); assertTrue(applicationRepository.deleteUnusedTenants(ttlForUnusedTenant, now).isEmpty()); ttlForUnusedTenant = Duration.ofMillis(1); assertEquals(tenant3, applicationRepository.deleteUnusedTenants(ttlForUnusedTenant, now).iterator().next()); applicationRepository.delete(applicationId()); Set<TenantName> tenantsDeleted = applicationRepository.deleteUnusedTenants(Duration.ofMillis(1), now); assertTrue(tenantsDeleted.contains(tenant1)); assertFalse(tenantsDeleted.contains(tenant2)); } @Test public void decideVersion() { ApplicationId regularApp = ApplicationId.from("tenant1", "application1", "default"); ApplicationId systemApp = ApplicationId.from("hosted-vespa", "routing", "default"); ApplicationId 
testerApp = ApplicationId.from("tenant1", "application1", "default-t"); Version sessionVersion = Version.fromString("5.0"); assertEquals(sessionVersion, ApplicationRepository.decideVersion(systemApp, Environment.prod, sessionVersion, false)); assertEquals(sessionVersion, ApplicationRepository.decideVersion(systemApp, Environment.dev, sessionVersion, false)); assertEquals(sessionVersion, ApplicationRepository.decideVersion(systemApp, Environment.perf, sessionVersion, false)); assertEquals(sessionVersion, ApplicationRepository.decideVersion(testerApp, Environment.prod, sessionVersion, false)); assertEquals(sessionVersion, ApplicationRepository.decideVersion(testerApp, Environment.dev, sessionVersion, false)); assertEquals(sessionVersion, ApplicationRepository.decideVersion(testerApp, Environment.perf, sessionVersion, false)); assertEquals(sessionVersion, ApplicationRepository.decideVersion(regularApp, Environment.prod, sessionVersion, false)); assertEquals(Vtag.currentVersion, ApplicationRepository.decideVersion(regularApp, Environment.dev, sessionVersion, false)); assertEquals(sessionVersion, ApplicationRepository.decideVersion(regularApp, Environment.dev, sessionVersion, true)); assertEquals(Vtag.currentVersion, ApplicationRepository.decideVersion(regularApp, Environment.perf, sessionVersion, false)); } @Test public void deleteUnusedFileReferences() throws IOException { File fileReferencesDir = temporaryFolder.newFolder(); File filereferenceDir = createFilereferenceOnDisk(new File(fileReferencesDir, "foo"), Instant.now().minus(Duration.ofDays(15))); File filereferenceDir2 = createFilereferenceOnDisk(new File(fileReferencesDir, "baz"), Instant.now()); tenantRepository.addTenant(tenant1); Provisioner provisioner = new SessionHandlerTest.MockProvisioner(); applicationRepository = new ApplicationRepository(tenantRepository, provisioner, orchestrator, clock); timeoutBudget = new TimeoutBudget(clock, Duration.ofSeconds(60)); PrepareParams prepareParams = new 
PrepareParams.Builder().applicationId(applicationId()).ignoreValidationErrors(true).build(); deployApp(new File("src/test/apps/app"), prepareParams); Set<String> toBeDeleted = applicationRepository.deleteUnusedFiledistributionReferences(fileReferencesDir); assertEquals(Collections.singleton("foo"), toBeDeleted); assertFalse(filereferenceDir.exists()); assertTrue(filereferenceDir2.exists()); } private File createFilereferenceOnDisk(File filereferenceDir, Instant lastModifiedTime) { assertTrue(filereferenceDir.mkdir()); File bar = new File(filereferenceDir, "file"); IOUtils.writeFile(bar, Utf8.toBytes("test")); assertTrue(filereferenceDir.setLastModified(lastModifiedTime.toEpochMilli())); return filereferenceDir; } @Test public void delete() { { PrepareResult result = deployApp(testApp); long sessionId = result.sessionId(); Tenant tenant = tenantRepository.getTenant(applicationId().tenant()); LocalSession applicationData = tenant.getLocalSessionRepo().getSession(sessionId); assertNotNull(applicationData); assertNotNull(applicationData.getApplicationId()); assertNotNull(tenant.getRemoteSessionRepo().getSession(sessionId)); assertNotNull(applicationRepository.getActiveSession(applicationId())); assertTrue(applicationRepository.deleteApplication(applicationId())); assertNull(applicationRepository.getActiveSession(applicationId())); assertNull(tenant.getLocalSessionRepo().getSession(sessionId)); assertNull(tenant.getRemoteSessionRepo().getSession(sessionId)); assertTrue(provisioner.removed); assertThat(provisioner.lastApplicationId.tenant(), is(tenant.getName())); assertThat(provisioner.lastApplicationId, is(applicationId())); assertFalse(applicationRepository.deleteApplication(applicationId())); } { deployApp(testApp); assertTrue(applicationRepository.deleteApplication(applicationId())); deployApp(testApp); ApplicationId fooId = applicationId(tenant2); PrepareParams prepareParams2 = new PrepareParams.Builder().applicationId(fooId).build(); deployApp(testApp, 
prepareParams2); assertNotNull(applicationRepository.getActiveSession(fooId)); assertTrue(applicationRepository.deleteApplication(fooId)); assertThat(provisioner.lastApplicationId, is(fooId)); assertNotNull(applicationRepository.getActiveSession(applicationId())); assertTrue(applicationRepository.deleteApplication(applicationId())); } } @Test public void deleteLegacy() { deployApp(testApp); assertNotNull(applicationRepository.getActiveSession(applicationId())); assertTrue(applicationRepository.deleteApplicationLegacy(applicationId())); assertNull(applicationRepository.getActiveSession(applicationId())); assertFalse(applicationRepository.deleteApplicationLegacy(applicationId())); } @Test public void testDeletingInactiveSessions() { ManualClock clock = new ManualClock(Instant.now()); ConfigserverConfig configserverConfig = new ConfigserverConfig(new ConfigserverConfig.Builder() .configServerDBDir(Files.createTempDir().getAbsolutePath()) .configDefinitionsDir(Files.createTempDir().getAbsolutePath()) .sessionLifetime(60)); DeployTester tester = new DeployTester(configserverConfig, clock); tester.deployApp("src/test/apps/app", clock.instant()); clock.advance(Duration.ofSeconds(10)); Optional<Deployment> deployment2 = tester.redeployFromLocalActive(); assertTrue(deployment2.isPresent()); deployment2.get().activate(); long activeSessionId = tester.tenant().getApplicationRepo().getSessionIdForApplication(tester.applicationId()); clock.advance(Duration.ofSeconds(10)); Optional<com.yahoo.config.provision.Deployment> deployment3 = tester.redeployFromLocalActive(); assertTrue(deployment3.isPresent()); deployment3.get().prepare(); LocalSession deployment3session = ((com.yahoo.vespa.config.server.deploy.Deployment) deployment3.get()).session(); assertNotEquals(activeSessionId, deployment3session); assertEquals(activeSessionId, tester.tenant().getApplicationRepo().getSessionIdForApplication(tester.applicationId())); assertEquals(3, 
tester.tenant().getLocalSessionRepo().listSessions().size()); clock.advance(Duration.ofHours(1)); tester.applicationRepository().deleteExpiredLocalSessions(); final Collection<LocalSession> sessions = tester.tenant().getLocalSessionRepo().listSessions(); assertEquals(1, sessions.size()); assertEquals(3, new ArrayList<>(sessions).get(0).getSessionId()); assertEquals(0, applicationRepository.deleteExpiredRemoteSessions(Duration.ofSeconds(0))); } private PrepareResult prepareAndActivateApp(File application) throws IOException { FilesApplicationPackage appDir = FilesApplicationPackage.fromFile(application); ApplicationId applicationId = applicationId(); long sessionId = applicationRepository.createSession(applicationId, timeoutBudget, appDir.getAppDir()); return applicationRepository.prepareAndActivate(tenantRepository.getTenant(applicationId.tenant()), sessionId, prepareParams(), false, false, Instant.now()); } private PrepareResult deployApp(File applicationPackage) { return deployApp(applicationPackage, prepareParams()); } private PrepareResult deployApp(File applicationPackage, PrepareParams prepareParams) { return applicationRepository.deploy(applicationPackage, prepareParams); } private PrepareParams prepareParams() { return new PrepareParams.Builder().applicationId(applicationId()).build(); } private ApplicationId applicationId() { return ApplicationId.from(tenant1, ApplicationName.from("testapp"), InstanceName.defaultName()); } private ApplicationId applicationId(TenantName tenantName) { return ApplicationId.from(tenantName, ApplicationName.from("testapp"), InstanceName.defaultName()); } }
class ApplicationRepositoryTest { private final static File testApp = new File("src/test/apps/app"); private final static File testAppJdiscOnly = new File("src/test/apps/app-jdisc-only"); private final static File testAppJdiscOnlyRestart = new File("src/test/apps/app-jdisc-only-restart"); private final static File testAppLogServerWithContainer = new File("src/test/apps/app-logserver-with-container"); private final static TenantName tenant1 = TenantName.from("test1"); private final static TenantName tenant2 = TenantName.from("test2"); private final static TenantName tenant3 = TenantName.from("test3"); private final static Clock clock = Clock.systemUTC(); private ApplicationRepository applicationRepository; private TenantRepository tenantRepository; private SessionHandlerTest.MockProvisioner provisioner; private OrchestratorMock orchestrator; private TimeoutBudget timeoutBudget; @Rule public TemporaryFolder temporaryFolder = new TemporaryFolder(); @Before public void setup() { Curator curator = new MockCurator(); tenantRepository = new TenantRepository(new TestComponentRegistry.Builder() .curator(curator) .build()); tenantRepository.addTenant(tenant1); tenantRepository.addTenant(tenant2); tenantRepository.addTenant(tenant3); orchestrator = new OrchestratorMock(); provisioner = new SessionHandlerTest.MockProvisioner(); applicationRepository = new ApplicationRepository(tenantRepository, provisioner, orchestrator, clock); timeoutBudget = new TimeoutBudget(clock, Duration.ofSeconds(60)); } @Test public void prepareAndActivate() throws IOException { PrepareResult result = prepareAndActivateApp(testApp); assertTrue(result.configChangeActions().getRefeedActions().isEmpty()); assertTrue(result.configChangeActions().getRestartActions().isEmpty()); } @Test public void prepareAndActivateWithRestart() throws IOException { prepareAndActivateApp(testAppJdiscOnly); PrepareResult result = prepareAndActivateApp(testAppJdiscOnlyRestart); 
assertTrue(result.configChangeActions().getRefeedActions().isEmpty()); assertFalse(result.configChangeActions().getRestartActions().isEmpty()); } @Test public void createAndPrepareAndActivate() { PrepareResult result = deployApp(testApp); assertTrue(result.configChangeActions().getRefeedActions().isEmpty()); assertTrue(result.configChangeActions().getRestartActions().isEmpty()); } @Test @Test public void getLogs() { WireMockServer wireMock = new WireMockServer(wireMockConfig().port(8080)); wireMock.start(); WireMock.configureFor("localhost", wireMock.port()); stubFor(get(urlEqualTo("/logs")) .willReturn(aResponse() .withStatus(200))); wireMock.start(); deployApp(testAppLogServerWithContainer); HttpResponse response = applicationRepository.getLogs(applicationId(), ""); assertEquals(200, response.getStatus()); wireMock.stop(); } @Test(expected = IllegalArgumentException.class) public void getLogsNoContainerOnLogServerHostShouldThrowException() { deployApp(testApp); applicationRepository.getLogs(applicationId(), ""); } @Test public void deleteUnusedTenants() { Instant now = ManualClock.at("1970-01-01T01:00:00"); deployApp(testApp); deployApp(testApp, new PrepareParams.Builder().applicationId(applicationId(tenant2)).build()); Duration ttlForUnusedTenant = Duration.ofHours(1); assertTrue(applicationRepository.deleteUnusedTenants(ttlForUnusedTenant, now).isEmpty()); ttlForUnusedTenant = Duration.ofMillis(1); assertEquals(tenant3, applicationRepository.deleteUnusedTenants(ttlForUnusedTenant, now).iterator().next()); applicationRepository.delete(applicationId()); Set<TenantName> tenantsDeleted = applicationRepository.deleteUnusedTenants(Duration.ofMillis(1), now); assertTrue(tenantsDeleted.contains(tenant1)); assertFalse(tenantsDeleted.contains(tenant2)); } @Test public void decideVersion() { ApplicationId regularApp = ApplicationId.from("tenant1", "application1", "default"); ApplicationId systemApp = ApplicationId.from("hosted-vespa", "routing", "default"); ApplicationId 
testerApp = ApplicationId.from("tenant1", "application1", "default-t"); Version sessionVersion = Version.fromString("5.0"); assertEquals(sessionVersion, ApplicationRepository.decideVersion(systemApp, Environment.prod, sessionVersion, false)); assertEquals(sessionVersion, ApplicationRepository.decideVersion(systemApp, Environment.dev, sessionVersion, false)); assertEquals(sessionVersion, ApplicationRepository.decideVersion(systemApp, Environment.perf, sessionVersion, false)); assertEquals(sessionVersion, ApplicationRepository.decideVersion(testerApp, Environment.prod, sessionVersion, false)); assertEquals(sessionVersion, ApplicationRepository.decideVersion(testerApp, Environment.dev, sessionVersion, false)); assertEquals(sessionVersion, ApplicationRepository.decideVersion(testerApp, Environment.perf, sessionVersion, false)); assertEquals(sessionVersion, ApplicationRepository.decideVersion(regularApp, Environment.prod, sessionVersion, false)); assertEquals(Vtag.currentVersion, ApplicationRepository.decideVersion(regularApp, Environment.dev, sessionVersion, false)); assertEquals(sessionVersion, ApplicationRepository.decideVersion(regularApp, Environment.dev, sessionVersion, true)); assertEquals(Vtag.currentVersion, ApplicationRepository.decideVersion(regularApp, Environment.perf, sessionVersion, false)); } @Test public void deleteUnusedFileReferences() throws IOException { File fileReferencesDir = temporaryFolder.newFolder(); File filereferenceDir = createFilereferenceOnDisk(new File(fileReferencesDir, "foo"), Instant.now().minus(Duration.ofDays(15))); File filereferenceDir2 = createFilereferenceOnDisk(new File(fileReferencesDir, "baz"), Instant.now()); tenantRepository.addTenant(tenant1); Provisioner provisioner = new SessionHandlerTest.MockProvisioner(); applicationRepository = new ApplicationRepository(tenantRepository, provisioner, orchestrator, clock); timeoutBudget = new TimeoutBudget(clock, Duration.ofSeconds(60)); PrepareParams prepareParams = new 
PrepareParams.Builder().applicationId(applicationId()).ignoreValidationErrors(true).build(); deployApp(new File("src/test/apps/app"), prepareParams); Set<String> toBeDeleted = applicationRepository.deleteUnusedFiledistributionReferences(fileReferencesDir); assertEquals(Collections.singleton("foo"), toBeDeleted); assertFalse(filereferenceDir.exists()); assertTrue(filereferenceDir2.exists()); } private File createFilereferenceOnDisk(File filereferenceDir, Instant lastModifiedTime) { assertTrue(filereferenceDir.mkdir()); File bar = new File(filereferenceDir, "file"); IOUtils.writeFile(bar, Utf8.toBytes("test")); assertTrue(filereferenceDir.setLastModified(lastModifiedTime.toEpochMilli())); return filereferenceDir; } @Test public void delete() { { PrepareResult result = deployApp(testApp); long sessionId = result.sessionId(); Tenant tenant = tenantRepository.getTenant(applicationId().tenant()); LocalSession applicationData = tenant.getLocalSessionRepo().getSession(sessionId); assertNotNull(applicationData); assertNotNull(applicationData.getApplicationId()); assertNotNull(tenant.getRemoteSessionRepo().getSession(sessionId)); assertNotNull(applicationRepository.getActiveSession(applicationId())); assertTrue(applicationRepository.deleteApplication(applicationId())); assertNull(applicationRepository.getActiveSession(applicationId())); assertNull(tenant.getLocalSessionRepo().getSession(sessionId)); assertNull(tenant.getRemoteSessionRepo().getSession(sessionId)); assertTrue(provisioner.removed); assertThat(provisioner.lastApplicationId.tenant(), is(tenant.getName())); assertThat(provisioner.lastApplicationId, is(applicationId())); assertFalse(applicationRepository.deleteApplication(applicationId())); } { deployApp(testApp); assertTrue(applicationRepository.deleteApplication(applicationId())); deployApp(testApp); ApplicationId fooId = applicationId(tenant2); PrepareParams prepareParams2 = new PrepareParams.Builder().applicationId(fooId).build(); deployApp(testApp, 
prepareParams2); assertNotNull(applicationRepository.getActiveSession(fooId)); assertTrue(applicationRepository.deleteApplication(fooId)); assertThat(provisioner.lastApplicationId, is(fooId)); assertNotNull(applicationRepository.getActiveSession(applicationId())); assertTrue(applicationRepository.deleteApplication(applicationId())); } } @Test public void deleteLegacy() { deployApp(testApp); assertNotNull(applicationRepository.getActiveSession(applicationId())); assertTrue(applicationRepository.deleteApplicationLegacy(applicationId())); assertNull(applicationRepository.getActiveSession(applicationId())); assertFalse(applicationRepository.deleteApplicationLegacy(applicationId())); } @Test public void testDeletingInactiveSessions() { ManualClock clock = new ManualClock(Instant.now()); ConfigserverConfig configserverConfig = new ConfigserverConfig(new ConfigserverConfig.Builder() .configServerDBDir(Files.createTempDir().getAbsolutePath()) .configDefinitionsDir(Files.createTempDir().getAbsolutePath()) .sessionLifetime(60)); DeployTester tester = new DeployTester(configserverConfig, clock); tester.deployApp("src/test/apps/app", clock.instant()); clock.advance(Duration.ofSeconds(10)); Optional<Deployment> deployment2 = tester.redeployFromLocalActive(); assertTrue(deployment2.isPresent()); deployment2.get().activate(); long activeSessionId = tester.tenant().getApplicationRepo().getSessionIdForApplication(tester.applicationId()); clock.advance(Duration.ofSeconds(10)); Optional<com.yahoo.config.provision.Deployment> deployment3 = tester.redeployFromLocalActive(); assertTrue(deployment3.isPresent()); deployment3.get().prepare(); LocalSession deployment3session = ((com.yahoo.vespa.config.server.deploy.Deployment) deployment3.get()).session(); assertNotEquals(activeSessionId, deployment3session); assertEquals(activeSessionId, tester.tenant().getApplicationRepo().getSessionIdForApplication(tester.applicationId())); assertEquals(3, 
tester.tenant().getLocalSessionRepo().listSessions().size()); clock.advance(Duration.ofHours(1)); tester.applicationRepository().deleteExpiredLocalSessions(); final Collection<LocalSession> sessions = tester.tenant().getLocalSessionRepo().listSessions(); assertEquals(1, sessions.size()); assertEquals(3, new ArrayList<>(sessions).get(0).getSessionId()); assertEquals(0, applicationRepository.deleteExpiredRemoteSessions(Duration.ofSeconds(0))); } private PrepareResult prepareAndActivateApp(File application) throws IOException { FilesApplicationPackage appDir = FilesApplicationPackage.fromFile(application); ApplicationId applicationId = applicationId(); long sessionId = applicationRepository.createSession(applicationId, timeoutBudget, appDir.getAppDir()); return applicationRepository.prepareAndActivate(tenantRepository.getTenant(applicationId.tenant()), sessionId, prepareParams(), false, false, Instant.now()); } private PrepareResult deployApp(File applicationPackage) { return deployApp(applicationPackage, prepareParams()); } private PrepareResult deployApp(File applicationPackage, PrepareParams prepareParams) { return applicationRepository.deploy(applicationPackage, prepareParams); } private PrepareParams prepareParams() { return new PrepareParams.Builder().applicationId(applicationId()).build(); } private ApplicationId applicationId() { return ApplicationId.from(tenant1, ApplicationName.from("testapp"), InstanceName.defaultName()); } private ApplicationId applicationId(TenantName tenantName) { return ApplicationId.from(tenantName, ApplicationName.from("testapp"), InstanceName.defaultName()); } }
IMHO that version is harder to read because it combines streaming over the keys with a complex anyMatch. This is probably just personal taste, but I'll go with mine. :-)
private boolean isSuspendedInAnotherZone(Application application, ZoneId zone) { for (Deployment deployment : application.deployments().values()) { if ( ! deployment.zone().equals(zone) && controller.applications().isSuspended(new DeploymentId(application.id(), deployment.zone()))) return true; } return false; }
return false;
private boolean isSuspendedInAnotherZone(Application application, ZoneId zone) { for (Deployment deployment : application.deployments().values()) { if ( ! deployment.zone().equals(zone) && controller.applications().isSuspended(new DeploymentId(application.id(), deployment.zone()))) return true; } return false; }
/**
 * Decides which deployment jobs to trigger for each application, based on completed jobs,
 * pending changes and deployment specs. Triggering is mediated either through an external
 * build service or through the internal job controller.
 */
class DeploymentTrigger {

    private final static Logger log = Logger.getLogger(DeploymentTrigger.class.getName());

    private final Controller controller;
    private final Clock clock;
    private final BuildService buildService;
    private final JobController jobs;

    public DeploymentTrigger(Controller controller, BuildService buildService, Clock clock) {
        this.controller = Objects.requireNonNull(controller, "controller cannot be null");
        this.clock = Objects.requireNonNull(clock, "clock cannot be null");
        this.buildService = Objects.requireNonNull(buildService, "buildService cannot be null");
        this.jobs = controller.jobController();
    }

    /** Returns the deployment steps of the given spec, evaluated for this system. */
    public DeploymentSteps steps(DeploymentSpec spec) {
        return new DeploymentSteps(spec, controller::system);
    }

    /**
     * Records information when a job completes (successfully or not). This information is used when deciding what to
     * trigger next.
     */
    public void notifyOfCompletion(JobReport report) {
        log.log(LogLevel.INFO, String.format("Notified of %s for %s of %s (%d)",
                                             report.jobError().map(e -> e.toString() + " error")
                                                   .orElse("success"),
                                             report.jobType(),
                                             report.applicationId(),
                                             report.projectId()));
        if ( ! applications().get(report.applicationId()).isPresent()) {
            log.log(LogLevel.WARNING, "Ignoring completion of job of project '" + report.projectId() +
                                      "': Unknown application '" + report.applicationId() + "'");
            return;
        }

        applications().lockOrThrow(report.applicationId(), application -> {
            JobRun triggering;
            if (report.jobType() == component) {
                // A component (build) job produces a new application version; decide whether
                // to deploy it now or park it as the outstanding change.
                ApplicationVersion applicationVersion = ApplicationVersion.from(report.sourceRevision().get(), report.buildNumber());
                triggering = JobRun.triggering(application.get().oldestDeployedPlatform().orElse(controller.systemVersion()),
                                               applicationVersion, Optional.empty(), Optional.empty(),
                                               "Application commit", clock.instant());
                if (report.success()) {
                    if (acceptNewApplicationVersion(application.get()))
                        application = application.withChange(application.get().change().with(applicationVersion))
                                                 .withOutstandingChange(Change.empty());
                    else
                        application = application.withOutstandingChange(Change.of(applicationVersion));
                }
            }
            else {
                // A deployment job: the completion must correspond to the last triggered run.
                triggering = application.get().deploymentJobs().statusOf(report.jobType())
                                        .filter(job -> job.lastTriggered().isPresent()
                                                       && job.lastCompleted()
                                                             .map(completion -> ! completion.at().isAfter(job.lastTriggered().get().at()))
                                                             .orElse(true))
                                        .orElseThrow(() -> new IllegalStateException("Notified of completion of " + report.jobType().jobName() + " for " +
                                                                                     report.applicationId() + ", but that has neither been triggered nor deployed"))
                                        .lastTriggered().get();
            }
            application = application.withJobCompletion(report.projectId(),
                                                        report.jobType(),
                                                        triggering.completion(report.buildNumber(), clock.instant()),
                                                        report.jobError());
            // Drop the parts of the change which are now complete everywhere.
            application = application.withChange(remainingChange(application.get()));
            applications().store(application);
        });
    }

    /** Returns a map of jobs that are scheduled to be run, grouped by the job type */
    public Map<JobType, ? extends List<? extends BuildJob>> jobsToRun() {
        return computeReadyJobs().stream().collect(groupingBy(Job::jobType));
    }

    /**
     * Finds and triggers jobs that can and should run but are currently not, and returns the number of triggered jobs.
     *
     * Only one job is triggered each run for test jobs, since their environments have limited capacity.
     */
    public long triggerReadyJobs() {
        return computeReadyJobs().stream()
                .collect(partitioningBy(job -> job.jobType().isTest()))
                .entrySet().stream()
                .flatMap(entry -> (entry.getKey()
                        // True for capacity constrained zones -- sort by priority and make a task for each job type.
                        ? entry.getValue().stream()
                               .sorted(comparing(Job::isRetry)
                                               .thenComparing(Job::applicationUpgrade)
                                               .reversed()
                                               .thenComparing(Job::availableSince))
                               .collect(groupingBy(Job::jobType))
                        // False for production jobs -- keep step order and make a task for each application.
                        : entry.getValue().stream()
                               .collect(groupingBy(Job::applicationId)))
                        .values().stream()
                        .map(jobs -> (Supplier<Long>) jobs.stream()
                                                          .filter(this::trigger)
                                                          // Only the first test job of each type is triggered per run.
                                                          .limit(entry.getKey() ? 1 : Long.MAX_VALUE)::count))
                .parallel().map(Supplier::get).reduce(0L, Long::sum);
    }

    /**
     * Attempts to trigger the given job for the given application and returns the outcome.
     *
     * If the build service can not find the given job, or claims it is illegal to trigger it,
     * the project id is removed from the application owning the job, to prevent further trigger attempts.
     */
    public boolean trigger(Job job) {
        log.log(LogLevel.INFO, String.format("Triggering %s: %s", job, job.triggering));
        try {
            applications().lockOrThrow(job.applicationId(), application -> {
                if (application.get().deploymentJobs().builtInternally())
                    jobs.start(job.applicationId(), job.jobType, new Versions(job.triggering.platform(),
                                                                              job.triggering.application(),
                                                                              job.triggering.sourcePlatform(),
                                                                              job.triggering.sourceApplication()));
                else
                    buildService.trigger(job);
                applications().store(application.withJobTriggering(job.jobType, job.triggering));
            });
            return true;
        }
        catch (RuntimeException e) {
            log.log(LogLevel.WARNING, "Exception triggering " + job + ": " + e);
            // Unknown job or illegal trigger: forget the project id so we stop retrying.
            if (e instanceof NoSuchElementException || e instanceof IllegalArgumentException)
                applications().lockOrThrow(job.applicationId(), application ->
                        applications().store(application.withProjectId(OptionalLong.empty())));
            return false;
        }
    }

    /** Force triggering of a job for given application. */
    public List<JobType> forceTrigger(ApplicationId applicationId, JobType jobType, String user) {
        Application application = applications().require(applicationId);
        if (jobType == component) {
            if (application.deploymentJobs().builtInternally())
                throw new IllegalArgumentException(applicationId + " has no component job we can trigger.");

            buildService.trigger(BuildJob.of(applicationId, application.deploymentJobs().projectId().getAsLong(), jobType.jobName()));
            return singletonList(component);
        }
        Versions versions = Versions.from(application.change(), application, deploymentFor(application, jobType),
                                          controller.systemVersion());
        String reason = "Job triggered manually by " + user;
        // An untested production deployment is preceded by its test jobs instead.
        return (jobType.isProduction() && ! isTested(application, versions)
                ? testJobs(application, versions, reason, clock.instant()).stream()
                : Stream.of(deploymentJob(application, versions, application.change(), jobType, reason, clock.instant())))
                .peek(this::trigger)
                .map(Job::jobType).collect(toList());
    }

    /**
     * Triggers a change of this application
     *
     * @param applicationId the application to trigger
     * @throws IllegalArgumentException if this application already has an ongoing change
     */
    public void triggerChange(ApplicationId applicationId, Change change) {
        applications().lockOrThrow(applicationId, application -> {
            if (application.get().changeAt(controller.clock().instant()).isPresent() && ! application.get().deploymentJobs().hasFailures())
                throw new IllegalArgumentException("Could not start " + change + " on " + application + ": " +
                                                   application.get().change() + " is already in progress");
            application = application.withChange(change);
            if (change.application().isPresent())
                application = application.withOutstandingChange(Change.empty());
            applications().store(application);
        });
    }

    /** Cancels a platform upgrade of the given application, and an application upgrade as well if {@code keepApplicationChange}. */
    public void cancelChange(ApplicationId applicationId, boolean keepApplicationChange) {
        applications().lockOrThrow(applicationId, application -> {
            applications().store(application.withChange(application.get().change().application()
                                                                   .filter(__ -> keepApplicationChange)
                                                                   .map(Change::of)
                                                                   .orElse(Change.empty())));
        });
    }

    // ---------- Utilities ----------

    private ApplicationController applications() {
        return controller.applications();
    }

    /** Returns the last successful run of the given job, provided it targeted the given versions. */
    private Optional<JobRun> successOn(Application application, JobType jobType, Versions versions) {
        return application.deploymentJobs().statusOf(jobType).flatMap(JobStatus::lastSuccess)
                          .filter(versions::targetsMatch);
    }

    private Optional<Deployment> deploymentFor(Application application, JobType jobType) {
        return Optional.ofNullable(application.deployments().get(jobType.zone(controller.system())));
    }

    /** The latest of two optional instants/values; empty operands lose. */
    private static <T extends Comparable<T>> Optional<T> max(Optional<T> o1, Optional<T> o2) {
        return ! o1.isPresent() ? o2 : ! o2.isPresent() ? o1 : o1.get().compareTo(o2.get()) >= 0 ? o1 : o2;
    }

    // ---------- Ready job computation ----------

    /** Returns the set of all jobs which have changes to propagate from the upstream steps. */
    private List<Job> computeReadyJobs() {
        return ApplicationList.from(applications().asList())
                              .notPullRequest()
                              .withProjectId()
                              .deploying()
                              .idList().stream()
                              .map(this::computeReadyJobs)
                              .flatMap(Collection::stream)
                              .collect(toList());
    }

    /**
     * Finds the next step to trigger for the given application, if any, and returns these as a list.
     */
    private List<Job> computeReadyJobs(ApplicationId id) {
        List<Job> jobs = new ArrayList<>();
        applications().get(id).ifPresent(application -> {
            Change change = application.changeAt(clock.instant());
            // Completion of the most recent test job success is the earliest point a
            // production step may run; delays and previous steps push it further out.
            Optional<Instant> completedAt = max(application.deploymentJobs().statusOf(systemTest)
                                                           .<Instant>flatMap(job -> job.lastSuccess().map(JobRun::at)),
                                                application.deploymentJobs().statusOf(stagingTest)
                                                           .<Instant>flatMap(job -> job.lastSuccess().map(JobRun::at)));
            String reason = "New change available";
            List<Job> testJobs = null; // null means "compute them later, if needed"
            DeploymentSteps steps = steps(application.deploymentSpec());

            if (change.isPresent()) {
                for (Step step : steps.production()) {
                    List<JobType> stepJobs = steps.toJobs(step);
                    List<JobType> remainingJobs = stepJobs.stream().filter(job -> !isComplete(change, application, job)).collect(toList());
                    if (!remainingJobs.isEmpty()) { // Step not complete, trigger what is ready, or required tests.
                        for (JobType job : remainingJobs) {
                            Versions versions = Versions.from(change, application, deploymentFor(application, job),
                                                              controller.systemVersion());
                            if (isTested(application, versions)) {
                                if (completedAt.isPresent() && canTrigger(job, versions, application, stepJobs)) {
                                    jobs.add(deploymentJob(application, versions, change, job, reason, completedAt.get()));
                                }
                                if (!alreadyTriggered(application, versions)) {
                                    testJobs = emptyList(); // Suppress extra test jobs for this change.
                                }
                            } else if (testJobs == null) {
                                testJobs = testJobs(application, versions,
                                                    String.format("Testing deployment for %s (%s)",
                                                                  job.jobName(), versions.toString()),
                                                    completedAt.orElseGet(clock::instant));
                            }
                        }
                        completedAt = Optional.empty(); // Following steps must wait for this one.
                    } else { // All jobs are complete -- find the time of completion of this step.
                        if (stepJobs.isEmpty()) { // No jobs means this is a delay step.
                            Duration delay = ((DeploymentSpec.Delay) step).duration();
                            completedAt = completedAt.map(at -> at.plus(delay)).filter(at -> !at.isAfter(clock.instant()));
                            reason += " after a delay of " + delay;
                        } else {
                            completedAt = stepJobs.stream().map(job -> application.deploymentJobs().statusOf(job).get().lastCompleted().get().at()).max(naturalOrder());
                            reason = "Available change in " + stepJobs.stream().map(JobType::jobName).collect(joining(", "));
                        }
                    }
                }
            }
            if (testJobs == null) {
                testJobs = testJobs(application, Versions.from(application, controller.systemVersion()),
                                    "Testing last changes outside prod", clock.instant());
            }
            jobs.addAll(testJobs);
        });
        return Collections.unmodifiableList(jobs);
    }

    /** Returns whether given job should be triggered */
    private boolean canTrigger(JobType job, Versions versions, Application application, List<JobType> parallelJobs) {
        if (jobStateOf(application, job) != idle) return false;
        // Are we already running jobs which are not in the set which can run in parallel with this?
        if (parallelJobs != null && ! parallelJobs.containsAll(runningProductionJobs(application))) return false;
        if (job.isProduction() && isSuspendedInAnotherZone(application, job.zone(controller.system()))) return false;
        return triggerAt(clock.instant(), job, versions, application);
    }

    /** Returns whether given job should be triggered */
    private boolean canTrigger(JobType job, Versions versions, Application application) {
        return canTrigger(job, versions, application, null);
    }

    /** Returns whether job can trigger at given instant */
    public boolean triggerAt(Instant instant, JobType job, Versions versions, Application application) {
        Optional<JobStatus> jobStatus = application.deploymentJobs().statusOf(job);
        if (!jobStatus.isPresent()) return true;
        if (jobStatus.get().isSuccess()) return true; // Success
        if (!jobStatus.get().lastCompleted().isPresent()) return true; // Never completed
        if (!jobStatus.get().firstFailing().isPresent()) return true; // Should not happen normally
        if (!versions.targetsMatch(jobStatus.get().lastCompleted().get())) return true; // Always trigger as targets have changed
        if (application.deploymentSpec().upgradePolicy() == DeploymentSpec.UpgradePolicy.canary) return true; // Don't throttle canaries

        Instant firstFailing = jobStatus.get().firstFailing().get().at();
        Instant lastCompleted = jobStatus.get().lastCompleted().get().at();

        // Retry all errors immediately for 1 minute
        if (firstFailing.isAfter(instant.minus(Duration.ofMinutes(1)))) return true;

        // Retry out of capacity errors in test environments every minute
        if (job.isTest() && jobStatus.get().isOutOfCapacity()) {
            return lastCompleted.isBefore(instant.minus(Duration.ofMinutes(1)));
        }

        // Retry other errors, with back-off: every 10 minutes for the first hour, then every 2 hours.
        if (firstFailing.isAfter(instant.minus(Duration.ofHours(1)))) {
            return lastCompleted.isBefore(instant.minus(Duration.ofMinutes(10)));
        }
        return lastCompleted.isBefore(instant.minus(Duration.ofHours(2)));
    }

    private List<JobType> runningProductionJobs(Application application) {
        return application.deploymentJobs().jobStatus().keySet().parallelStream()
                          .filter(JobType::isProduction)
                          .filter(job -> isRunning(application, job))
                          .collect(toList());
    }

    /** Returns whether the given job is currently running; false if completed since last triggered, asking the build service otherwise. */
    private boolean isRunning(Application application, JobType jobType) {
        return ! application.deploymentJobs().statusOf(jobType)
                            .flatMap(job -> job.lastCompleted().map(run -> run.at().isAfter(job.lastTriggered().get().at())))
                            .orElse(false)
               && EnumSet.of(running, queued).contains(jobStateOf(application, jobType));
    }

    private JobState jobStateOf(Application application, JobType jobType) {
        if (application.deploymentJobs().builtInternally()) {
            Optional<Run> run = controller.jobController().last(application.id(), jobType);
            return run.isPresent() && ! run.get().hasEnded() ? JobState.running : JobState.idle;
        }
        return buildService.stateOf(BuildJob.of(application.id(),
                                                application.deploymentJobs().projectId().getAsLong(),
                                                jobType.jobName()));
    }

    // ---------- Completion logic ----------

    /**
     * Returns whether the given change is complete for the given application for the given job.
     *
     * Any job is complete if the given change is already successful on that job.
     * A production job is also considered complete if its current change is strictly dominated by what
     * is already deployed in its zone, i.e., no parts of the change are upgrades, and the full current
     * change for the application downgrades the deployment, which is an acknowledgement that the deployed
     * version is broken somehow, such that the job may be locked in failure until a new version is released.
     */
    public boolean isComplete(Change change, Application application, JobType jobType) {
        Optional<Deployment> existingDeployment = deploymentFor(application, jobType);
        return       application.deploymentJobs().statusOf(jobType).flatMap(JobStatus::lastSuccess)
                                .map(job ->    change.platform().map(job.platform()::equals).orElse(true)
                                            && change.application().map(job.application()::equals).orElse(true))
                                .orElse(false)
               ||    jobType.isProduction()
                  && existingDeployment.map(deployment ->    ! isUpgrade(change, deployment)
                                                          &&   isDowngrade(application.change(), deployment))
                                       .orElse(false);
    }

    private static boolean isUpgrade(Change change, Deployment deployment) {
        return change.upgrades(deployment.version()) || change.upgrades(deployment.applicationVersion());
    }

    private static boolean isDowngrade(Change change, Deployment deployment) {
        return change.downgrades(deployment.version()) || change.downgrades(deployment.applicationVersion());
    }

    private boolean isTested(Application application, Versions versions) {
        return    testedIn(application, systemTest, versions)
               && testedIn(application, stagingTest, versions)
               || alreadyTriggered(application, versions);
    }

    public boolean testedIn(Application application, JobType testType, Versions versions) {
        if (testType == systemTest)
            return successOn(application, systemTest, versions).isPresent();
        if (testType == stagingTest)
            // Staging tests must also have upgraded from the same source versions.
            return successOn(application, stagingTest, versions).filter(versions::sourcesMatchIfPresent).isPresent();
        throw new IllegalArgumentException(testType + " is not a test job!");
    }

    public boolean alreadyTriggered(Application application, Versions versions) {
        return application.deploymentJobs().jobStatus().values().stream()
                          .filter(job -> job.type().isProduction())
                          .anyMatch(job -> job.lastTriggered()
                                              .filter(versions::targetsMatch)
                                              .filter(versions::sourcesMatchIfPresent)
                                              .isPresent());
    }

    // ---------- Change management o_O ----------

    private boolean acceptNewApplicationVersion(Application application) {
        // Outside a revision window, with an application change in progress and no failures:
        // park the new version as outstanding instead.
        if (   ! application.deploymentSpec().canChangeRevisionAt(clock.instant())
            &&   application.changeAt(clock.instant()).application().isPresent()
            && ! application.deploymentJobs().hasFailures())
            return false;
        if (application.change().application().isPresent()) return true; // Replacing a previous application change is ok.
        if (application.deploymentJobs().hasFailures()) return true; // Allow changes to fix upgrade problems.
        return ! application.changeAt(clock.instant()).platform().isPresent();
    }

    private Change remainingChange(Application application) {
        DeploymentSteps steps = steps(application.deploymentSpec());
        List<JobType> jobs = steps.production().isEmpty()
                ? steps.testJobs()
                : steps.productionJobs();

        Change change = application.change();
        if (jobs.stream().allMatch(job -> isComplete(application.change().withoutApplication(), application, job)))
            change = change.withoutPlatform();
        if (jobs.stream().allMatch(job -> isComplete(application.change().withoutPlatform(), application, job)))
            change = change.withoutApplication();
        return change;
    }

    // ---------- Version and job helpers ----------

    /**
     * Returns the list of test jobs that should run now, and that need to succeed on the given versions for it to be considered tested.
     */
    private List<Job> testJobs(Application application, Versions versions, String reason, Instant availableSince) {
        List<Job> jobs = new ArrayList<>();
        for (JobType jobType : steps(application.deploymentSpec()).testJobs()) {
            Optional<JobRun> completion = successOn(application, jobType, versions)
                    // systemTest does not require matching sources; stagingTest does.
                    .filter(run -> versions.sourcesMatchIfPresent(run) || jobType == systemTest);
            if (!completion.isPresent() && canTrigger(jobType, versions, application)) {
                jobs.add(deploymentJob(application, versions, application.change(), jobType, reason, availableSince));
            }
        }
        return jobs;
    }

    private Job deploymentJob(Application application, Versions versions, Change change, JobType jobType, String reason, Instant availableSince) {
        boolean isRetry = application.deploymentJobs().statusOf(jobType)
                                     .map(JobStatus::isOutOfCapacity)
                                     .orElse(false);
        if (isRetry) reason += "; retrying on out of capacity";

        JobRun triggering = JobRun.triggering(versions.targetPlatform(), versions.targetApplication(),
                                              versions.sourcePlatform(), versions.sourceApplication(),
                                              reason, clock.instant());
        return new Job(application, triggering, jobType, availableSince, isRetry, change.application().isPresent());
    }

    // ---------- Data containers ----------

    /** A triggerable job, with the triggering run data and scheduling metadata. */
    private static class Job extends BuildJob {

        private final JobType jobType;
        private final JobRun triggering;
        private final Instant availableSince;
        private final boolean isRetry;
        private final boolean isApplicationUpgrade;

        private Job(Application application, JobRun triggering, JobType jobType, Instant availableSince,
                    boolean isRetry, boolean isApplicationUpgrade) {
            super(application.id(), application.deploymentJobs().projectId().getAsLong(), jobType.jobName());
            this.jobType = jobType;
            this.triggering = triggering;
            this.availableSince = availableSince;
            this.isRetry = isRetry;
            this.isApplicationUpgrade = isApplicationUpgrade;
        }

        JobType jobType() { return jobType; }
        Instant availableSince() { return availableSince; } // TODO jvenstad: This is 95% broken now. Change.at() can restore it.
        boolean isRetry() { return isRetry; }
        boolean applicationUpgrade() { return isApplicationUpgrade; }

    }

}
/**
 * Schedules deployment jobs for applications and keeps each application's
 * {@code change()} in sync with what has actually been deployed.
 *
 * NOTE(review): canTrigger(...) below calls isSuspendedInAnotherZone(...), which is not
 * defined inside this class body in this excerpt — a definition appears elsewhere in the
 * file; verify it belongs in this class.
 */
class DeploymentTrigger {

    private final static Logger log = Logger.getLogger(DeploymentTrigger.class.getName());

    private final Controller controller;
    private final Clock clock;               // injected so time-dependent decisions are testable
    private final BuildService buildService; // external build system, used for non-internally-deployed applications
    private final JobController jobs;        // runs jobs for internally deployed applications

    /** Creates a trigger; all collaborators are required (null-hostile). */
    public DeploymentTrigger(Controller controller, BuildService buildService, Clock clock) {
        this.controller = Objects.requireNonNull(controller, "controller cannot be null");
        this.clock = Objects.requireNonNull(clock, "clock cannot be null");
        this.buildService = Objects.requireNonNull(buildService, "buildService cannot be null");
        this.jobs = controller.jobController();
    }

    /** Returns the deployment steps of the given spec, evaluated in this controller's system. */
    public DeploymentSteps steps(DeploymentSpec spec) {
        return new DeploymentSteps(spec, controller::system);
    }

    /**
     * Records information when a job completes (successfully or not). This information is used when deciding what to
     * trigger next.
     */
    public void notifyOfCompletion(JobReport report) {
        log.log(LogLevel.INFO, String.format("Notified of %s for %s of %s (%d)",
                                             report.jobError().map(e -> e.toString() + " error")
                                                   .orElse("success"),
                                             report.jobType(), report.applicationId(), report.projectId()));
        if ( ! applications().get(report.applicationId()).isPresent()) {
            log.log(LogLevel.WARNING, "Ignoring completion of job of project '" + report.projectId() +
                                      "': Unknown application '" + report.applicationId() + "'");
            return;
        }

        applications().lockOrThrow(report.applicationId(), application -> {
            JobRun triggering;
            if (report.jobType() == component) {
                ApplicationVersion applicationVersion = ApplicationVersion.from(report.sourceRevision().get(), report.buildNumber());
                triggering = JobRun.triggering(application.get().oldestDeployedPlatform().orElse(controller.systemVersion()),
                                               applicationVersion,
                                               Optional.empty(), Optional.empty(), "Application commit", clock.instant());
                if (report.success()) {
                    if (acceptNewApplicationVersion(application.get())) {
                        application = application.withChange(application.get().change().with(applicationVersion))
                                                 .withOutstandingChange(Change.empty());
                        // A new change is rolling out: abort any ongoing internal runs for this application.
                        if (application.get().deploymentJobs().deployedInternally())
                            for (Run run : jobs.active())
                                if (run.id().application().equals(report.applicationId()))
                                    jobs.abort(run.id());
                    }
                    else
                        // Not accepted now; keep it as the outstanding change to roll out later.
                        application = application.withOutstandingChange(Change.of(applicationVersion));
                }
            }
            else {
                // Find the triggering this completion corresponds to: the job must have been
                // triggered, and not already have completed after that triggering.
                triggering = application.get().deploymentJobs().statusOf(report.jobType())
                                        .filter(job ->    job.lastTriggered().isPresent()
                                                       && job.lastCompleted()
                                                             .map(completion -> ! completion.at().isAfter(job.lastTriggered().get().at()))
                                                             .orElse(true))
                                        .orElseThrow(() -> new IllegalStateException("Notified of completion of " + report.jobType().jobName() + " for " +
                                                                                     report.applicationId() + ", but that has neither been triggered nor deployed"))
                                        .lastTriggered().get();
            }
            application = application.withJobCompletion(report.projectId(),
                                                        report.jobType(),
                                                        triggering.completion(report.buildNumber(), clock.instant()),
                                                        report.jobError());
            // Prune the parts of the change which are now complete everywhere.
            application = application.withChange(remainingChange(application.get()));
            applications().store(application);
        });
    }

    /** Returns a map of jobs that are scheduled to be run, grouped by the job type */
    public Map<JobType, ? extends List<? extends BuildJob>> jobsToRun() {
        return computeReadyJobs().stream().collect(groupingBy(Job::jobType));
    }

    /**
     * Finds and triggers jobs that can and should run but are currently not, and returns the number of triggered jobs.
     *
     * Only one job is triggered each run for test jobs, since their environments have limited capacity.
     */
    public long triggerReadyJobs() {
        return computeReadyJobs().stream()
                                 .collect(partitioningBy(job -> job.jobType().isTest()))
                                 .entrySet().stream()
                                 .flatMap(entry -> (entry.getKey()
                                         // Capacity-constrained test jobs are prioritised (retries and
                                         // application upgrades first, then oldest available), per job type ...
                                         ? entry.getValue().stream()
                                                .sorted(comparing(Job::isRetry)
                                                                .thenComparing(Job::applicationUpgrade)
                                                                .reversed()
                                                                .thenComparing(Job::availableSince))
                                                .collect(groupingBy(Job::jobType))
                                         // ... while other jobs are simply grouped by application.
                                         : entry.getValue().stream()
                                                .collect(groupingBy(Job::applicationId)))
                                         .values().stream()
                                         // NOTE(review): the lambda parameter 'jobs' shadows the JobController field here.
                                         .map(jobs -> (Supplier<Long>) jobs.stream()
                                                                           .filter(this::trigger)
                                                                           .limit(entry.getKey() ? 1 : Long.MAX_VALUE)::count))
                                 .parallel().map(Supplier::get).reduce(0L, Long::sum);
    }

    /**
     * Attempts to trigger the given job for the given application and returns the outcome.
     *
     * If the build service can not find the given job, or claims it is illegal to trigger it,
     * the project id is removed from the application owning the job, to prevent further trigger attempts.
     */
    public boolean trigger(Job job) {
        log.log(LogLevel.INFO, String.format("Triggering %s: %s", job, job.triggering));
        try {
            applications().lockOrThrow(job.applicationId(), application -> {
                if (application.get().deploymentJobs().deployedInternally())
                    jobs.start(job.applicationId(), job.jobType, new Versions(job.triggering.platform(),
                                                                              job.triggering.application(),
                                                                              job.triggering.sourcePlatform(),
                                                                              job.triggering.sourceApplication()));
                else
                    buildService.trigger(job);
                applications().store(application.withJobTriggering(job.jobType, job.triggering));
            });
            return true;
        }
        catch (RuntimeException e) {
            log.log(LogLevel.WARNING, "Exception triggering " + job + ": " + e);
            if (e instanceof NoSuchElementException || e instanceof IllegalArgumentException)
                applications().lockOrThrow(job.applicationId(), application ->
                        applications().store(application.withProjectId(OptionalLong.empty())));
            return false;
        }
    }

    /** Force triggering of a job for given application. */
    public List<JobType> forceTrigger(ApplicationId applicationId, JobType jobType, String user) {
        Application application = applications().require(applicationId);
        if (jobType == component) {
            if (application.deploymentJobs().deployedInternally())
                throw new IllegalArgumentException(applicationId + " has no component job we can trigger.");

            buildService.trigger(BuildJob.of(applicationId, application.deploymentJobs().projectId().getAsLong(), jobType.jobName()));
            return singletonList(component);
        }
        Versions versions = Versions.from(application.change(), application, deploymentFor(application, jobType),
                                          controller.systemVersion());
        String reason = "Job triggered manually by " + user;
        // An untested production job is preceded by its test jobs instead of being triggered directly.
        return (jobType.isProduction() && ! isTested(application, versions)
                ? testJobs(application, versions, reason, clock.instant()).stream()
                : Stream.of(deploymentJob(application, versions, application.change(), jobType, reason, clock.instant())))
                .peek(this::trigger)
                .map(Job::jobType).collect(toList());
    }

    /**
     * Triggers a change of this application
     *
     * @param applicationId the application to trigger
     * @throws IllegalArgumentException if this application already has an ongoing change
     */
    public void triggerChange(ApplicationId applicationId, Change change) {
        applications().lockOrThrow(applicationId, application -> {
            if (application.get().changeAt(controller.clock().instant()).isPresent() && ! application.get().deploymentJobs().hasFailures())
                throw new IllegalArgumentException("Could not start " + change + " on " + application + ": " +
                                                   application.get().change() + " is already in progress");
            application = application.withChange(change);
            if (change.application().isPresent())
                application = application.withOutstandingChange(Change.empty());

            applications().store(application);
        });
    }

    /** Cancels a platform upgrade of the given application, and an application upgrade as well if {@code keepApplicationChange}. */
    public void cancelChange(ApplicationId applicationId, boolean keepApplicationChange) {
        applications().lockOrThrow(applicationId, application -> {
            applications().store(application.withChange(application.get().change().application()
                                                                   .filter(__ -> keepApplicationChange)
                                                                   .map(Change::of)
                                                                   .orElse(Change.empty())));
        });
    }

    private ApplicationController applications() { return controller.applications(); }

    /** Returns the last success of the given job type for the application, if its versions match the given ones. */
    private Optional<JobRun> successOn(Application application, JobType jobType, Versions versions) {
        return application.deploymentJobs().statusOf(jobType).flatMap(JobStatus::lastSuccess)
                          .filter(versions::targetsMatch);
    }

    /** Returns the deployment, if any, in the zone the given job type deploys to. */
    private Optional<Deployment> deploymentFor(Application application, JobType jobType) {
        return Optional.ofNullable(application.deployments().get(jobType.zone(controller.system())));
    }

    /** Returns the larger of two optionals; an empty optional loses to a present one. */
    private static <T extends Comparable<T>> Optional<T> max(Optional<T> o1, Optional<T> o2) {
        return ! o1.isPresent() ? o2 : ! o2.isPresent() ? o1 : o1.get().compareTo(o2.get()) >= 0 ? o1 : o2;
    }

    /** Returns the set of all jobs which have changes to propagate from the upstream steps. */
    private List<Job> computeReadyJobs() {
        return ApplicationList.from(applications().asList())
                              .notPullRequest()
                              .withProjectId()
                              .deploying()
                              .idList().stream()
                              .map(this::computeReadyJobs)
                              .flatMap(Collection::stream)
                              .collect(toList());
    }

    /**
     * Finds the next step to trigger for the given application, if any, and returns these as a list.
     */
    private List<Job> computeReadyJobs(ApplicationId id) {
        List<Job> jobs = new ArrayList<>();
        applications().get(id).ifPresent(application -> {
            Change change = application.changeAt(clock.instant());
            // The change is available to the first production step once both test jobs have succeeded.
            Optional<Instant> completedAt = max(application.deploymentJobs().statusOf(systemTest)
                                                           .<Instant>flatMap(job -> job.lastSuccess().map(JobRun::at)),
                                                application.deploymentJobs().statusOf(stagingTest)
                                                           .<Instant>flatMap(job -> job.lastSuccess().map(JobRun::at)));
            String reason = "New change available";
            List<Job> testJobs = null; // null means "none decided yet"; empty list suppresses further test jobs
            DeploymentSteps steps = steps(application.deploymentSpec());

            if (change.isPresent()) {
                for (Step step : steps.production()) {
                    List<JobType> stepJobs = steps.toJobs(step);
                    List<JobType> remainingJobs = stepJobs.stream().filter(job -> !isComplete(change, application, job)).collect(toList());
                    if (!remainingJobs.isEmpty()) {
                        for (JobType job : remainingJobs) {
                            Versions versions = Versions.from(change, application, deploymentFor(application, job),
                                                              controller.systemVersion());
                            if (isTested(application, versions)) {
                                if (completedAt.isPresent() && canTrigger(job, versions, application, stepJobs)) {
                                    jobs.add(deploymentJob(application, versions, change, job, reason, completedAt.get()));
                                }
                                if (!alreadyTriggered(application, versions)) {
                                    testJobs = emptyList();
                                }
                            }
                            else if (testJobs == null) {
                                testJobs = testJobs(application, versions,
                                                    String.format("Testing deployment for %s (%s)", job.jobName(), versions.toString()),
                                                    completedAt.orElseGet(clock::instant));
                            }
                        }
                        // Downstream steps must wait for this step; nothing further is available now.
                        completedAt = Optional.empty();
                    }
                    else { // All jobs are complete; find the time of completion of this step.
                        if (stepJobs.isEmpty()) { // No jobs means this is a delay step.
                            Duration delay = ((DeploymentSpec.Delay) step).duration();
                            completedAt = completedAt.map(at -> at.plus(delay)).filter(at -> !at.isAfter(clock.instant()));
                            reason += " after a delay of " + delay;
                        }
                        else {
                            completedAt = stepJobs.stream().map(job -> application.deploymentJobs().statusOf(job).get().lastCompleted().get().at()).max(naturalOrder());
                            reason = "Available change in " + stepJobs.stream().map(JobType::jobName).collect(joining(", "));
                        }
                    }
                }
            }
            if (testJobs == null) {
                testJobs = testJobs(application, Versions.from(application, controller.systemVersion()),
                                    "Testing last changes outside prod", clock.instant());
            }
            jobs.addAll(testJobs);
        });
        return Collections.unmodifiableList(jobs);
    }

    /** Returns whether given job should be triggered */
    private boolean canTrigger(JobType job, Versions versions, Application application, List<JobType> parallelJobs) {
        if (jobStateOf(application, job) != idle) return false;

        // Are we already running jobs which are not in the set which can run in parallel with this?
        if (parallelJobs != null && ! parallelJobs.containsAll(runningProductionJobs(application))) return false;

        // Don't deploy across zones while any of them is suspended.
        if (job.isProduction() && isSuspendedInAnotherZone(application, job.zone(controller.system()))) return false;

        return triggerAt(clock.instant(), job, versions, application);
    }

    /** Returns whether given job should be triggered */
    private boolean canTrigger(JobType job, Versions versions, Application application) {
        return canTrigger(job, versions, application, null);
    }

    /** Returns whether job can trigger at given instant */
    public boolean triggerAt(Instant instant, JobType job, Versions versions, Application application) {
        Optional<JobStatus> jobStatus = application.deploymentJobs().statusOf(job);
        if (!jobStatus.isPresent()) return true;
        if (jobStatus.get().isSuccess()) return true; // Success
        if (!jobStatus.get().lastCompleted().isPresent()) return true; // Never completed
        if (!jobStatus.get().firstFailing().isPresent()) return true; // Should not happen when !success
        if (!versions.targetsMatch(jobStatus.get().lastCompleted().get())) return true; // Always trigger as targets have changed
        if (application.deploymentSpec().upgradePolicy() == DeploymentSpec.UpgradePolicy.canary) return true; // Don't throttle canaries

        Instant firstFailing = jobStatus.get().firstFailing().get().at();
        Instant lastCompleted = jobStatus.get().lastCompleted().get().at();

        // Retry all errors immediately for 1 minute
        if (firstFailing.isAfter(instant.minus(Duration.ofMinutes(1)))) return true;

        // Retry out-of-capacity errors in test environments every minute
        if (job.isTest() && jobStatus.get().isOutOfCapacity()) {
            return lastCompleted.isBefore(instant.minus(Duration.ofMinutes(1)));
        }

        // Retry other errors with exponential backoff: every 10 minutes for the first hour ...
        if (firstFailing.isAfter(instant.minus(Duration.ofHours(1)))) {
            return lastCompleted.isBefore(instant.minus(Duration.ofMinutes(10)));
        }
        // ... then every 2 hours.
        return lastCompleted.isBefore(instant.minus(Duration.ofHours(2)));
    }

    /** Returns the production jobs of the application which are currently running. */
    private List<JobType> runningProductionJobs(Application application) {
        return application.deploymentJobs().jobStatus().keySet().parallelStream()
                          .filter(JobType::isProduction)
                          .filter(job -> isRunning(application, job))
                          .collect(toList());
    }

    /** Returns whether the given job is currently running; false if completed since last triggered, asking the build service otherwise. */
    private boolean isRunning(Application application, JobType jobType) {
        return    ! application.deploymentJobs().statusOf(jobType)
                               .flatMap(job -> job.lastCompleted().map(run -> run.at().isAfter(job.lastTriggered().get().at())))
                               .orElse(false)
               &&   EnumSet.of(running, queued).contains(jobStateOf(application, jobType));
    }

    /** Returns the state of the given job: from the internal job controller for internal deployments, from the build service otherwise. */
    private JobState jobStateOf(Application application, JobType jobType) {
        if (application.deploymentJobs().deployedInternally()) {
            Optional<Run> run = controller.jobController().last(application.id(), jobType);
            return run.isPresent() && ! run.get().hasEnded() ? JobState.running : JobState.idle;
        }
        return buildService.stateOf(BuildJob.of(application.id(),
                                                application.deploymentJobs().projectId().getAsLong(),
                                                jobType.jobName()));
    }

    /**
     * Returns whether the given change is complete for the given application for the given job.
     *
     * Any job is complete if the given change is already successful on that job.
     * A production job is also considered complete if its current change is strictly dominated by what
     * is already deployed in its zone, i.e., no parts of the change are upgrades, and the full current
     * change for the application downgrades the deployment, which is an acknowledgement that the deployed
     * version is broken somehow, such that the job may be locked in failure until a new version is released.
     */
    public boolean isComplete(Change change, Application application, JobType jobType) {
        Optional<Deployment> existingDeployment = deploymentFor(application, jobType);
        return    application.deploymentJobs().statusOf(jobType).flatMap(JobStatus::lastSuccess)
                             .map(job ->    change.platform().map(job.platform()::equals).orElse(true)
                                         && change.application().map(job.application()::equals).orElse(true))
                             .orElse(false)
               ||    jobType.isProduction()
                  && existingDeployment.map(deployment ->    ! isUpgrade(change, deployment)
                                                          &&   isDowngrade(application.change(), deployment))
                                       .orElse(false);
    }

    private static boolean isUpgrade(Change change, Deployment deployment) {
        return change.upgrades(deployment.version()) || change.upgrades(deployment.applicationVersion());
    }

    private static boolean isDowngrade(Change change, Deployment deployment) {
        return change.downgrades(deployment.version()) || change.downgrades(deployment.applicationVersion());
    }

    /** Returns whether the given versions are tested, i.e., both test jobs succeeded on them, or a production job was already triggered with them. */
    private boolean isTested(Application application, Versions versions) {
        return    testedIn(application, systemTest, versions)
               && testedIn(application, stagingTest, versions)
               || alreadyTriggered(application, versions);
    }

    /** Returns whether the given test type has succeeded on the given versions; throws for non-test job types. */
    public boolean testedIn(Application application, JobType testType, Versions versions) {
        if (testType == systemTest)
            return successOn(application, systemTest, versions).isPresent();
        if (testType == stagingTest)
            return successOn(application, stagingTest, versions).filter(versions::sourcesMatchIfPresent).isPresent();
        throw new IllegalArgumentException(testType + " is not a test job!");
    }

    /** Returns whether a production job has already been triggered on the given versions. */
    public boolean alreadyTriggered(Application application, Versions versions) {
        return application.deploymentJobs().jobStatus().values().stream()
                          .filter(job -> job.type().isProduction())
                          .anyMatch(job -> job.lastTriggered()
                                              .filter(versions::targetsMatch)
                                              .filter(versions::sourcesMatchIfPresent)
                                              .isPresent());
    }

    /** Returns whether a new application version should replace (or become) the current change. */
    private boolean acceptNewApplicationVersion(Application application) {
        // Hold the new version when revisions are blocked, unless the current change is failing.
        if (   ! application.deploymentSpec().canChangeRevisionAt(clock.instant())
            &&   application.changeAt(clock.instant()).application().isPresent()
            && ! application.deploymentJobs().hasFailures())
            return false;
        if (application.change().application().isPresent()) return true; // Replacing a previous application change is ok.
        if (application.deploymentJobs().hasFailures()) return true; // Allow changes to fix upgrade problems.
        return ! application.changeAt(clock.instant()).platform().isPresent(); // Otherwise, allow an application change if no platform change is running.
    }

    /** Returns the part of the given application's change which is not yet complete for all its (relevant) jobs. */
    private Change remainingChange(Application application) {
        DeploymentSteps steps = steps(application.deploymentSpec());
        List<JobType> jobs = steps.production().isEmpty()
                ? steps.testJobs()
                : steps.productionJobs();

        Change change = application.change();
        if (jobs.stream().allMatch(job -> isComplete(application.change().withoutApplication(), application, job)))
            change = change.withoutPlatform();

        if (jobs.stream().allMatch(job -> isComplete(application.change().withoutPlatform(), application, job)))
            change = change.withoutApplication();

        return change;
    }

    /**
     * Returns the list of test jobs that should run now, and that need to succeed on the given versions for it to be considered tested.
     */
    private List<Job> testJobs(Application application, Versions versions, String reason, Instant availableSince) {
        List<Job> jobs = new ArrayList<>();
        for (JobType jobType : steps(application.deploymentSpec()).testJobs()) {
            Optional<JobRun> completion = successOn(application, jobType, versions)
                    .filter(run -> versions.sourcesMatchIfPresent(run) || jobType == systemTest);
            if (!completion.isPresent() && canTrigger(jobType, versions, application)) {
                jobs.add(deploymentJob(application, versions, application.change(), jobType, reason, availableSince));
            }
        }
        return jobs;
    }

    /** Creates a {@link Job} for the given job type, marking it a retry if the last run was out of capacity. */
    private Job deploymentJob(Application application, Versions versions, Change change, JobType jobType, String reason, Instant availableSince) {
        boolean isRetry = application.deploymentJobs().statusOf(jobType)
                                     .map(JobStatus::isOutOfCapacity)
                                     .orElse(false);
        if (isRetry) reason += "; retrying on out of capacity";

        JobRun triggering = JobRun.triggering(versions.targetPlatform(), versions.targetApplication(),
                                              versions.sourcePlatform(), versions.sourceApplication(),
                                              reason, clock.instant());
        return new Job(application, triggering, jobType, availableSince, isRetry, change.application().isPresent());
    }

    /** Value object describing a job to trigger, carrying the metadata used for prioritisation. */
    private static class Job extends BuildJob {

        private final JobType jobType;
        private final JobRun triggering;
        // The instant at which the change this job carries became available to it.
        private final Instant availableSince;
        private final boolean isRetry;
        private final boolean isApplicationUpgrade;

        private Job(Application application, JobRun triggering, JobType jobType, Instant availableSince, boolean isRetry, boolean isApplicationUpgrade) {
            super(application.id(), application.deploymentJobs().projectId().getAsLong(), jobType.jobName());
            this.jobType = jobType;
            this.triggering = triggering;
            this.availableSince = availableSince;
            this.isRetry = isRetry;
            this.isApplicationUpgrade = isApplicationUpgrade;
        }

        JobType jobType() { return jobType; }
        Instant availableSince() { return availableSince; }
        boolean isRetry() { return isRetry; }
        boolean applicationUpgrade() { return isApplicationUpgrade; }

    }
}
// No objection!  -- NOTE(review): stray non-code text between class definitions; likely extraction residue. Kept as a comment so the file stays parseable.
private boolean isSuspendedInAnotherZone(Application application, ZoneId zone) { for (Deployment deployment : application.deployments().values()) { if ( ! deployment.zone().equals(zone) && controller.applications().isSuspended(new DeploymentId(application.id(), deployment.zone()))) return true; } return false; }
// return false;  -- NOTE(review): orphaned fragment duplicating the tail of the adjacent isSuspendedInAnotherZone method; not valid at this scope, remove once confirmed.
private boolean isSuspendedInAnotherZone(Application application, ZoneId zone) { for (Deployment deployment : application.deployments().values()) { if ( ! deployment.zone().equals(zone) && controller.applications().isSuspended(new DeploymentId(application.id(), deployment.zone()))) return true; } return false; }
class DeploymentTrigger { private final static Logger log = Logger.getLogger(DeploymentTrigger.class.getName()); private final Controller controller; private final Clock clock; private final BuildService buildService; private final JobController jobs; public DeploymentTrigger(Controller controller, BuildService buildService, Clock clock) { this.controller = Objects.requireNonNull(controller, "controller cannot be null"); this.clock = Objects.requireNonNull(clock, "clock cannot be null"); this.buildService = Objects.requireNonNull(buildService, "buildService cannot be null"); this.jobs = controller.jobController(); } public DeploymentSteps steps(DeploymentSpec spec) { return new DeploymentSteps(spec, controller::system); } /** * Records information when a job completes (successfully or not). This information is used when deciding what to * trigger next. */ public void notifyOfCompletion(JobReport report) { log.log(LogLevel.INFO, String.format("Notified of %s for %s of %s (%d)", report.jobError().map(e -> e.toString() + " error") .orElse("success"), report.jobType(), report.applicationId(), report.projectId())); if ( ! 
applications().get(report.applicationId()).isPresent()) { log.log(LogLevel.WARNING, "Ignoring completion of job of project '" + report.projectId() + "': Unknown application '" + report.applicationId() + "'"); return; } applications().lockOrThrow(report.applicationId(), application -> { JobRun triggering; if (report.jobType() == component) { ApplicationVersion applicationVersion = ApplicationVersion.from(report.sourceRevision().get(), report.buildNumber()); triggering = JobRun.triggering(application.get().oldestDeployedPlatform().orElse(controller.systemVersion()), applicationVersion, Optional.empty(), Optional.empty(), "Application commit", clock.instant()); if (report.success()) { if (acceptNewApplicationVersion(application.get())) application = application.withChange(application.get().change().with(applicationVersion)) .withOutstandingChange(Change.empty()); else application = application.withOutstandingChange(Change.of(applicationVersion)); } } else { triggering = application.get().deploymentJobs().statusOf(report.jobType()) .filter(job -> job.lastTriggered().isPresent() && job.lastCompleted() .map(completion -> ! completion.at().isAfter(job.lastTriggered().get().at())) .orElse(true)) .orElseThrow(() -> new IllegalStateException("Notified of completion of " + report.jobType().jobName() + " for " + report.applicationId() + ", but that has neither been triggered nor deployed")) .lastTriggered().get(); } application = application.withJobCompletion(report.projectId(), report.jobType(), triggering.completion(report.buildNumber(), clock.instant()), report.jobError()); application = application.withChange(remainingChange(application.get())); applications().store(application); }); } /** Returns a map of jobs that are scheduled to be run, grouped by the job type */ public Map<JobType, ? extends List<? 
extends BuildJob>> jobsToRun() { return computeReadyJobs().stream().collect(groupingBy(Job::jobType)); } /** * Finds and triggers jobs that can and should run but are currently not, and returns the number of triggered jobs. * * Only one job is triggered each run for test jobs, since their environments have limited capacity. */ public long triggerReadyJobs() { return computeReadyJobs().stream() .collect(partitioningBy(job -> job.jobType().isTest())) .entrySet().stream() .flatMap(entry -> (entry.getKey() ? entry.getValue().stream() .sorted(comparing(Job::isRetry) .thenComparing(Job::applicationUpgrade) .reversed() .thenComparing(Job::availableSince)) .collect(groupingBy(Job::jobType)) : entry.getValue().stream() .collect(groupingBy(Job::applicationId))) .values().stream() .map(jobs -> (Supplier<Long>) jobs.stream() .filter(this::trigger) .limit(entry.getKey() ? 1 : Long.MAX_VALUE)::count)) .parallel().map(Supplier::get).reduce(0L, Long::sum); } /** * Attempts to trigger the given job for the given application and returns the outcome. * * If the build service can not find the given job, or claims it is illegal to trigger it, * the project id is removed from the application owning the job, to prevent further trigger attempts. 
*/ public boolean trigger(Job job) { log.log(LogLevel.INFO, String.format("Triggering %s: %s", job, job.triggering)); try { applications().lockOrThrow(job.applicationId(), application -> { if (application.get().deploymentJobs().builtInternally()) jobs.start(job.applicationId(), job.jobType, new Versions(job.triggering.platform(), job.triggering.application(), job.triggering.sourcePlatform(), job.triggering.sourceApplication())); else buildService.trigger(job); applications().store(application.withJobTriggering(job.jobType, job.triggering)); }); return true; } catch (RuntimeException e) { log.log(LogLevel.WARNING, "Exception triggering " + job + ": " + e); if (e instanceof NoSuchElementException || e instanceof IllegalArgumentException) applications().lockOrThrow(job.applicationId(), application -> applications().store(application.withProjectId(OptionalLong.empty()))); return false; } } /** Force triggering of a job for given application. */ public List<JobType> forceTrigger(ApplicationId applicationId, JobType jobType, String user) { Application application = applications().require(applicationId); if (jobType == component) { if (application.deploymentJobs().builtInternally()) throw new IllegalArgumentException(applicationId + " has no component job we can trigger."); buildService.trigger(BuildJob.of(applicationId, application.deploymentJobs().projectId().getAsLong(), jobType.jobName())); return singletonList(component); } Versions versions = Versions.from(application.change(), application, deploymentFor(application, jobType), controller.systemVersion()); String reason = "Job triggered manually by " + user; return (jobType.isProduction() && ! isTested(application, versions) ? 
testJobs(application, versions, reason, clock.instant()).stream() : Stream.of(deploymentJob(application, versions, application.change(), jobType, reason, clock.instant()))) .peek(this::trigger) .map(Job::jobType).collect(toList()); } /** * Triggers a change of this application * * @param applicationId the application to trigger * @throws IllegalArgumentException if this application already has an ongoing change */ public void triggerChange(ApplicationId applicationId, Change change) { applications().lockOrThrow(applicationId, application -> { if (application.get().changeAt(controller.clock().instant()).isPresent() && ! application.get().deploymentJobs().hasFailures()) throw new IllegalArgumentException("Could not start " + change + " on " + application + ": " + application.get().change() + " is already in progress"); application = application.withChange(change); if (change.application().isPresent()) application = application.withOutstandingChange(Change.empty()); applications().store(application); }); } /** Cancels a platform upgrade of the given application, and an application upgrade as well if {@code keepApplicationChange}. 
*/ public void cancelChange(ApplicationId applicationId, boolean keepApplicationChange) { applications().lockOrThrow(applicationId, application -> { applications().store(application.withChange(application.get().change().application() .filter(__ -> keepApplicationChange) .map(Change::of) .orElse(Change.empty()))); }); } private ApplicationController applications() { return controller.applications(); } private Optional<JobRun> successOn(Application application, JobType jobType, Versions versions) { return application.deploymentJobs().statusOf(jobType).flatMap(JobStatus::lastSuccess) .filter(versions::targetsMatch); } private Optional<Deployment> deploymentFor(Application application, JobType jobType) { return Optional.ofNullable(application.deployments().get(jobType.zone(controller.system()))); } private static <T extends Comparable<T>> Optional<T> max(Optional<T> o1, Optional<T> o2) { return ! o1.isPresent() ? o2 : ! o2.isPresent() ? o1 : o1.get().compareTo(o2.get()) >= 0 ? o1 : o2; } /** Returns the set of all jobs which have changes to propagate from the upstream steps. */ private List<Job> computeReadyJobs() { return ApplicationList.from(applications().asList()) .notPullRequest() .withProjectId() .deploying() .idList().stream() .map(this::computeReadyJobs) .flatMap(Collection::stream) .collect(toList()); } /** * Finds the next step to trigger for the given application, if any, and returns these as a list. 
*/ private List<Job> computeReadyJobs(ApplicationId id) { List<Job> jobs = new ArrayList<>(); applications().get(id).ifPresent(application -> { Change change = application.changeAt(clock.instant()); Optional<Instant> completedAt = max(application.deploymentJobs().statusOf(systemTest) .<Instant>flatMap(job -> job.lastSuccess().map(JobRun::at)), application.deploymentJobs().statusOf(stagingTest) .<Instant>flatMap(job -> job.lastSuccess().map(JobRun::at))); String reason = "New change available"; List<Job> testJobs = null; DeploymentSteps steps = steps(application.deploymentSpec()); if (change.isPresent()) { for (Step step : steps.production()) { List<JobType> stepJobs = steps.toJobs(step); List<JobType> remainingJobs = stepJobs.stream().filter(job -> !isComplete(change, application, job)).collect(toList()); if (!remainingJobs.isEmpty()) { for (JobType job : remainingJobs) { Versions versions = Versions.from(change, application, deploymentFor(application, job), controller.systemVersion()); if (isTested(application, versions)) { if (completedAt.isPresent() && canTrigger(job, versions, application, stepJobs)) { jobs.add(deploymentJob(application, versions, change, job, reason, completedAt.get())); } if (!alreadyTriggered(application, versions)) { testJobs = emptyList(); } } else if (testJobs == null) { testJobs = testJobs(application, versions, String.format("Testing deployment for %s (%s)", job.jobName(), versions.toString()), completedAt.orElseGet(clock::instant)); } } completedAt = Optional.empty(); } else { if (stepJobs.isEmpty()) { Duration delay = ((DeploymentSpec.Delay) step).duration(); completedAt = completedAt.map(at -> at.plus(delay)).filter(at -> !at.isAfter(clock.instant())); reason += " after a delay of " + delay; } else { completedAt = stepJobs.stream().map(job -> application.deploymentJobs().statusOf(job).get().lastCompleted().get().at()).max(naturalOrder()); reason = "Available change in " + stepJobs.stream().map(JobType::jobName).collect(joining(", 
")); } } } } if (testJobs == null) { testJobs = testJobs(application, Versions.from(application, controller.systemVersion()), "Testing last changes outside prod", clock.instant()); } jobs.addAll(testJobs); }); return Collections.unmodifiableList(jobs); } /** Returns whether given job should be triggered */ private boolean canTrigger(JobType job, Versions versions, Application application, List<JobType> parallelJobs) { if (jobStateOf(application, job) != idle) return false; if (parallelJobs != null && ! parallelJobs.containsAll(runningProductionJobs(application))) return false; if (job.isProduction() && isSuspendedInAnotherZone(application, job.zone(controller.system()))) return false; return triggerAt(clock.instant(), job, versions, application); } /** Returns whether given job should be triggered */ private boolean canTrigger(JobType job, Versions versions, Application application) { return canTrigger(job, versions, application, null); } /** Returns whether job can trigger at given instant */ public boolean triggerAt(Instant instant, JobType job, Versions versions, Application application) { Optional<JobStatus> jobStatus = application.deploymentJobs().statusOf(job); if (!jobStatus.isPresent()) return true; if (jobStatus.get().isSuccess()) return true; if (!jobStatus.get().lastCompleted().isPresent()) return true; if (!jobStatus.get().firstFailing().isPresent()) return true; if (!versions.targetsMatch(jobStatus.get().lastCompleted().get())) return true; if (application.deploymentSpec().upgradePolicy() == DeploymentSpec.UpgradePolicy.canary) return true; Instant firstFailing = jobStatus.get().firstFailing().get().at(); Instant lastCompleted = jobStatus.get().lastCompleted().get().at(); if (firstFailing.isAfter(instant.minus(Duration.ofMinutes(1)))) return true; if (job.isTest() && jobStatus.get().isOutOfCapacity()) { return lastCompleted.isBefore(instant.minus(Duration.ofMinutes(1))); } if (firstFailing.isAfter(instant.minus(Duration.ofHours(1)))) { return 
lastCompleted.isBefore(instant.minus(Duration.ofMinutes(10))); } return lastCompleted.isBefore(instant.minus(Duration.ofHours(2))); } private List<JobType> runningProductionJobs(Application application) { return application.deploymentJobs().jobStatus().keySet().parallelStream() .filter(JobType::isProduction) .filter(job -> isRunning(application, job)) .collect(toList()); } /** Returns whether the given job is currently running; false if completed since last triggered, asking the build service otherwise. */ private boolean isRunning(Application application, JobType jobType) { return ! application.deploymentJobs().statusOf(jobType) .flatMap(job -> job.lastCompleted().map(run -> run.at().isAfter(job.lastTriggered().get().at()))) .orElse(false) && EnumSet.of(running, queued).contains(jobStateOf(application, jobType)); } private JobState jobStateOf(Application application, JobType jobType) { if (application.deploymentJobs().builtInternally()) { Optional<Run> run = controller.jobController().last(application.id(), jobType); return run.isPresent() && ! run.get().hasEnded() ? JobState.running : JobState.idle; } return buildService.stateOf(BuildJob.of(application.id(), application.deploymentJobs().projectId().getAsLong(), jobType.jobName())); } /** * Returns whether the given change is complete for the given application for the given job. * * Any job is complete if the given change is already successful on that job. * A production job is also considered complete if its current change is strictly dominated by what * is already deployed in its zone, i.e., no parts of the change are upgrades, and the full current * change for the application downgrades the deployment, which is an acknowledgement that the deployed * version is broken somehow, such that the job may be locked in failure until a new version is released. 
*/ public boolean isComplete(Change change, Application application, JobType jobType) { Optional<Deployment> existingDeployment = deploymentFor(application, jobType); return application.deploymentJobs().statusOf(jobType).flatMap(JobStatus::lastSuccess) .map(job -> change.platform().map(job.platform()::equals).orElse(true) && change.application().map(job.application()::equals).orElse(true)) .orElse(false) || jobType.isProduction() && existingDeployment.map(deployment -> ! isUpgrade(change, deployment) && isDowngrade(application.change(), deployment)) .orElse(false); } private static boolean isUpgrade(Change change, Deployment deployment) { return change.upgrades(deployment.version()) || change.upgrades(deployment.applicationVersion()); } private static boolean isDowngrade(Change change, Deployment deployment) { return change.downgrades(deployment.version()) || change.downgrades(deployment.applicationVersion()); } private boolean isTested(Application application, Versions versions) { return testedIn(application, systemTest, versions) && testedIn(application, stagingTest, versions) || alreadyTriggered(application, versions); } public boolean testedIn(Application application, JobType testType, Versions versions) { if (testType == systemTest) return successOn(application, systemTest, versions).isPresent(); if (testType == stagingTest) return successOn(application, stagingTest, versions).filter(versions::sourcesMatchIfPresent).isPresent(); throw new IllegalArgumentException(testType + " is not a test job!"); } public boolean alreadyTriggered(Application application, Versions versions) { return application.deploymentJobs().jobStatus().values().stream() .filter(job -> job.type().isProduction()) .anyMatch(job -> job.lastTriggered() .filter(versions::targetsMatch) .filter(versions::sourcesMatchIfPresent) .isPresent()); } private boolean acceptNewApplicationVersion(Application application) { if ( ! 
application.deploymentSpec().canChangeRevisionAt(clock.instant()) && application.changeAt(clock.instant()).application().isPresent() && ! application.deploymentJobs().hasFailures()) return false; if (application.change().application().isPresent()) return true; if (application.deploymentJobs().hasFailures()) return true; return ! application.changeAt(clock.instant()).platform().isPresent(); } private Change remainingChange(Application application) { DeploymentSteps steps = steps(application.deploymentSpec()); List<JobType> jobs = steps.production().isEmpty() ? steps.testJobs() : steps.productionJobs(); Change change = application.change(); if (jobs.stream().allMatch(job -> isComplete(application.change().withoutApplication(), application, job))) change = change.withoutPlatform(); if (jobs.stream().allMatch(job -> isComplete(application.change().withoutPlatform(), application, job))) change = change.withoutApplication(); return change; } /** * Returns the list of test jobs that should run now, and that need to succeed on the given versions for it to be considered tested. 
*/ private List<Job> testJobs(Application application, Versions versions, String reason, Instant availableSince) { List<Job> jobs = new ArrayList<>(); for (JobType jobType : steps(application.deploymentSpec()).testJobs()) { Optional<JobRun> completion = successOn(application, jobType, versions) .filter(run -> versions.sourcesMatchIfPresent(run) || jobType == systemTest); if (!completion.isPresent() && canTrigger(jobType, versions, application)) { jobs.add(deploymentJob(application, versions, application.change(), jobType, reason, availableSince)); } } return jobs; } private Job deploymentJob(Application application, Versions versions, Change change, JobType jobType, String reason, Instant availableSince) { boolean isRetry = application.deploymentJobs().statusOf(jobType) .map(JobStatus::isOutOfCapacity) .orElse(false); if (isRetry) reason += "; retrying on out of capacity"; JobRun triggering = JobRun.triggering(versions.targetPlatform(), versions.targetApplication(), versions.sourcePlatform(), versions.sourceApplication(), reason, clock.instant()); return new Job(application, triggering, jobType, availableSince, isRetry, change.application().isPresent()); } private static class Job extends BuildJob { private final JobType jobType; private final JobRun triggering; private final Instant availableSince; private final boolean isRetry; private final boolean isApplicationUpgrade; private Job(Application application, JobRun triggering, JobType jobType, Instant availableSince, boolean isRetry, boolean isApplicationUpgrade) { super(application.id(), application.deploymentJobs().projectId().getAsLong(), jobType.jobName()); this.jobType = jobType; this.triggering = triggering; this.availableSince = availableSince; this.isRetry = isRetry; this.isApplicationUpgrade = isApplicationUpgrade; } JobType jobType() { return jobType; } Instant availableSince() { return availableSince; } boolean isRetry() { return isRetry; } boolean applicationUpgrade() { return isApplicationUpgrade; } } 
}
/**
 * Decides which deployment jobs to trigger for each application, based on the application's
 * current {@code Change}, its deployment spec steps, and the completion status of earlier jobs,
 * and triggers them either through the internal {@code JobController} or the external
 * {@code BuildService}.
 */
class DeploymentTrigger {

    private final static Logger log = Logger.getLogger(DeploymentTrigger.class.getName());

    private final Controller controller;
    private final Clock clock; // injected clock — all "now" decisions go through this, which keeps the trigger logic testable
    private final BuildService buildService;
    private final JobController jobs;

    /**
     * Creates a deployment trigger.
     *
     * @param controller the controller to read and store application state through; must not be null
     * @param buildService the external build service used to trigger and query non-internal jobs; must not be null
     * @param clock the clock used for all time-based trigger decisions; must not be null
     */
    public DeploymentTrigger(Controller controller, BuildService buildService, Clock clock) {
        this.controller = Objects.requireNonNull(controller, "controller cannot be null");
        this.clock = Objects.requireNonNull(clock, "clock cannot be null");
        this.buildService = Objects.requireNonNull(buildService, "buildService cannot be null");
        this.jobs = controller.jobController();
    }

    /** Returns the deployment steps of the given deployment spec, resolved for this controller's system. */
    public DeploymentSteps steps(DeploymentSpec spec) {
        return new DeploymentSteps(spec, controller::system);
    }

    /**
     * Records information when a job completes (successfully or not). This information is used when deciding what to
     * trigger next.
     */
    public void notifyOfCompletion(JobReport report) {
        log.log(LogLevel.INFO, String.format("Notified of %s for %s of %s (%d)",
                                             report.jobError().map(e -> e.toString() + " error")
                                                   .orElse("success"),
                                             report.jobType(), report.applicationId(), report.projectId()));
        // Reports for applications we don't know are logged and dropped rather than failing the notification.
        if ( ! applications().get(report.applicationId()).isPresent()) {
            log.log(LogLevel.WARNING, "Ignoring completion of job of project '" + report.projectId() +
                                      "': Unknown application '" + report.applicationId() + "'");
            return;
        }
        applications().lockOrThrow(report.applicationId(), application -> {
            JobRun triggering;
            if (report.jobType() == component) {
                // A component (build) job completion carries a new application version.
                ApplicationVersion applicationVersion = ApplicationVersion.from(report.sourceRevision().get(), report.buildNumber());
                triggering = JobRun.triggering(application.get().oldestDeployedPlatform().orElse(controller.systemVersion()),
                                               applicationVersion,
                                               Optional.empty(), Optional.empty(), "Application commit", clock.instant());
                if (report.success()) {
                    if (acceptNewApplicationVersion(application.get())) {
                        application = application.withChange(application.get().change().with(applicationVersion))
                                                 .withOutstandingChange(Change.empty());
                        // Internally deployed applications: abort runs for the now-superseded change.
                        if (application.get().deploymentJobs().deployedInternally())
                            for (Run run : jobs.active())
                                if (run.id().application().equals(report.applicationId()))
                                    jobs.abort(run.id());
                    }
                    else
                        // Not accepted now — park the version as the outstanding change instead.
                        application = application.withOutstandingChange(Change.of(applicationVersion));
                }
            }
            else {
                // A deployment job completion must correspond to a recorded triggering with no later completion;
                // otherwise the report is inconsistent with our state, which is a hard error.
                triggering = application.get().deploymentJobs().statusOf(report.jobType())
                                        .filter(job ->    job.lastTriggered().isPresent()
                                                       && job.lastCompleted()
                                                             .map(completion -> ! completion.at().isAfter(job.lastTriggered().get().at()))
                                                             .orElse(true))
                                        .orElseThrow(() -> new IllegalStateException("Notified of completion of " + report.jobType().jobName() +
                                                                                     " for " + report.applicationId() +
                                                                                     ", but that has neither been triggered nor deployed"))
                                        .lastTriggered().get();
            }
            application = application.withJobCompletion(report.projectId(), report.jobType(),
                                                        triggering.completion(report.buildNumber(), clock.instant()),
                                                        report.jobError());
            // Shrink the application's change by whatever this completion made complete.
            application = application.withChange(remainingChange(application.get()));
            applications().store(application);
        });
    }

    /** Returns a map of jobs that are scheduled to be run, grouped by the job type */
    public Map<JobType, ? extends List<? extends BuildJob>> jobsToRun() {
        return computeReadyJobs().stream().collect(groupingBy(Job::jobType));
    }

    /**
     * Finds and triggers jobs that can and should run but are currently not, and returns the number of triggered jobs.
     *
     * Only one job is triggered each run for test jobs, since their environments have limited capacity.
     */
    public long triggerReadyJobs() {
        // Partition by test vs production: test jobs are grouped by job type (shared capacity, retries first,
        // then application upgrades, then oldest available) and limited to one trigger per type; production jobs
        // are grouped per application with no limit.
        return computeReadyJobs().stream()
                                 .collect(partitioningBy(job -> job.jobType().isTest()))
                                 .entrySet().stream()
                                 .flatMap(entry -> (entry.getKey()
                                         ? entry.getValue().stream()
                                                .sorted(comparing(Job::isRetry)
                                                                .thenComparing(Job::applicationUpgrade)
                                                                .reversed()
                                                                .thenComparing(Job::availableSince))
                                                .collect(groupingBy(Job::jobType))
                                         : entry.getValue().stream()
                                                .collect(groupingBy(Job::applicationId)))
                                         .values().stream()
                                         .map(jobs -> (Supplier<Long>) jobs.stream()
                                                                           .filter(this::trigger)
                                                                           .limit(entry.getKey() ? 1 : Long.MAX_VALUE)::count))
                                 .parallel().map(Supplier::get).reduce(0L, Long::sum);
    }

    /**
     * Attempts to trigger the given job for the given application and returns the outcome.
     *
     * If the build service can not find the given job, or claims it is illegal to trigger it,
     * the project id is removed from the application owning the job, to prevent further trigger attempts.
     */
    public boolean trigger(Job job) {
        log.log(LogLevel.INFO, String.format("Triggering %s: %s", job, job.triggering));
        try {
            applications().lockOrThrow(job.applicationId(), application -> {
                if (application.get().deploymentJobs().deployedInternally())
                    jobs.start(job.applicationId(), job.jobType, new Versions(job.triggering.platform(),
                                                                              job.triggering.application(),
                                                                              job.triggering.sourcePlatform(),
                                                                              job.triggering.sourceApplication()));
                else
                    buildService.trigger(job);
                applications().store(application.withJobTriggering(job.jobType, job.triggering));
            });
            return true;
        }
        catch (RuntimeException e) {
            log.log(LogLevel.WARNING, "Exception triggering " + job + ": " + e);
            // These two exception types are taken to mean the project is unknown/illegal to the build service;
            // dropping the project id stops further trigger attempts for it.
            if (e instanceof NoSuchElementException || e instanceof IllegalArgumentException)
                applications().lockOrThrow(job.applicationId(), application ->
                        applications().store(application.withProjectId(OptionalLong.empty())));
            return false;
        }
    }

    /** Force triggering of a job for given application. */
    public List<JobType> forceTrigger(ApplicationId applicationId, JobType jobType, String user) {
        Application application = applications().require(applicationId);
        if (jobType == component) {
            if (application.deploymentJobs().deployedInternally())
                throw new IllegalArgumentException(applicationId + " has no component job we can trigger.");
            buildService.trigger(BuildJob.of(applicationId, application.deploymentJobs().projectId().getAsLong(), jobType.jobName()));
            return singletonList(component);
        }
        Versions versions = Versions.from(application.change(), application, deploymentFor(application, jobType),
                                          controller.systemVersion());
        String reason = "Job triggered manually by " + user;
        // An untested production job triggers the test jobs for its versions instead of the production job itself.
        return (jobType.isProduction() && ! isTested(application, versions)
                ? testJobs(application, versions, reason, clock.instant()).stream()
                : Stream.of(deploymentJob(application, versions, application.change(), jobType, reason, clock.instant())))
                .peek(this::trigger)
                .map(Job::jobType).collect(toList());
    }

    /**
     * Triggers a change of this application
     *
     * @param applicationId the application to trigger
     * @throws IllegalArgumentException if this application already has an ongoing change
     */
    public void triggerChange(ApplicationId applicationId, Change change) {
        applications().lockOrThrow(applicationId, application -> {
            if (application.get().changeAt(controller.clock().instant()).isPresent() && ! application.get().deploymentJobs().hasFailures())
                throw new IllegalArgumentException("Could not start " + change + " on " + application + ": " +
                                                   application.get().change() + " is already in progress");
            application = application.withChange(change);
            if (change.application().isPresent())
                application = application.withOutstandingChange(Change.empty());
            applications().store(application);
        });
    }

    /** Cancels a platform upgrade of the given application, and an application upgrade as well if {@code keepApplicationChange}. */
    public void cancelChange(ApplicationId applicationId, boolean keepApplicationChange) {
        applications().lockOrThrow(applicationId, application -> {
            applications().store(application.withChange(application.get().change().application()
                                                                   .filter(__ -> keepApplicationChange)
                                                                   .map(Change::of)
                                                                   .orElse(Change.empty())));
        });
    }

    /** Convenience accessor for the application controller. */
    private ApplicationController applications() { return controller.applications(); }

    /** Returns the last success of the given job, if it targets the given versions. */
    private Optional<JobRun> successOn(Application application, JobType jobType, Versions versions) {
        return application.deploymentJobs().statusOf(jobType).flatMap(JobStatus::lastSuccess)
                          .filter(versions::targetsMatch);
    }

    /** Returns the deployment, if any, in the zone the given job deploys to. */
    private Optional<Deployment> deploymentFor(Application application, JobType jobType) {
        return Optional.ofNullable(application.deployments().get(jobType.zone(controller.system())));
    }

    /** Returns the greater of two optionals; an empty optional loses to a present one. */
    private static <T extends Comparable<T>> Optional<T> max(Optional<T> o1, Optional<T> o2) {
        return ! o1.isPresent() ? o2 : ! o2.isPresent() ? o1 : o1.get().compareTo(o2.get()) >= 0 ? o1 : o2;
    }

    /** Returns the set of all jobs which have changes to propagate from the upstream steps. */
    private List<Job> computeReadyJobs() {
        return ApplicationList.from(applications().asList())
                              .notPullRequest()
                              .withProjectId()
                              .deploying()
                              .idList().stream()
                              .map(this::computeReadyJobs)
                              .flatMap(Collection::stream)
                              .collect(toList());
    }

    /**
     * Finds the next step to trigger for the given application, if any, and returns these as a list.
     */
    private List<Job> computeReadyJobs(ApplicationId id) {
        List<Job> jobs = new ArrayList<>();
        applications().get(id).ifPresent(application -> {
            Change change = application.changeAt(clock.instant());
            // "completedAt" threads through the step loop: initially the latest test-job success,
            // then the completion time of the previous production step (or empty if that step is still running).
            Optional<Instant> completedAt = max(application.deploymentJobs().statusOf(systemTest)
                                                           .<Instant>flatMap(job -> job.lastSuccess().map(JobRun::at)),
                                                application.deploymentJobs().statusOf(stagingTest)
                                                           .<Instant>flatMap(job -> job.lastSuccess().map(JobRun::at)));
            String reason = "New change available";
            List<Job> testJobs = null; // null means "no test jobs computed yet" — distinct from an empty list
            DeploymentSteps steps = steps(application.deploymentSpec());
            if (change.isPresent()) {
                for (Step step : steps.production()) {
                    List<JobType> stepJobs = steps.toJobs(step);
                    List<JobType> remainingJobs = stepJobs.stream().filter(job -> !isComplete(change, application, job)).collect(toList());
                    if (!remainingJobs.isEmpty()) {
                        for (JobType job : remainingJobs) {
                            Versions versions = Versions.from(change, application, deploymentFor(application, job),
                                                              controller.systemVersion());
                            if (isTested(application, versions)) {
                                if (completedAt.isPresent() && canTrigger(job, versions, application, stepJobs)) {
                                    jobs.add(deploymentJob(application, versions, change, job, reason, completedAt.get()));
                                }
                                if (!alreadyTriggered(application, versions)) {
                                    testJobs = emptyList();
                                }
                            }
                            else if (testJobs == null) {
                                // Versions not yet tested — schedule the test jobs for them instead.
                                testJobs = testJobs(application, versions,
                                                    String.format("Testing deployment for %s (%s)",
                                                                  job.jobName(), versions.toString()),
                                                    completedAt.orElseGet(clock::instant));
                            }
                        }
                        completedAt = Optional.empty(); // this step is incomplete, so later steps have no "ready since"
                    }
                    else {
                        if (stepJobs.isEmpty()) {
                            // A step with no jobs is a delay step; push completedAt forward, or clear it
                            // if the delay has not yet elapsed.
                            Duration delay = ((DeploymentSpec.Delay) step).duration();
                            completedAt = completedAt.map(at -> at.plus(delay)).filter(at -> !at.isAfter(clock.instant()));
                            reason += " after a delay of " + delay;
                        }
                        else {
                            // Step fully complete: later steps become ready at its latest job completion.
                            completedAt = stepJobs.stream().map(job -> application.deploymentJobs().statusOf(job).get().lastCompleted().get().at()).max(naturalOrder());
                            reason = "Available change in " + stepJobs.stream().map(JobType::jobName).collect(joining(", "));
                        }
                    }
                }
            }
            if (testJobs == null) {
                testJobs = testJobs(application, Versions.from(application, controller.systemVersion()),
                                    "Testing last changes outside prod", clock.instant());
            }
            jobs.addAll(testJobs);
        });
        return Collections.unmodifiableList(jobs);
    }

    /** Returns whether given job should be triggered */
    private boolean canTrigger(JobType job, Versions versions, Application application, List<JobType> parallelJobs) {
        if (jobStateOf(application, job) != idle) return false;
        // Jobs of other steps must not be running; jobs of the same (parallel) step may be.
        if (parallelJobs != null && ! parallelJobs.containsAll(runningProductionJobs(application))) return false;
        // NOTE(review): isSuspendedInAnotherZone is not defined in this chunk — presumably declared
        // elsewhere in this class or a superclass; verify.
        if (job.isProduction() && isSuspendedInAnotherZone(application, job.zone(controller.system()))) return false;
        return triggerAt(clock.instant(), job, versions, application);
    }

    /** Returns whether given job should be triggered */
    private boolean canTrigger(JobType job, Versions versions, Application application) {
        return canTrigger(job, versions, application, null);
    }

    /** Returns whether job can trigger at given instant */
    public boolean triggerAt(Instant instant, JobType job, Versions versions, Application application) {
        Optional<JobStatus> jobStatus = application.deploymentJobs().statusOf(job);
        if (!jobStatus.isPresent()) return true;                                   // never run before
        if (jobStatus.get().isSuccess()) return true;                              // currently succeeding
        if (!jobStatus.get().lastCompleted().isPresent()) return true;
        if (!jobStatus.get().firstFailing().isPresent()) return true;
        if (!versions.targetsMatch(jobStatus.get().lastCompleted().get())) return true; // failing on a different change
        if (application.deploymentSpec().upgradePolicy() == DeploymentSpec.UpgradePolicy.canary) return true; // canaries always retry
        // Failing on the same versions: back off — immediately within the first minute,
        // then every 10 minutes within the first hour, then every 2 hours.
        Instant firstFailing = jobStatus.get().firstFailing().get().at();
        Instant lastCompleted = jobStatus.get().lastCompleted().get().at();
        if (firstFailing.isAfter(instant.minus(Duration.ofMinutes(1)))) return true;
        if (job.isTest() && jobStatus.get().isOutOfCapacity()) {
            return lastCompleted.isBefore(instant.minus(Duration.ofMinutes(1)));
        }
        if (firstFailing.isAfter(instant.minus(Duration.ofHours(1)))) {
            return lastCompleted.isBefore(instant.minus(Duration.ofMinutes(10)));
        }
        return lastCompleted.isBefore(instant.minus(Duration.ofHours(2)));
    }

    /** Returns the production jobs of the given application which are currently running. */
    private List<JobType> runningProductionJobs(Application application) {
        return application.deploymentJobs().jobStatus().keySet().parallelStream()
                          .filter(JobType::isProduction)
                          .filter(job -> isRunning(application, job))
                          .collect(toList());
    }

    /** Returns whether the given job is currently running; false if completed since last triggered, asking the build service otherwise. */
    private boolean isRunning(Application application, JobType jobType) {
        return    ! application.deploymentJobs().statusOf(jobType)
                               .flatMap(job -> job.lastCompleted().map(run -> run.at().isAfter(job.lastTriggered().get().at())))
                               .orElse(false)
               && EnumSet.of(running, queued).contains(jobStateOf(application, jobType));
    }

    /** Returns the state of the given job, from the internal job controller or the external build service. */
    private JobState jobStateOf(Application application, JobType jobType) {
        if (application.deploymentJobs().deployedInternally()) {
            Optional<Run> run = controller.jobController().last(application.id(), jobType);
            return run.isPresent() && ! run.get().hasEnded() ? JobState.running : JobState.idle;
        }
        return buildService.stateOf(BuildJob.of(application.id(),
                                                application.deploymentJobs().projectId().getAsLong(),
                                                jobType.jobName()));
    }

    /**
     * Returns whether the given change is complete for the given application for the given job.
     *
     * Any job is complete if the given change is already successful on that job.
     * A production job is also considered complete if its current change is strictly dominated by what
     * is already deployed in its zone, i.e., no parts of the change are upgrades, and the full current
     * change for the application downgrades the deployment, which is an acknowledgement that the deployed
     * version is broken somehow, such that the job may be locked in failure until a new version is released.
     */
    public boolean isComplete(Change change, Application application, JobType jobType) {
        Optional<Deployment> existingDeployment = deploymentFor(application, jobType);
        return       application.deploymentJobs().statusOf(jobType).flatMap(JobStatus::lastSuccess)
                                .map(job ->    change.platform().map(job.platform()::equals).orElse(true)
                                            && change.application().map(job.application()::equals).orElse(true))
                                .orElse(false)
               ||    jobType.isProduction()
                  && existingDeployment.map(deployment ->    ! isUpgrade(change, deployment)
                                                          && isDowngrade(application.change(), deployment))
                                       .orElse(false);
    }

    /** Returns whether the given change upgrades either the platform or application of the given deployment. */
    private static boolean isUpgrade(Change change, Deployment deployment) {
        return change.upgrades(deployment.version()) || change.upgrades(deployment.applicationVersion());
    }

    /** Returns whether the given change downgrades either the platform or application of the given deployment. */
    private static boolean isDowngrade(Change change, Deployment deployment) {
        return change.downgrades(deployment.version()) || change.downgrades(deployment.applicationVersion());
    }

    /** Returns whether the given versions are tested (both test jobs succeeded), or already triggered in production. */
    private boolean isTested(Application application, Versions versions) {
        return    testedIn(application, systemTest, versions)
               && testedIn(application, stagingTest, versions)
               || alreadyTriggered(application, versions);
    }

    /** Returns whether the given versions have succeeded in the given test job; staging also requires matching sources. */
    public boolean testedIn(Application application, JobType testType, Versions versions) {
        if (testType == systemTest)
            return successOn(application, systemTest, versions).isPresent();
        if (testType == stagingTest)
            return successOn(application, stagingTest, versions).filter(versions::sourcesMatchIfPresent).isPresent();
        throw new IllegalArgumentException(testType + " is not a test job!");
    }

    /** Returns whether any production job has already been triggered on the given versions. */
    public boolean alreadyTriggered(Application application, Versions versions) {
        return application.deploymentJobs().jobStatus().values().stream()
                          .filter(job -> job.type().isProduction())
                          .anyMatch(job -> job.lastTriggered()
                                              .filter(versions::targetsMatch)
                                              .filter(versions::sourcesMatchIfPresent)
                                              .isPresent());
    }

    /** Returns whether a newly built application version should become the application's current change. */
    private boolean acceptNewApplicationVersion(Application application) {
        // Outside the allowed revision-change window, with an application change in flight and no failures: reject.
        if (    ! application.deploymentSpec().canChangeRevisionAt(clock.instant())
             && application.changeAt(clock.instant()).application().isPresent()
             && ! application.deploymentJobs().hasFailures())
            return false;
        if (application.change().application().isPresent()) return true; // an application change may be replaced by a newer one
        if (application.deploymentJobs().hasFailures()) return true;     // a new version may fix the failure
        return ! application.changeAt(clock.instant()).platform().isPresent(); // don't interleave with an active platform change
    }

    /** Returns the part of the application's change which is not yet complete for all relevant jobs. */
    private Change remainingChange(Application application) {
        DeploymentSteps steps = steps(application.deploymentSpec());
        List<JobType> jobs = steps.production().isEmpty()
                ? steps.testJobs()
                : steps.productionJobs();
        Change change = application.change();
        if (jobs.stream().allMatch(job -> isComplete(application.change().withoutApplication(), application, job)))
            change = change.withoutPlatform();
        if (jobs.stream().allMatch(job -> isComplete(application.change().withoutPlatform(), application, job)))
            change = change.withoutApplication();
        return change;
    }

    /**
     * Returns the list of test jobs that should run now, and that need to succeed on the given versions for it to be considered tested.
     */
    private List<Job> testJobs(Application application, Versions versions, String reason, Instant availableSince) {
        List<Job> jobs = new ArrayList<>();
        for (JobType jobType : steps(application.deploymentSpec()).testJobs()) {
            Optional<JobRun> completion = successOn(application, jobType, versions)
                    .filter(run -> versions.sourcesMatchIfPresent(run) || jobType == systemTest);
            if (!completion.isPresent() && canTrigger(jobType, versions, application)) {
                jobs.add(deploymentJob(application, versions, application.change(), jobType, reason, availableSince));
            }
        }
        return jobs;
    }

    /** Creates a Job for the given job type, marking it a retry if its last run was out of capacity. */
    private Job deploymentJob(Application application, Versions versions, Change change, JobType jobType, String reason, Instant availableSince) {
        boolean isRetry = application.deploymentJobs().statusOf(jobType)
                                     .map(JobStatus::isOutOfCapacity)
                                     .orElse(false);
        if (isRetry) reason += "; retrying on out of capacity";

        JobRun triggering = JobRun.triggering(versions.targetPlatform(), versions.targetApplication(),
                                              versions.sourcePlatform(), versions.sourceApplication(),
                                              reason, clock.instant());
        return new Job(application, triggering, jobType, availableSince, isRetry, change.application().isPresent());
    }

    /** A triggerable job, carrying its type, the run that triggers it, and ordering metadata for prioritisation. */
    private static class Job extends BuildJob {

        private final JobType jobType;
        private final JobRun triggering;
        private final Instant availableSince; // when the change this job carries became available — used for ordering
        private final boolean isRetry;
        private final boolean isApplicationUpgrade;

        private Job(Application application, JobRun triggering, JobType jobType, Instant availableSince, boolean isRetry, boolean isApplicationUpgrade) {
            super(application.id(), application.deploymentJobs().projectId().getAsLong(), jobType.jobName());
            this.jobType = jobType;
            this.triggering = triggering;
            this.availableSince = availableSince;
            this.isRetry = isRetry;
            this.isApplicationUpgrade = isApplicationUpgrade;
        }

        JobType jobType() { return jobType; }
        Instant availableSince() { return availableSince; }
        boolean isRetry() { return isRetry; }
        boolean applicationUpgrade() { return isApplicationUpgrade; }

    }

}
Can we remove these methods, or restrict their accessibility? Since they are public functions in a `@PublicApi`/`@ExportPackage` package, removing or narrowing them may break external consumers — please check for outside callers first.
/**
 * Returns the routing table registered under the given name.
 *
 * @param name the name of the routing table to look up
 * @return the matching routing table, or null if no table map is set or no table has that name
 */
public RoutingTable getRoutingTable(String name) {
    Map<String, RoutingTable> current = tablesRef.get();
    return current == null ? null : current.get(name);
}
Map<String, RoutingTable> tables = tablesRef.get();
/**
 * Returns the routing table registered under the given name, or null if none.
 *
 * <p>Reads the current table map from {@code tablesRef}; the reference may hold null
 * — presumably before any configuration has been applied, TODO confirm — in which case
 * null is returned.</p>
 *
 * @param name the name of the routing table to look up
 * @return the matching routing table, or null
 */
public RoutingTable getRoutingTable(String name) {
    Map<String, RoutingTable> tables = tablesRef.get();
    if (tables == null) {
        return null;
    }
    return tables.get(name);
}
/**
 * Central hub wiring a Network to registered sessions (source, intermediate,
 * destination), protocols and routing tables. Also throttles pending
 * (received but not yet replied-to) messages against maxPendingCount and
 * maxPendingSize.
 *
 * Fixes in this revision: 'log' made final; destroy() now re-interrupts the
 * current thread instead of swallowing InterruptedException; getPendingSize()
 * made public and getMaxPendingSize() added, for consistency with the sibling
 * copy of this class; duplicated getRoutingTable(Utf8String) javadoc removed.
 */
class MessageBus implements ConfigHandler, NetworkOwner, MessageHandler, ReplyHandler { private static final Logger log = Logger.getLogger(MessageBus.class.getName()); private final AtomicBoolean destroyed = new AtomicBoolean(false); private final ProtocolRepository protocolRepository = new ProtocolRepository(); private final AtomicReference<Map<String, RoutingTable>> tablesRef = new AtomicReference<>(null); private final CopyOnWriteHashMap<String, MessageHandler> sessions = new CopyOnWriteHashMap<>(); private final Network net; private final Messenger msn; private final Resender resender; private int maxPendingCount; private int maxPendingSize; private int pendingCount = 0; private int pendingSize = 0; private final Thread careTaker = new Thread(this::sendBlockedMessages); private final ConcurrentHashMap<SendBlockedMessages, Long> blockedSenders = new ConcurrentHashMap<>(); private MessageBusMetricSet metrics = new MessageBusMetricSet(); public interface SendBlockedMessages { /** * Do what you want, but dont block. * You will be called regularly until you signal you are done * @return true unless you are done */ boolean trySend(); } public void register(SendBlockedMessages sender) { blockedSenders.put(sender, SystemTimer.INSTANCE.milliTime()); } private void sendBlockedMessages() { while (! destroyed.get()) { for (SendBlockedMessages sender : blockedSenders.keySet()) { if (!sender.trySend()) { blockedSenders.remove(sender); } } try { Thread.sleep(10); } catch (InterruptedException e) { return; } } } /** * <p>Convenience constructor that proxies {@link * MessageBusParams)} by adding the given protocols to a default {@link * MessageBusParams} object.</p> * * @param net The network to associate with. * @param protocols An array of protocols to register. */ public MessageBus(Network net, List<Protocol> protocols) { this(net, new MessageBusParams().addProtocols(protocols)); } /** * <p>Constructs an instance of message bus. 
This requires a network object * that it will associate with. This assignment may not change during the * lifetime of this message bus.</p> * * @param net The network to associate with. * @param params The parameters that controls this bus. */ public MessageBus(Network net, MessageBusParams params) { maxPendingCount = params.getMaxPendingCount(); maxPendingSize = params.getMaxPendingSize(); for (int i = 0, len = params.getNumProtocols(); i < len; ++i) { protocolRepository.putProtocol(params.getProtocol(i)); if (params.getProtocol(i).getMetrics() != null) { metrics.protocols.addMetric(params.getProtocol(i).getMetrics()); } } this.net = net; net.attach(this); if ( ! net.waitUntilReady(120)) throw new IllegalStateException("Network failed to become ready in time."); msn = new Messenger(); RetryPolicy retryPolicy = params.getRetryPolicy(); if (retryPolicy != null) { resender = new Resender(retryPolicy); msn.addRecurrentTask(new ResenderTask(resender)); } else { resender = null; } careTaker.setDaemon(true); careTaker.start(); msn.start(); } /** * <p>Returns the metrics used by this messagebus.</p> * * @return The metric set. */ public MessageBusMetricSet getMetrics() { return metrics; } /** * <p>Sets the destroyed flag to true. The very first time this method is * called, it cleans up all its dependencies. Even if you retain a reference * to this object, all of its content is allowed to be garbage * collected.</p> * * @return True if content existed and was destroyed. */ public boolean destroy() { if (!destroyed.getAndSet(true)) { try { careTaker.join(); } catch (InterruptedException e) { Thread.currentThread().interrupt(); } protocolRepository.clearPolicyCache(); net.shutdown(); msn.destroy(); if (resender != null) { resender.destroy(); } return true; } return false; } /** * <p>Synchronize with internal threads. This method will handshake with all * internal threads. This has the implicit effect of waiting for all active * callbacks. 
Note that this method should never be invoked from a callback * since that would make the thread wait for itself... forever. This method * is typically used to untangle during session shutdown.</p> */ public void sync() { msn.sync(); net.sync(); } /** * <p>This is a convenience method to call {@link * * {@link SourceSessionParams} object.</p> * * @param handler The reply handler to receive the replies for the session. * @return The created session. */ public SourceSession createSourceSession(ReplyHandler handler) { return createSourceSession(new SourceSessionParams().setReplyHandler(handler)); } /** * <p>This is a convenience method to call {@link * * handler to the parameter object.</p> * * @param handler The reply handler to receive the replies for the session. * @param params The parameters to control the session. * @return The created session. */ public SourceSession createSourceSession(ReplyHandler handler, SourceSessionParams params) { return createSourceSession(new SourceSessionParams(params).setReplyHandler(handler)); } /** * <p>Creates a source session on top of this message bus.</p> * * @param params The parameters to control the session. * @return The created session. */ public SourceSession createSourceSession(SourceSessionParams params) { if (destroyed.get()) { throw new IllegalStateException("Object is destroyed."); } return new SourceSession(this, params); } /** * <p>This is a convenience method to call {@link * * values for the {@link IntermediateSessionParams} object.</p> * * @param name The local unique name for the created session. * @param broadcastName Whether or not to broadcast this session's name on * the network. * @param msgHandler The handler to receive the messages for the session. * @param replyHandler The handler to received the replies for the session. * @return The created session. 
*/ public IntermediateSession createIntermediateSession(String name, boolean broadcastName, MessageHandler msgHandler, ReplyHandler replyHandler) { return createIntermediateSession( new IntermediateSessionParams() .setName(name) .setBroadcastName(broadcastName) .setMessageHandler(msgHandler) .setReplyHandler(replyHandler)); } /** * <p>Creates an intermediate session on top of this message bus using the * given handlers and parameter object.</p> * * @param params The parameters to control the session. * @return The created session. */ public synchronized IntermediateSession createIntermediateSession(IntermediateSessionParams params) { if (destroyed.get()) { throw new IllegalStateException("Object is destroyed."); } if (sessions.containsKey(params.getName())) { throw new IllegalArgumentException("Name '" + params.getName() + "' is not unique."); } IntermediateSession session = new IntermediateSession(this, params); sessions.put(params.getName(), session); if (params.getBroadcastName()) { net.registerSession(params.getName()); } return session; } /** * <p>This is a convenience method to call {@link * * for the {@link DestinationSessionParams} object.</p> * * @param name The local unique name for the created session. * @param broadcastName Whether or not to broadcast this session's name on * the network. * @param handler The handler to receive the messages for the session. * @return The created session. */ public DestinationSession createDestinationSession(String name, boolean broadcastName, MessageHandler handler) { return createDestinationSession( new DestinationSessionParams() .setName(name) .setBroadcastName(broadcastName) .setMessageHandler(handler)); } /** * <p>Creates a destination session on top of this message bus using the * given handlers and parameter object.</p> * * @param params The parameters to control the session. * @return The created session. 
*/ public synchronized DestinationSession createDestinationSession(DestinationSessionParams params) { if (destroyed.get()) { throw new IllegalStateException("Object is destroyed."); } if (sessions.containsKey(params.getName())) { throw new IllegalArgumentException("Name '" + params.getName() + "' is not unique."); } DestinationSession session = new DestinationSession(this, params); sessions.put(params.getName(), session); if (params.getBroadcastName()) { net.registerSession(params.getName()); } return session; } /** * <p>This method is invoked by the {@link * com.yahoo.messagebus.IntermediateSession * sessions from receiving data from message bus.</p> * * @param name The name of the session to remove. * @param broadcastName Whether or not session name was broadcast. */ public synchronized void unregisterSession(String name, boolean broadcastName) { if (broadcastName) { net.unregisterSession(name); } sessions.remove(name); } private boolean doAccounting() { return (maxPendingCount > 0 || maxPendingSize > 0); } /** * <p>This method handles choking input data so that message bus does not * blindly accept everything. This prevents an application running * out-of-memory in case it fail to choke input data itself. If this method * returns false, it means that it should be rejected.</p> * * @param msg The message to count. * @return True if the message was accepted. 
*/ private boolean checkPending(Message msg) { boolean busy = false; int size = msg.getApproxSize(); if (doAccounting()) { synchronized (this) { busy = ((maxPendingCount > 0 && pendingCount >= maxPendingCount) || (maxPendingSize > 0 && pendingSize >= maxPendingSize)); if (!busy) { pendingCount++; pendingSize += size; } } } if (busy) { return false; } msg.setContext(size); msg.pushHandler(this); return true; } @Override public void handleMessage(Message msg) { if (resender != null && msg.hasBucketSequence()) { deliverError(msg, ErrorCode.SEQUENCE_ERROR, "Bucket sequences not supported when resender is enabled."); return; } SendProxy proxy = new SendProxy(this, net, resender); msn.deliverMessage(msg, proxy); } @Override public void handleReply(Reply reply) { if (destroyed.get()) { reply.discard(); return; } if (doAccounting()) { synchronized (this) { --pendingCount; pendingSize -= (Integer)reply.getContext(); } } deliverReply(reply, reply.popHandler()); } @Override public void deliverMessage(Message msg, String session) { MessageHandler msgHandler = sessions.get(session); if (msgHandler == null) { deliverError(msg, ErrorCode.UNKNOWN_SESSION, "Session '" + session + "' does not exist."); } else if (!checkPending(msg)) { deliverError(msg, ErrorCode.SESSION_BUSY, "Session '" + net.getConnectionSpec() + "/" + session + "' is busy, try again later."); } else { msn.deliverMessage(msg, msgHandler); } } /** * <p>Adds a protocol to the internal repository of protocols, replacing any * previous instance of the protocol and clearing the associated routing * policy cache.</p> * * @param protocol The protocol to add. 
*/ public void putProtocol(Protocol protocol) { protocolRepository.putProtocol(protocol); } @Override public Protocol getProtocol(Utf8Array name) { return protocolRepository.getProtocol(name.toString()); } public Protocol getProtocol(Utf8String name) { return getProtocol((Utf8Array)name); } @Override public void deliverReply(Reply reply, ReplyHandler handler) { msn.deliverReply(reply, handler); } @Override public void setupRouting(RoutingSpec spec) { Map<String, RoutingTable> tables = new HashMap<>(); for (int i = 0, len = spec.getNumTables(); i < len; ++i) { RoutingTableSpec table = spec.getTable(i); String name = table.getProtocol(); if (!protocolRepository.hasProtocol(name)) { log.log(LogLevel.INFO, "Protocol '" + name + "' is not supported, ignoring routing table."); continue; } tables.put(name, new RoutingTable(table)); } tablesRef.set(tables); protocolRepository.clearPolicyCache(); } /** * <p>Returns the resender that is running within this message bus.</p> * * @return The resender. */ public Resender getResender() { return resender; } /** * <p>Returns the number of messages received that have not been replied to * yet.</p> * * @return The pending count. */ public synchronized int getPendingCount() { return pendingCount; } /** * <p>Returns the size of messages received that have not been replied to * yet.</p> * * @return The pending size. */ public synchronized int getPendingSize() { return pendingSize; } /** * <p>Sets the maximum number of messages that can be received without being * replied to yet.</p> * * @param maxCount The max count. */ public void setMaxPendingCount(int maxCount) { maxPendingCount = maxCount; } /** * Gets maximum number of messages that can be received without being * replied to yet. */ public int getMaxPendingCount() { return maxPendingCount; } /** * <p>Sets the maximum size of messages that can be received without being * replied to yet.</p> * * @param maxSize The max size. 
*/ public void setMaxPendingSize(int maxSize) { maxPendingSize = maxSize; } /** * Gets maximum combined size of messages that can be received without * being replied to yet. */ public int getMaxPendingSize() { return maxPendingSize; } /** * <p>Returns a named routing table, may return null.</p> * * @param name The name of the routing table to return. * @return The routing table object. */ public RoutingTable getRoutingTable(Utf8String name) { return getRoutingTable(name.toString()); } /** * <p>Returns a routing policy that corresponds to the argument protocol * name, policy name and policy parameter. This will cache reuse all * policies as soon as they are first requested.</p> * * @param protocolName The name of the protocol to invoke {@link Protocol * @param policyName The name of the routing policy to retrieve. * @param policyParam The parameter for the routing policy to retrieve. * @return A corresponding routing policy, or null. */ public RoutingPolicy getRoutingPolicy(String protocolName, String policyName, String policyParam) { return protocolRepository.getRoutingPolicy(protocolName, policyName, policyParam); } /** * <p>Returns a routing policy that corresponds to the argument protocol * name, policy name and policy parameter. This will cache reuse all * policies as soon as they are first requested.</p> * * @param protocolName The name of the protocol to invoke {@link Protocol * @param policyName The name of the routing policy to retrieve. * @param policyParam The parameter for the routing policy to retrieve. * @return A corresponding routing policy, or null. */ public RoutingPolicy getRoutingPolicy(Utf8String protocolName, String policyName, String policyParam) { return protocolRepository.getRoutingPolicy(protocolName.toString(), policyName, policyParam); } /** * <p>Returns the connection spec string for the network layer of this * message bus. This is merely a proxy of the same function in the network * layer.</p> * * @return The connection string. 
*/ public String getConnectionSpec() { return net.getConnectionSpec(); } /** * <p>Constructs and schedules a Reply containing an error to the handler of the given Message.</p> * * @param msg The message to reply to. * @param errCode The code of the error to set. * @param errMsg The message of the error to set. */ private void deliverError(Message msg, int errCode, String errMsg) { Reply reply = new EmptyReply(); reply.swapState(msg); reply.addError(new Error(errCode, errMsg)); deliverReply(reply, reply.popHandler()); } /** * <p>Implements a task for running the resender in the messenger * thread. This task acts as a proxy for the resender, allowing the task to * be deleted without affecting the resender itself.</p> */ private static class ResenderTask implements Messenger.Task { final Resender resender; ResenderTask(Resender resender) { this.resender = resender; } public void destroy() { } public void run() { resender.resendScheduled(); } } }
/**
 * Central hub wiring a Network to registered sessions (source, intermediate,
 * destination), protocols and routing tables. Also throttles pending
 * (received but not yet replied-to) messages against maxPendingCount and
 * maxPendingSize.
 *
 * NOTE(review): this is a near-duplicate of the MessageBus class above,
 * differing only in getPendingSize() visibility and the added
 * getMaxPendingSize(); the two copies should be reconciled.
 * Fixes in this revision: 'log' made final; destroy() now re-interrupts the
 * current thread instead of swallowing InterruptedException; duplicated
 * getRoutingTable(Utf8String) javadoc removed.
 */
class MessageBus implements ConfigHandler, NetworkOwner, MessageHandler, ReplyHandler { private static final Logger log = Logger.getLogger(MessageBus.class.getName()); private final AtomicBoolean destroyed = new AtomicBoolean(false); private final ProtocolRepository protocolRepository = new ProtocolRepository(); private final AtomicReference<Map<String, RoutingTable>> tablesRef = new AtomicReference<>(null); private final CopyOnWriteHashMap<String, MessageHandler> sessions = new CopyOnWriteHashMap<>(); private final Network net; private final Messenger msn; private final Resender resender; private int maxPendingCount; private int maxPendingSize; private int pendingCount = 0; private int pendingSize = 0; private final Thread careTaker = new Thread(this::sendBlockedMessages); private final ConcurrentHashMap<SendBlockedMessages, Long> blockedSenders = new ConcurrentHashMap<>(); private MessageBusMetricSet metrics = new MessageBusMetricSet(); public interface SendBlockedMessages { /** * Do what you want, but dont block. * You will be called regularly until you signal you are done * @return true unless you are done */ boolean trySend(); } public void register(SendBlockedMessages sender) { blockedSenders.put(sender, SystemTimer.INSTANCE.milliTime()); } private void sendBlockedMessages() { while (! destroyed.get()) { for (SendBlockedMessages sender : blockedSenders.keySet()) { if (!sender.trySend()) { blockedSenders.remove(sender); } } try { Thread.sleep(10); } catch (InterruptedException e) { return; } } } /** * <p>Convenience constructor that proxies {@link * MessageBusParams)} by adding the given protocols to a default {@link * MessageBusParams} object.</p> * * @param net The network to associate with. * @param protocols An array of protocols to register. */ public MessageBus(Network net, List<Protocol> protocols) { this(net, new MessageBusParams().addProtocols(protocols)); } /** * <p>Constructs an instance of message bus. 
This requires a network object * that it will associate with. This assignment may not change during the * lifetime of this message bus.</p> * * @param net The network to associate with. * @param params The parameters that controls this bus. */ public MessageBus(Network net, MessageBusParams params) { maxPendingCount = params.getMaxPendingCount(); maxPendingSize = params.getMaxPendingSize(); for (int i = 0, len = params.getNumProtocols(); i < len; ++i) { protocolRepository.putProtocol(params.getProtocol(i)); if (params.getProtocol(i).getMetrics() != null) { metrics.protocols.addMetric(params.getProtocol(i).getMetrics()); } } this.net = net; net.attach(this); if ( ! net.waitUntilReady(120)) throw new IllegalStateException("Network failed to become ready in time."); msn = new Messenger(); RetryPolicy retryPolicy = params.getRetryPolicy(); if (retryPolicy != null) { resender = new Resender(retryPolicy); msn.addRecurrentTask(new ResenderTask(resender)); } else { resender = null; } careTaker.setDaemon(true); careTaker.start(); msn.start(); } /** * <p>Returns the metrics used by this messagebus.</p> * * @return The metric set. */ public MessageBusMetricSet getMetrics() { return metrics; } /** * <p>Sets the destroyed flag to true. The very first time this method is * called, it cleans up all its dependencies. Even if you retain a reference * to this object, all of its content is allowed to be garbage * collected.</p> * * @return True if content existed and was destroyed. */ public boolean destroy() { if (!destroyed.getAndSet(true)) { try { careTaker.join(); } catch (InterruptedException e) { Thread.currentThread().interrupt(); } protocolRepository.clearPolicyCache(); net.shutdown(); msn.destroy(); if (resender != null) { resender.destroy(); } return true; } return false; } /** * <p>Synchronize with internal threads. This method will handshake with all * internal threads. This has the implicit effect of waiting for all active * callbacks. 
Note that this method should never be invoked from a callback * since that would make the thread wait for itself... forever. This method * is typically used to untangle during session shutdown.</p> */ public void sync() { msn.sync(); net.sync(); } /** * <p>This is a convenience method to call {@link * * {@link SourceSessionParams} object.</p> * * @param handler The reply handler to receive the replies for the session. * @return The created session. */ public SourceSession createSourceSession(ReplyHandler handler) { return createSourceSession(new SourceSessionParams().setReplyHandler(handler)); } /** * <p>This is a convenience method to call {@link * * handler to the parameter object.</p> * * @param handler The reply handler to receive the replies for the session. * @param params The parameters to control the session. * @return The created session. */ public SourceSession createSourceSession(ReplyHandler handler, SourceSessionParams params) { return createSourceSession(new SourceSessionParams(params).setReplyHandler(handler)); } /** * <p>Creates a source session on top of this message bus.</p> * * @param params The parameters to control the session. * @return The created session. */ public SourceSession createSourceSession(SourceSessionParams params) { if (destroyed.get()) { throw new IllegalStateException("Object is destroyed."); } return new SourceSession(this, params); } /** * <p>This is a convenience method to call {@link * * values for the {@link IntermediateSessionParams} object.</p> * * @param name The local unique name for the created session. * @param broadcastName Whether or not to broadcast this session's name on * the network. * @param msgHandler The handler to receive the messages for the session. * @param replyHandler The handler to received the replies for the session. * @return The created session. 
*/ public IntermediateSession createIntermediateSession(String name, boolean broadcastName, MessageHandler msgHandler, ReplyHandler replyHandler) { return createIntermediateSession( new IntermediateSessionParams() .setName(name) .setBroadcastName(broadcastName) .setMessageHandler(msgHandler) .setReplyHandler(replyHandler)); } /** * <p>Creates an intermediate session on top of this message bus using the * given handlers and parameter object.</p> * * @param params The parameters to control the session. * @return The created session. */ public synchronized IntermediateSession createIntermediateSession(IntermediateSessionParams params) { if (destroyed.get()) { throw new IllegalStateException("Object is destroyed."); } if (sessions.containsKey(params.getName())) { throw new IllegalArgumentException("Name '" + params.getName() + "' is not unique."); } IntermediateSession session = new IntermediateSession(this, params); sessions.put(params.getName(), session); if (params.getBroadcastName()) { net.registerSession(params.getName()); } return session; } /** * <p>This is a convenience method to call {@link * * for the {@link DestinationSessionParams} object.</p> * * @param name The local unique name for the created session. * @param broadcastName Whether or not to broadcast this session's name on * the network. * @param handler The handler to receive the messages for the session. * @return The created session. */ public DestinationSession createDestinationSession(String name, boolean broadcastName, MessageHandler handler) { return createDestinationSession( new DestinationSessionParams() .setName(name) .setBroadcastName(broadcastName) .setMessageHandler(handler)); } /** * <p>Creates a destination session on top of this message bus using the * given handlers and parameter object.</p> * * @param params The parameters to control the session. * @return The created session. 
*/ public synchronized DestinationSession createDestinationSession(DestinationSessionParams params) { if (destroyed.get()) { throw new IllegalStateException("Object is destroyed."); } if (sessions.containsKey(params.getName())) { throw new IllegalArgumentException("Name '" + params.getName() + "' is not unique."); } DestinationSession session = new DestinationSession(this, params); sessions.put(params.getName(), session); if (params.getBroadcastName()) { net.registerSession(params.getName()); } return session; } /** * <p>This method is invoked by the {@link * com.yahoo.messagebus.IntermediateSession * sessions from receiving data from message bus.</p> * * @param name The name of the session to remove. * @param broadcastName Whether or not session name was broadcast. */ public synchronized void unregisterSession(String name, boolean broadcastName) { if (broadcastName) { net.unregisterSession(name); } sessions.remove(name); } private boolean doAccounting() { return (maxPendingCount > 0 || maxPendingSize > 0); } /** * <p>This method handles choking input data so that message bus does not * blindly accept everything. This prevents an application running * out-of-memory in case it fail to choke input data itself. If this method * returns false, it means that it should be rejected.</p> * * @param msg The message to count. * @return True if the message was accepted. 
*/ private boolean checkPending(Message msg) { boolean busy = false; int size = msg.getApproxSize(); if (doAccounting()) { synchronized (this) { busy = ((maxPendingCount > 0 && pendingCount >= maxPendingCount) || (maxPendingSize > 0 && pendingSize >= maxPendingSize)); if (!busy) { pendingCount++; pendingSize += size; } } } if (busy) { return false; } msg.setContext(size); msg.pushHandler(this); return true; } @Override public void handleMessage(Message msg) { if (resender != null && msg.hasBucketSequence()) { deliverError(msg, ErrorCode.SEQUENCE_ERROR, "Bucket sequences not supported when resender is enabled."); return; } SendProxy proxy = new SendProxy(this, net, resender); msn.deliverMessage(msg, proxy); } @Override public void handleReply(Reply reply) { if (destroyed.get()) { reply.discard(); return; } if (doAccounting()) { synchronized (this) { --pendingCount; pendingSize -= (Integer)reply.getContext(); } } deliverReply(reply, reply.popHandler()); } @Override public void deliverMessage(Message msg, String session) { MessageHandler msgHandler = sessions.get(session); if (msgHandler == null) { deliverError(msg, ErrorCode.UNKNOWN_SESSION, "Session '" + session + "' does not exist."); } else if (!checkPending(msg)) { deliverError(msg, ErrorCode.SESSION_BUSY, "Session '" + net.getConnectionSpec() + "/" + session + "' is busy, try again later."); } else { msn.deliverMessage(msg, msgHandler); } } /** * <p>Adds a protocol to the internal repository of protocols, replacing any * previous instance of the protocol and clearing the associated routing * policy cache.</p> * * @param protocol The protocol to add. 
*/ public void putProtocol(Protocol protocol) { protocolRepository.putProtocol(protocol); } @Override public Protocol getProtocol(Utf8Array name) { return protocolRepository.getProtocol(name.toString()); } public Protocol getProtocol(Utf8String name) { return getProtocol((Utf8Array)name); } @Override public void deliverReply(Reply reply, ReplyHandler handler) { msn.deliverReply(reply, handler); } @Override public void setupRouting(RoutingSpec spec) { Map<String, RoutingTable> tables = new HashMap<>(); for (int i = 0, len = spec.getNumTables(); i < len; ++i) { RoutingTableSpec table = spec.getTable(i); String name = table.getProtocol(); if (!protocolRepository.hasProtocol(name)) { log.log(LogLevel.INFO, "Protocol '" + name + "' is not supported, ignoring routing table."); continue; } tables.put(name, new RoutingTable(table)); } tablesRef.set(tables); protocolRepository.clearPolicyCache(); } /** * <p>Returns the resender that is running within this message bus.</p> * * @return The resender. */ public Resender getResender() { return resender; } /** * <p>Returns the number of messages received that have not been replied to * yet.</p> * * @return The pending count. */ public synchronized int getPendingCount() { return pendingCount; } /** * <p>Returns the size of messages received that have not been replied to * yet.</p> * * @return The pending size. */ public synchronized int getPendingSize() { return pendingSize; } /** * <p>Sets the maximum number of messages that can be received without being * replied to yet.</p> * * @param maxCount The max count. */ public void setMaxPendingCount(int maxCount) { maxPendingCount = maxCount; } /** * Gets maximum number of messages that can be received without being * replied to yet. */ public int getMaxPendingCount() { return maxPendingCount; } /** * <p>Sets the maximum size of messages that can be received without being * replied to yet.</p> * * @param maxSize The max size. 
*/ public void setMaxPendingSize(int maxSize) { maxPendingSize = maxSize; } /** * Gets maximum combined size of messages that can be received without * being replied to yet. */ public int getMaxPendingSize() { return maxPendingSize; } /** * <p>Returns a named routing table, may return null.</p> * * @param name The name of the routing table to return. * @return The routing table object. */ public RoutingTable getRoutingTable(Utf8String name) { return getRoutingTable(name.toString()); } /** * <p>Returns a routing policy that corresponds to the argument protocol * name, policy name and policy parameter. This will cache reuse all * policies as soon as they are first requested.</p> * * @param protocolName The name of the protocol to invoke {@link Protocol * @param policyName The name of the routing policy to retrieve. * @param policyParam The parameter for the routing policy to retrieve. * @return A corresponding routing policy, or null. */ public RoutingPolicy getRoutingPolicy(String protocolName, String policyName, String policyParam) { return protocolRepository.getRoutingPolicy(protocolName, policyName, policyParam); } /** * <p>Returns a routing policy that corresponds to the argument protocol * name, policy name and policy parameter. This will cache reuse all * policies as soon as they are first requested.</p> * * @param protocolName The name of the protocol to invoke {@link Protocol * @param policyName The name of the routing policy to retrieve. * @param policyParam The parameter for the routing policy to retrieve. * @return A corresponding routing policy, or null. 
*/ public RoutingPolicy getRoutingPolicy(Utf8String protocolName, String policyName, String policyParam) { return protocolRepository.getRoutingPolicy(protocolName.toString(), policyName, policyParam); } /** * <p>Returns the connection spec string for the network layer of this * message bus. This is merely a proxy of the same function in the network * layer.</p> * * @return The connection string. */ public String getConnectionSpec() { return net.getConnectionSpec(); } /** * <p>Constructs and schedules a Reply containing an error to the handler of the given Message.</p> * * @param msg The message to reply to. * @param errCode The code of the error to set. * @param errMsg The message of the error to set. */ private void deliverError(Message msg, int errCode, String errMsg) { Reply reply = new EmptyReply(); reply.swapState(msg); reply.addError(new Error(errCode, errMsg)); deliverReply(reply, reply.popHandler()); } /** * <p>Implements a task for running the resender in the messenger * thread. This task acts as a proxy for the resender, allowing the task to * be deleted without affecting the resender itself.</p> */ private static class ResenderTask implements Messenger.Task { final Resender resender; ResenderTask(Resender resender) { this.resender = resender; } public void destroy() { } public void run() { resender.resendScheduled(); } } }
Consider factoring these repeated assertions out into a separate timeout-verification helper method, since the same concrete duration values are asserted multiple times.
public void makesAtLeast3RequestsWithShortProcessingTime() { assertEquals(Duration.ofMillis(50), timeouts.getConnectTimeout()); assertEquals(Duration.ofMillis(1350), timeouts.getReadTimeout()); assertEquals(Duration.ofMillis(1300), timeouts.getServerTimeout()); Duration shortPocessingTime = Duration.ofMillis(200); clock.advance(shortPocessingTime); assertEquals(Duration.ofMillis(50), timeouts.getConnectTimeout()); assertEquals(Duration.ofMillis(1350), timeouts.getReadTimeout()); assertEquals(Duration.ofMillis(1300), timeouts.getServerTimeout()); clock.advance(shortPocessingTime); assertEquals(Duration.ofMillis(50), timeouts.getConnectTimeout()); assertEquals(Duration.ofMillis(1350), timeouts.getReadTimeout()); assertEquals(Duration.ofMillis(1300), timeouts.getServerTimeout()); }
assertEquals(Duration.ofMillis(1300), timeouts.getServerTimeout());
public void makesAtLeast3RequestsWithShortProcessingTime() { assertStandardTimeouts(); Duration shortProcessingTime = Duration.ofMillis(200); clock.advance(shortProcessingTime); assertStandardTimeouts(); clock.advance(shortProcessingTime); assertStandardTimeouts(); }
class ClusterControllerClientTimeoutsTest { private static final Duration MINIMUM_TIME_LEFT = IN_PROCESS_OVERHEAD_PER_CALL .plus(CONNECT_TIMEOUT) .plus(NETWORK_OVERHEAD_PER_CALL) .plus(MIN_SERVER_TIMEOUT); static { assertEquals(Duration.ofMillis(160), MINIMUM_TIME_LEFT); } private static final Duration MINIMUM_ORIGINAL_TIMEOUT = MINIMUM_TIME_LEFT .multipliedBy(NUM_CALLS) .plus(IN_PROCESS_OVERHEAD); static { assertEquals(Duration.ofMillis(420), MINIMUM_ORIGINAL_TIMEOUT); } private final ManualClock clock = new ManualClock(); private Duration originalTimeout; private TimeBudget timeBudget; private ClusterControllerClientTimeouts timeouts; private void makeTimeouts(Duration originalTimeout) { this.originalTimeout = originalTimeout; this.timeBudget = TimeBudget.from(clock, clock.instant(), Optional.of(originalTimeout)); this.timeouts = new ClusterControllerClientTimeouts("clustername", timeBudget); } @Before public void setUp() { makeTimeouts(Duration.ofSeconds(3)); } @Test public void makes2RequestsWithMaxProcessingTime() { assertEquals(Duration.ofMillis(50), timeouts.getConnectTimeout()); assertEquals(Duration.ofMillis(1350), timeouts.getReadTimeout()); assertEquals(Duration.ofMillis(1300), timeouts.getServerTimeout()); Duration maxProcessingTime = IN_PROCESS_OVERHEAD_PER_CALL .plus(CONNECT_TIMEOUT) .plus(timeouts.getReadTimeout()); assertEquals(1450, maxProcessingTime.toMillis()); clock.advance(maxProcessingTime); assertEquals(Duration.ofMillis(50), timeouts.getConnectTimeout()); assertEquals(Duration.ofMillis(1350), timeouts.getReadTimeout()); assertEquals(Duration.ofMillis(1300), timeouts.getServerTimeout()); clock.advance(maxProcessingTime); try { timeouts.getServerTimeout(); fail(); } catch (UncheckedTimeoutException e) { assertEquals( "Too little time left (PT0.1S) to call content cluster 'clustername', original timeout was PT3S", e.getMessage()); } } @Test @Test public void alreadyTimedOut() { clock.advance(Duration.ofSeconds(4)); try { 
timeouts.getServerTimeout(); fail(); } catch (UncheckedTimeoutException e) { assertEquals( "Exceeded the timeout PT3S against content cluster 'clustername' by PT1S", e.getMessage()); } } @Test public void justTooLittleTime() { clock.advance(originalTimeout.minus(MINIMUM_TIME_LEFT).plus(Duration.ofMillis(1))); try { timeouts.getServerTimeout(); fail(); } catch (UncheckedTimeoutException e) { assertEquals( "Server would be given too little time to complete: PT0.009S. Original timeout was PT3S", e.getMessage()); } } @Test public void justEnoughTime() { clock.advance(originalTimeout.minus(MINIMUM_TIME_LEFT)); timeouts.getServerTimeout(); } @Test public void justTooLittleInitialTime() { makeTimeouts(MINIMUM_ORIGINAL_TIMEOUT.minus(Duration.ofMillis(1))); try { timeouts.getServerTimeout(); fail(); } catch (UncheckedTimeoutException e) { assertEquals( "Server would be given too little time to complete: PT0.0095S. Original timeout was PT0.419S", e.getMessage()); } } @Test public void justEnoughInitialTime() { makeTimeouts(MINIMUM_ORIGINAL_TIMEOUT); timeouts.getServerTimeout(); } }
class ClusterControllerClientTimeoutsTest { private static final Duration MINIMUM_TIME_LEFT = IN_PROCESS_OVERHEAD_PER_CALL .plus(CONNECT_TIMEOUT) .plus(NETWORK_OVERHEAD_PER_CALL) .plus(MIN_SERVER_TIMEOUT); static { assertEquals(Duration.ofMillis(160), MINIMUM_TIME_LEFT); } private static final Duration MINIMUM_ORIGINAL_TIMEOUT = MINIMUM_TIME_LEFT .multipliedBy(NUM_CALLS) .plus(IN_PROCESS_OVERHEAD); static { assertEquals(Duration.ofMillis(420), MINIMUM_ORIGINAL_TIMEOUT); } private final ManualClock clock = new ManualClock(); private Duration originalTimeout; private TimeBudget timeBudget; private ClusterControllerClientTimeouts timeouts; private void makeTimeouts(Duration originalTimeout) { this.originalTimeout = originalTimeout; this.timeBudget = TimeBudget.from(clock, clock.instant(), Optional.of(originalTimeout)); this.timeouts = new ClusterControllerClientTimeouts("clustername", timeBudget); } @Before public void setUp() { makeTimeouts(Duration.ofSeconds(3)); } @Test public void makes2RequestsWithMaxProcessingTime() { assertStandardTimeouts(); Duration maxProcessingTime = IN_PROCESS_OVERHEAD_PER_CALL .plus(CONNECT_TIMEOUT) .plus(timeouts.getReadTimeoutOrThrow()); assertEquals(1450, maxProcessingTime.toMillis()); clock.advance(maxProcessingTime); assertStandardTimeouts(); clock.advance(maxProcessingTime); try { timeouts.getServerTimeoutOrThrow(); fail(); } catch (UncheckedTimeoutException e) { assertEquals( "Too little time left (PT0.1S) to call content cluster 'clustername', original timeout was PT3S", e.getMessage()); } } @Test private void assertStandardTimeouts() { assertEquals(Duration.ofMillis(50), timeouts.getConnectTimeoutOrThrow()); assertEquals(Duration.ofMillis(1350), timeouts.getReadTimeoutOrThrow()); assertEquals(Duration.ofMillis(1300), timeouts.getServerTimeoutOrThrow()); } @Test public void alreadyTimedOut() { clock.advance(Duration.ofSeconds(4)); try { timeouts.getServerTimeoutOrThrow(); fail(); } catch (UncheckedTimeoutException e) { 
assertEquals( "Exceeded the timeout PT3S against content cluster 'clustername' by PT1S", e.getMessage()); } } @Test public void justTooLittleTime() { clock.advance(originalTimeout.minus(MINIMUM_TIME_LEFT).plus(Duration.ofMillis(1))); try { timeouts.getServerTimeoutOrThrow(); fail(); } catch (UncheckedTimeoutException e) { assertEquals( "Server would be given too little time to complete: PT0.009S. Original timeout was PT3S", e.getMessage()); } } @Test public void justEnoughTime() { clock.advance(originalTimeout.minus(MINIMUM_TIME_LEFT)); timeouts.getServerTimeoutOrThrow(); } @Test public void justTooLittleInitialTime() { makeTimeouts(MINIMUM_ORIGINAL_TIMEOUT.minus(Duration.ofMillis(1))); try { timeouts.getServerTimeoutOrThrow(); fail(); } catch (UncheckedTimeoutException e) { assertEquals( "Server would be given too little time to complete: PT0.0095S. Original timeout was PT0.419S", e.getMessage()); } } @Test public void justEnoughInitialTime() { makeTimeouts(MINIMUM_ORIGINAL_TIMEOUT); timeouts.getServerTimeoutOrThrow(); } }
s/Pocessing/Processing/
public void makesAtLeast3RequestsWithShortProcessingTime() { assertEquals(Duration.ofMillis(50), timeouts.getConnectTimeout()); assertEquals(Duration.ofMillis(1350), timeouts.getReadTimeout()); assertEquals(Duration.ofMillis(1300), timeouts.getServerTimeout()); Duration shortPocessingTime = Duration.ofMillis(200); clock.advance(shortPocessingTime); assertEquals(Duration.ofMillis(50), timeouts.getConnectTimeout()); assertEquals(Duration.ofMillis(1350), timeouts.getReadTimeout()); assertEquals(Duration.ofMillis(1300), timeouts.getServerTimeout()); clock.advance(shortPocessingTime); assertEquals(Duration.ofMillis(50), timeouts.getConnectTimeout()); assertEquals(Duration.ofMillis(1350), timeouts.getReadTimeout()); assertEquals(Duration.ofMillis(1300), timeouts.getServerTimeout()); }
Duration shortPocessingTime = Duration.ofMillis(200);
public void makesAtLeast3RequestsWithShortProcessingTime() { assertStandardTimeouts(); Duration shortProcessingTime = Duration.ofMillis(200); clock.advance(shortProcessingTime); assertStandardTimeouts(); clock.advance(shortProcessingTime); assertStandardTimeouts(); }
class ClusterControllerClientTimeoutsTest { private static final Duration MINIMUM_TIME_LEFT = IN_PROCESS_OVERHEAD_PER_CALL .plus(CONNECT_TIMEOUT) .plus(NETWORK_OVERHEAD_PER_CALL) .plus(MIN_SERVER_TIMEOUT); static { assertEquals(Duration.ofMillis(160), MINIMUM_TIME_LEFT); } private static final Duration MINIMUM_ORIGINAL_TIMEOUT = MINIMUM_TIME_LEFT .multipliedBy(NUM_CALLS) .plus(IN_PROCESS_OVERHEAD); static { assertEquals(Duration.ofMillis(420), MINIMUM_ORIGINAL_TIMEOUT); } private final ManualClock clock = new ManualClock(); private Duration originalTimeout; private TimeBudget timeBudget; private ClusterControllerClientTimeouts timeouts; private void makeTimeouts(Duration originalTimeout) { this.originalTimeout = originalTimeout; this.timeBudget = TimeBudget.from(clock, clock.instant(), Optional.of(originalTimeout)); this.timeouts = new ClusterControllerClientTimeouts("clustername", timeBudget); } @Before public void setUp() { makeTimeouts(Duration.ofSeconds(3)); } @Test public void makes2RequestsWithMaxProcessingTime() { assertEquals(Duration.ofMillis(50), timeouts.getConnectTimeout()); assertEquals(Duration.ofMillis(1350), timeouts.getReadTimeout()); assertEquals(Duration.ofMillis(1300), timeouts.getServerTimeout()); Duration maxProcessingTime = IN_PROCESS_OVERHEAD_PER_CALL .plus(CONNECT_TIMEOUT) .plus(timeouts.getReadTimeout()); assertEquals(1450, maxProcessingTime.toMillis()); clock.advance(maxProcessingTime); assertEquals(Duration.ofMillis(50), timeouts.getConnectTimeout()); assertEquals(Duration.ofMillis(1350), timeouts.getReadTimeout()); assertEquals(Duration.ofMillis(1300), timeouts.getServerTimeout()); clock.advance(maxProcessingTime); try { timeouts.getServerTimeout(); fail(); } catch (UncheckedTimeoutException e) { assertEquals( "Too little time left (PT0.1S) to call content cluster 'clustername', original timeout was PT3S", e.getMessage()); } } @Test @Test public void alreadyTimedOut() { clock.advance(Duration.ofSeconds(4)); try { 
timeouts.getServerTimeout(); fail(); } catch (UncheckedTimeoutException e) { assertEquals( "Exceeded the timeout PT3S against content cluster 'clustername' by PT1S", e.getMessage()); } } @Test public void justTooLittleTime() { clock.advance(originalTimeout.minus(MINIMUM_TIME_LEFT).plus(Duration.ofMillis(1))); try { timeouts.getServerTimeout(); fail(); } catch (UncheckedTimeoutException e) { assertEquals( "Server would be given too little time to complete: PT0.009S. Original timeout was PT3S", e.getMessage()); } } @Test public void justEnoughTime() { clock.advance(originalTimeout.minus(MINIMUM_TIME_LEFT)); timeouts.getServerTimeout(); } @Test public void justTooLittleInitialTime() { makeTimeouts(MINIMUM_ORIGINAL_TIMEOUT.minus(Duration.ofMillis(1))); try { timeouts.getServerTimeout(); fail(); } catch (UncheckedTimeoutException e) { assertEquals( "Server would be given too little time to complete: PT0.0095S. Original timeout was PT0.419S", e.getMessage()); } } @Test public void justEnoughInitialTime() { makeTimeouts(MINIMUM_ORIGINAL_TIMEOUT); timeouts.getServerTimeout(); } }
class ClusterControllerClientTimeoutsTest { private static final Duration MINIMUM_TIME_LEFT = IN_PROCESS_OVERHEAD_PER_CALL .plus(CONNECT_TIMEOUT) .plus(NETWORK_OVERHEAD_PER_CALL) .plus(MIN_SERVER_TIMEOUT); static { assertEquals(Duration.ofMillis(160), MINIMUM_TIME_LEFT); } private static final Duration MINIMUM_ORIGINAL_TIMEOUT = MINIMUM_TIME_LEFT .multipliedBy(NUM_CALLS) .plus(IN_PROCESS_OVERHEAD); static { assertEquals(Duration.ofMillis(420), MINIMUM_ORIGINAL_TIMEOUT); } private final ManualClock clock = new ManualClock(); private Duration originalTimeout; private TimeBudget timeBudget; private ClusterControllerClientTimeouts timeouts; private void makeTimeouts(Duration originalTimeout) { this.originalTimeout = originalTimeout; this.timeBudget = TimeBudget.from(clock, clock.instant(), Optional.of(originalTimeout)); this.timeouts = new ClusterControllerClientTimeouts("clustername", timeBudget); } @Before public void setUp() { makeTimeouts(Duration.ofSeconds(3)); } @Test public void makes2RequestsWithMaxProcessingTime() { assertStandardTimeouts(); Duration maxProcessingTime = IN_PROCESS_OVERHEAD_PER_CALL .plus(CONNECT_TIMEOUT) .plus(timeouts.getReadTimeoutOrThrow()); assertEquals(1450, maxProcessingTime.toMillis()); clock.advance(maxProcessingTime); assertStandardTimeouts(); clock.advance(maxProcessingTime); try { timeouts.getServerTimeoutOrThrow(); fail(); } catch (UncheckedTimeoutException e) { assertEquals( "Too little time left (PT0.1S) to call content cluster 'clustername', original timeout was PT3S", e.getMessage()); } } @Test private void assertStandardTimeouts() { assertEquals(Duration.ofMillis(50), timeouts.getConnectTimeoutOrThrow()); assertEquals(Duration.ofMillis(1350), timeouts.getReadTimeoutOrThrow()); assertEquals(Duration.ofMillis(1300), timeouts.getServerTimeoutOrThrow()); } @Test public void alreadyTimedOut() { clock.advance(Duration.ofSeconds(4)); try { timeouts.getServerTimeoutOrThrow(); fail(); } catch (UncheckedTimeoutException e) { 
assertEquals( "Exceeded the timeout PT3S against content cluster 'clustername' by PT1S", e.getMessage()); } } @Test public void justTooLittleTime() { clock.advance(originalTimeout.minus(MINIMUM_TIME_LEFT).plus(Duration.ofMillis(1))); try { timeouts.getServerTimeoutOrThrow(); fail(); } catch (UncheckedTimeoutException e) { assertEquals( "Server would be given too little time to complete: PT0.009S. Original timeout was PT3S", e.getMessage()); } } @Test public void justEnoughTime() { clock.advance(originalTimeout.minus(MINIMUM_TIME_LEFT)); timeouts.getServerTimeoutOrThrow(); } @Test public void justTooLittleInitialTime() { makeTimeouts(MINIMUM_ORIGINAL_TIMEOUT.minus(Duration.ofMillis(1))); try { timeouts.getServerTimeoutOrThrow(); fail(); } catch (UncheckedTimeoutException e) { assertEquals( "Server would be given too little time to complete: PT0.0095S. Original timeout was PT0.419S", e.getMessage()); } } @Test public void justEnoughInitialTime() { makeTimeouts(MINIMUM_ORIGINAL_TIMEOUT); timeouts.getServerTimeoutOrThrow(); } }
Perhaps the SimpleFeedAccess interface could have a close method instead?
public HttpResponse handle(HttpRequest request, RouteMetricSet.ProgressCallback callback, int numThreads) { if (request.getProperty("status") != null) { return new MetricResponse(context.getMetrics().getMetricSet()); } try { int busy = busyThreads.incrementAndGet(); if (busy > maxBusyThreads) return new EmptyResponse(com.yahoo.jdisc.http.HttpResponse.Status.SERVICE_UNAVAILABLE); boolean asynchronous = request.getBooleanProperty("asynchronous"); MessagePropertyProcessor.PropertySetter properties = getPropertyProcessor().buildPropertySetter(request); String route = properties.getRoute().toString(); FeedResponse response = new FeedResponse(new RouteMetricSet(route, callback)); SingleSender sender = new SingleSender(response, getSharedSender(route), !asynchronous); sender.addMessageProcessor(properties); sender.addMessageProcessor(new DocprocMessageProcessor(getDocprocChain(request), getDocprocServiceRegistry(request))); SimpleFeedAccess feedAccess = sender; if (numThreads != 1) { feedAccess = new ThreadedFeedAccess(numThreads, feedAccess); } Feeder feeder = createFeeder(feedAccess, request); feeder.setAbortOnDocumentError(properties.getAbortOnDocumentError()); feeder.setCreateIfNonExistent(properties.getCreateIfNonExistent()); response.setAbortOnFeedError(properties.getAbortOnFeedError()); List<String> errors = feeder.parse(); for (String s : errors) { response.addXMLParseError(s); } if (errors.size() > 0 && feeder instanceof XMLFeeder) { response.addXMLParseError("If you are trying to feed JSON, set the Content-Type header to application/json."); } sender.done(); if (feedAccess instanceof ThreadedFeedAccess) { ((ThreadedFeedAccess)feedAccess).close(); } if (asynchronous) { return response; } long millis = getTimeoutMillis(request); boolean completed = sender.waitForPending(millis); if (!completed) { response.addError(Error.TIMEOUT, "Timed out after " + millis + " ms waiting for responses"); } response.done(); return response; } finally { 
busyThreads.decrementAndGet(); } }
}
public HttpResponse handle(HttpRequest request, RouteMetricSet.ProgressCallback callback, int numThreads) { if (request.getProperty("status") != null) { return new MetricResponse(context.getMetrics().getMetricSet()); } try { int busy = busyThreads.incrementAndGet(); if (busy > maxBusyThreads) return new EmptyResponse(com.yahoo.jdisc.http.HttpResponse.Status.SERVICE_UNAVAILABLE); boolean asynchronous = request.getBooleanProperty("asynchronous"); MessagePropertyProcessor.PropertySetter properties = getPropertyProcessor().buildPropertySetter(request); String route = properties.getRoute().toString(); FeedResponse response = new FeedResponse(new RouteMetricSet(route, callback)); SingleSender sender = new SingleSender(response, getSharedSender(route), !asynchronous); sender.addMessageProcessor(properties); sender.addMessageProcessor(new DocprocMessageProcessor(getDocprocChain(request), getDocprocServiceRegistry(request))); ThreadedFeedAccess feedAccess = new ThreadedFeedAccess(numThreads, sender); Feeder feeder = createFeeder(sender, request); feeder.setAbortOnDocumentError(properties.getAbortOnDocumentError()); feeder.setCreateIfNonExistent(properties.getCreateIfNonExistent()); response.setAbortOnFeedError(properties.getAbortOnFeedError()); List<String> errors = feeder.parse(); for (String s : errors) { response.addXMLParseError(s); } if (errors.size() > 0 && feeder instanceof XMLFeeder) { response.addXMLParseError("If you are trying to feed JSON, set the Content-Type header to application/json."); } sender.done(); feedAccess.close(); if (asynchronous) { return response; } long millis = getTimeoutMillis(request); boolean completed = sender.waitForPending(millis); if (!completed) { response.addError(Error.TIMEOUT, "Timed out after " + millis + " ms waiting for responses"); } response.done(); return response; } finally { busyThreads.decrementAndGet(); } }
class ThreadedFeedAccess implements SimpleFeedAccess { private final SimpleFeedAccess simpleFeedAccess; private final ExecutorService executor; ThreadedFeedAccess(int numThreads, SimpleFeedAccess simpleFeedAccess) { this.simpleFeedAccess = simpleFeedAccess; if (numThreads <= 0) { numThreads = Runtime.getRuntime().availableProcessors(); } executor = new ThreadPoolExecutor(numThreads, numThreads, 0L, TimeUnit.SECONDS, new SynchronousQueue<>(false), ThreadFactoryFactory.getDaemonThreadFactory("feeder"), new ThreadPoolExecutor.CallerRunsPolicy()); } @Override public void put(Document doc) { executor.execute(() -> simpleFeedAccess.put(doc)); } @Override public void remove(DocumentId docId) { executor.execute(() -> simpleFeedAccess.remove(docId)); } @Override public void update(DocumentUpdate update) { executor.execute(() -> simpleFeedAccess.update(update)); } @Override public void put(Document doc, TestAndSetCondition condition) { executor.execute(() -> simpleFeedAccess.put(doc, condition)); } @Override public void remove(DocumentId docId, TestAndSetCondition condition) { executor.execute(() -> simpleFeedAccess.remove(docId, condition)); } @Override public void update(DocumentUpdate update, TestAndSetCondition condition) { executor.execute(() -> simpleFeedAccess.update(update, condition)); } @Override public boolean isAborted() { return simpleFeedAccess.isAborted(); } void close() { executor.shutdown(); } }
class ThreadedFeedAccess implements SimpleFeedAccess { private final SimpleFeedAccess simpleFeedAccess; private final ExecutorService executorService; private final Executor executor; ThreadedFeedAccess(int numThreads, SimpleFeedAccess simpleFeedAccess) { this.simpleFeedAccess = simpleFeedAccess; if (numThreads <= 0) { numThreads = Runtime.getRuntime().availableProcessors(); } if (numThreads > 1) { executorService = new ThreadPoolExecutor(numThreads, numThreads, 0L, TimeUnit.SECONDS, new SynchronousQueue<>(false), ThreadFactoryFactory.getDaemonThreadFactory("feeder"), new ThreadPoolExecutor.CallerRunsPolicy()); executor = executorService; } else { executorService = null; executor = new Executor() { @Override public void execute(Runnable command) { command.run(); } }; } } @Override public void put(Document doc) { executor.execute(() -> simpleFeedAccess.put(doc)); } @Override public void remove(DocumentId docId) { executor.execute(() -> simpleFeedAccess.remove(docId)); } @Override public void update(DocumentUpdate update) { executor.execute(() -> simpleFeedAccess.update(update)); } @Override public void put(Document doc, TestAndSetCondition condition) { executor.execute(() -> simpleFeedAccess.put(doc, condition)); } @Override public void remove(DocumentId docId, TestAndSetCondition condition) { executor.execute(() -> simpleFeedAccess.remove(docId, condition)); } @Override public void update(DocumentUpdate update, TestAndSetCondition condition) { executor.execute(() -> simpleFeedAccess.update(update, condition)); } @Override public boolean isAborted() { return simpleFeedAccess.isAborted(); } @Override public void close() { if (executorService != null) { executorService.shutdown(); } } }
And if you are making changes I suggest making ThreadedFeedAccess a top-level class.
public HttpResponse handle(HttpRequest request, RouteMetricSet.ProgressCallback callback, int numThreads) { if (request.getProperty("status") != null) { return new MetricResponse(context.getMetrics().getMetricSet()); } try { int busy = busyThreads.incrementAndGet(); if (busy > maxBusyThreads) return new EmptyResponse(com.yahoo.jdisc.http.HttpResponse.Status.SERVICE_UNAVAILABLE); boolean asynchronous = request.getBooleanProperty("asynchronous"); MessagePropertyProcessor.PropertySetter properties = getPropertyProcessor().buildPropertySetter(request); String route = properties.getRoute().toString(); FeedResponse response = new FeedResponse(new RouteMetricSet(route, callback)); SingleSender sender = new SingleSender(response, getSharedSender(route), !asynchronous); sender.addMessageProcessor(properties); sender.addMessageProcessor(new DocprocMessageProcessor(getDocprocChain(request), getDocprocServiceRegistry(request))); SimpleFeedAccess feedAccess = sender; if (numThreads != 1) { feedAccess = new ThreadedFeedAccess(numThreads, feedAccess); } Feeder feeder = createFeeder(feedAccess, request); feeder.setAbortOnDocumentError(properties.getAbortOnDocumentError()); feeder.setCreateIfNonExistent(properties.getCreateIfNonExistent()); response.setAbortOnFeedError(properties.getAbortOnFeedError()); List<String> errors = feeder.parse(); for (String s : errors) { response.addXMLParseError(s); } if (errors.size() > 0 && feeder instanceof XMLFeeder) { response.addXMLParseError("If you are trying to feed JSON, set the Content-Type header to application/json."); } sender.done(); if (feedAccess instanceof ThreadedFeedAccess) { ((ThreadedFeedAccess)feedAccess).close(); } if (asynchronous) { return response; } long millis = getTimeoutMillis(request); boolean completed = sender.waitForPending(millis); if (!completed) { response.addError(Error.TIMEOUT, "Timed out after " + millis + " ms waiting for responses"); } response.done(); return response; } finally { 
busyThreads.decrementAndGet(); } }
}
public HttpResponse handle(HttpRequest request, RouteMetricSet.ProgressCallback callback, int numThreads) { if (request.getProperty("status") != null) { return new MetricResponse(context.getMetrics().getMetricSet()); } try { int busy = busyThreads.incrementAndGet(); if (busy > maxBusyThreads) return new EmptyResponse(com.yahoo.jdisc.http.HttpResponse.Status.SERVICE_UNAVAILABLE); boolean asynchronous = request.getBooleanProperty("asynchronous"); MessagePropertyProcessor.PropertySetter properties = getPropertyProcessor().buildPropertySetter(request); String route = properties.getRoute().toString(); FeedResponse response = new FeedResponse(new RouteMetricSet(route, callback)); SingleSender sender = new SingleSender(response, getSharedSender(route), !asynchronous); sender.addMessageProcessor(properties); sender.addMessageProcessor(new DocprocMessageProcessor(getDocprocChain(request), getDocprocServiceRegistry(request))); ThreadedFeedAccess feedAccess = new ThreadedFeedAccess(numThreads, sender); Feeder feeder = createFeeder(sender, request); feeder.setAbortOnDocumentError(properties.getAbortOnDocumentError()); feeder.setCreateIfNonExistent(properties.getCreateIfNonExistent()); response.setAbortOnFeedError(properties.getAbortOnFeedError()); List<String> errors = feeder.parse(); for (String s : errors) { response.addXMLParseError(s); } if (errors.size() > 0 && feeder instanceof XMLFeeder) { response.addXMLParseError("If you are trying to feed JSON, set the Content-Type header to application/json."); } sender.done(); feedAccess.close(); if (asynchronous) { return response; } long millis = getTimeoutMillis(request); boolean completed = sender.waitForPending(millis); if (!completed) { response.addError(Error.TIMEOUT, "Timed out after " + millis + " ms waiting for responses"); } response.done(); return response; } finally { busyThreads.decrementAndGet(); } }
class ThreadedFeedAccess implements SimpleFeedAccess { private final SimpleFeedAccess simpleFeedAccess; private final ExecutorService executor; ThreadedFeedAccess(int numThreads, SimpleFeedAccess simpleFeedAccess) { this.simpleFeedAccess = simpleFeedAccess; if (numThreads <= 0) { numThreads = Runtime.getRuntime().availableProcessors(); } executor = new ThreadPoolExecutor(numThreads, numThreads, 0L, TimeUnit.SECONDS, new SynchronousQueue<>(false), ThreadFactoryFactory.getDaemonThreadFactory("feeder"), new ThreadPoolExecutor.CallerRunsPolicy()); } @Override public void put(Document doc) { executor.execute(() -> simpleFeedAccess.put(doc)); } @Override public void remove(DocumentId docId) { executor.execute(() -> simpleFeedAccess.remove(docId)); } @Override public void update(DocumentUpdate update) { executor.execute(() -> simpleFeedAccess.update(update)); } @Override public void put(Document doc, TestAndSetCondition condition) { executor.execute(() -> simpleFeedAccess.put(doc, condition)); } @Override public void remove(DocumentId docId, TestAndSetCondition condition) { executor.execute(() -> simpleFeedAccess.remove(docId, condition)); } @Override public void update(DocumentUpdate update, TestAndSetCondition condition) { executor.execute(() -> simpleFeedAccess.update(update, condition)); } @Override public boolean isAborted() { return simpleFeedAccess.isAborted(); } void close() { executor.shutdown(); } }
class ThreadedFeedAccess implements SimpleFeedAccess { private final SimpleFeedAccess simpleFeedAccess; private final ExecutorService executorService; private final Executor executor; ThreadedFeedAccess(int numThreads, SimpleFeedAccess simpleFeedAccess) { this.simpleFeedAccess = simpleFeedAccess; if (numThreads <= 0) { numThreads = Runtime.getRuntime().availableProcessors(); } if (numThreads > 1) { executorService = new ThreadPoolExecutor(numThreads, numThreads, 0L, TimeUnit.SECONDS, new SynchronousQueue<>(false), ThreadFactoryFactory.getDaemonThreadFactory("feeder"), new ThreadPoolExecutor.CallerRunsPolicy()); executor = executorService; } else { executorService = null; executor = new Executor() { @Override public void execute(Runnable command) { command.run(); } }; } } @Override public void put(Document doc) { executor.execute(() -> simpleFeedAccess.put(doc)); } @Override public void remove(DocumentId docId) { executor.execute(() -> simpleFeedAccess.remove(docId)); } @Override public void update(DocumentUpdate update) { executor.execute(() -> simpleFeedAccess.update(update)); } @Override public void put(Document doc, TestAndSetCondition condition) { executor.execute(() -> simpleFeedAccess.put(doc, condition)); } @Override public void remove(DocumentId docId, TestAndSetCondition condition) { executor.execute(() -> simpleFeedAccess.remove(docId, condition)); } @Override public void update(DocumentUpdate update, TestAndSetCondition condition) { executor.execute(() -> simpleFeedAccess.update(update, condition)); } @Override public boolean isAborted() { return simpleFeedAccess.isAborted(); } @Override public void close() { if (executorService != null) { executorService.shutdown(); } } }
What does top-level class imply?
public HttpResponse handle(HttpRequest request, RouteMetricSet.ProgressCallback callback, int numThreads) { if (request.getProperty("status") != null) { return new MetricResponse(context.getMetrics().getMetricSet()); } try { int busy = busyThreads.incrementAndGet(); if (busy > maxBusyThreads) return new EmptyResponse(com.yahoo.jdisc.http.HttpResponse.Status.SERVICE_UNAVAILABLE); boolean asynchronous = request.getBooleanProperty("asynchronous"); MessagePropertyProcessor.PropertySetter properties = getPropertyProcessor().buildPropertySetter(request); String route = properties.getRoute().toString(); FeedResponse response = new FeedResponse(new RouteMetricSet(route, callback)); SingleSender sender = new SingleSender(response, getSharedSender(route), !asynchronous); sender.addMessageProcessor(properties); sender.addMessageProcessor(new DocprocMessageProcessor(getDocprocChain(request), getDocprocServiceRegistry(request))); SimpleFeedAccess feedAccess = sender; if (numThreads != 1) { feedAccess = new ThreadedFeedAccess(numThreads, feedAccess); } Feeder feeder = createFeeder(feedAccess, request); feeder.setAbortOnDocumentError(properties.getAbortOnDocumentError()); feeder.setCreateIfNonExistent(properties.getCreateIfNonExistent()); response.setAbortOnFeedError(properties.getAbortOnFeedError()); List<String> errors = feeder.parse(); for (String s : errors) { response.addXMLParseError(s); } if (errors.size() > 0 && feeder instanceof XMLFeeder) { response.addXMLParseError("If you are trying to feed JSON, set the Content-Type header to application/json."); } sender.done(); if (feedAccess instanceof ThreadedFeedAccess) { ((ThreadedFeedAccess)feedAccess).close(); } if (asynchronous) { return response; } long millis = getTimeoutMillis(request); boolean completed = sender.waitForPending(millis); if (!completed) { response.addError(Error.TIMEOUT, "Timed out after " + millis + " ms waiting for responses"); } response.done(); return response; } finally { 
busyThreads.decrementAndGet(); } }
}
public HttpResponse handle(HttpRequest request, RouteMetricSet.ProgressCallback callback, int numThreads) { if (request.getProperty("status") != null) { return new MetricResponse(context.getMetrics().getMetricSet()); } try { int busy = busyThreads.incrementAndGet(); if (busy > maxBusyThreads) return new EmptyResponse(com.yahoo.jdisc.http.HttpResponse.Status.SERVICE_UNAVAILABLE); boolean asynchronous = request.getBooleanProperty("asynchronous"); MessagePropertyProcessor.PropertySetter properties = getPropertyProcessor().buildPropertySetter(request); String route = properties.getRoute().toString(); FeedResponse response = new FeedResponse(new RouteMetricSet(route, callback)); SingleSender sender = new SingleSender(response, getSharedSender(route), !asynchronous); sender.addMessageProcessor(properties); sender.addMessageProcessor(new DocprocMessageProcessor(getDocprocChain(request), getDocprocServiceRegistry(request))); ThreadedFeedAccess feedAccess = new ThreadedFeedAccess(numThreads, sender); Feeder feeder = createFeeder(sender, request); feeder.setAbortOnDocumentError(properties.getAbortOnDocumentError()); feeder.setCreateIfNonExistent(properties.getCreateIfNonExistent()); response.setAbortOnFeedError(properties.getAbortOnFeedError()); List<String> errors = feeder.parse(); for (String s : errors) { response.addXMLParseError(s); } if (errors.size() > 0 && feeder instanceof XMLFeeder) { response.addXMLParseError("If you are trying to feed JSON, set the Content-Type header to application/json."); } sender.done(); feedAccess.close(); if (asynchronous) { return response; } long millis = getTimeoutMillis(request); boolean completed = sender.waitForPending(millis); if (!completed) { response.addError(Error.TIMEOUT, "Timed out after " + millis + " ms waiting for responses"); } response.done(); return response; } finally { busyThreads.decrementAndGet(); } }
class ThreadedFeedAccess implements SimpleFeedAccess { private final SimpleFeedAccess simpleFeedAccess; private final ExecutorService executor; ThreadedFeedAccess(int numThreads, SimpleFeedAccess simpleFeedAccess) { this.simpleFeedAccess = simpleFeedAccess; if (numThreads <= 0) { numThreads = Runtime.getRuntime().availableProcessors(); } executor = new ThreadPoolExecutor(numThreads, numThreads, 0L, TimeUnit.SECONDS, new SynchronousQueue<>(false), ThreadFactoryFactory.getDaemonThreadFactory("feeder"), new ThreadPoolExecutor.CallerRunsPolicy()); } @Override public void put(Document doc) { executor.execute(() -> simpleFeedAccess.put(doc)); } @Override public void remove(DocumentId docId) { executor.execute(() -> simpleFeedAccess.remove(docId)); } @Override public void update(DocumentUpdate update) { executor.execute(() -> simpleFeedAccess.update(update)); } @Override public void put(Document doc, TestAndSetCondition condition) { executor.execute(() -> simpleFeedAccess.put(doc, condition)); } @Override public void remove(DocumentId docId, TestAndSetCondition condition) { executor.execute(() -> simpleFeedAccess.remove(docId, condition)); } @Override public void update(DocumentUpdate update, TestAndSetCondition condition) { executor.execute(() -> simpleFeedAccess.update(update, condition)); } @Override public boolean isAborted() { return simpleFeedAccess.isAborted(); } void close() { executor.shutdown(); } }
class ThreadedFeedAccess implements SimpleFeedAccess { private final SimpleFeedAccess simpleFeedAccess; private final ExecutorService executorService; private final Executor executor; ThreadedFeedAccess(int numThreads, SimpleFeedAccess simpleFeedAccess) { this.simpleFeedAccess = simpleFeedAccess; if (numThreads <= 0) { numThreads = Runtime.getRuntime().availableProcessors(); } if (numThreads > 1) { executorService = new ThreadPoolExecutor(numThreads, numThreads, 0L, TimeUnit.SECONDS, new SynchronousQueue<>(false), ThreadFactoryFactory.getDaemonThreadFactory("feeder"), new ThreadPoolExecutor.CallerRunsPolicy()); executor = executorService; } else { executorService = null; executor = new Executor() { @Override public void execute(Runnable command) { command.run(); } }; } } @Override public void put(Document doc) { executor.execute(() -> simpleFeedAccess.put(doc)); } @Override public void remove(DocumentId docId) { executor.execute(() -> simpleFeedAccess.remove(docId)); } @Override public void update(DocumentUpdate update) { executor.execute(() -> simpleFeedAccess.update(update)); } @Override public void put(Document doc, TestAndSetCondition condition) { executor.execute(() -> simpleFeedAccess.put(doc, condition)); } @Override public void remove(DocumentId docId, TestAndSetCondition condition) { executor.execute(() -> simpleFeedAccess.remove(docId, condition)); } @Override public void update(DocumentUpdate update, TestAndSetCondition condition) { executor.execute(() -> simpleFeedAccess.update(update, condition)); } @Override public boolean isAborted() { return simpleFeedAccess.isAborted(); } @Override public void close() { if (executorService != null) { executorService.shutdown(); } } }
Not nested inside another class.
public HttpResponse handle(HttpRequest request, RouteMetricSet.ProgressCallback callback, int numThreads) { if (request.getProperty("status") != null) { return new MetricResponse(context.getMetrics().getMetricSet()); } try { int busy = busyThreads.incrementAndGet(); if (busy > maxBusyThreads) return new EmptyResponse(com.yahoo.jdisc.http.HttpResponse.Status.SERVICE_UNAVAILABLE); boolean asynchronous = request.getBooleanProperty("asynchronous"); MessagePropertyProcessor.PropertySetter properties = getPropertyProcessor().buildPropertySetter(request); String route = properties.getRoute().toString(); FeedResponse response = new FeedResponse(new RouteMetricSet(route, callback)); SingleSender sender = new SingleSender(response, getSharedSender(route), !asynchronous); sender.addMessageProcessor(properties); sender.addMessageProcessor(new DocprocMessageProcessor(getDocprocChain(request), getDocprocServiceRegistry(request))); SimpleFeedAccess feedAccess = sender; if (numThreads != 1) { feedAccess = new ThreadedFeedAccess(numThreads, feedAccess); } Feeder feeder = createFeeder(feedAccess, request); feeder.setAbortOnDocumentError(properties.getAbortOnDocumentError()); feeder.setCreateIfNonExistent(properties.getCreateIfNonExistent()); response.setAbortOnFeedError(properties.getAbortOnFeedError()); List<String> errors = feeder.parse(); for (String s : errors) { response.addXMLParseError(s); } if (errors.size() > 0 && feeder instanceof XMLFeeder) { response.addXMLParseError("If you are trying to feed JSON, set the Content-Type header to application/json."); } sender.done(); if (feedAccess instanceof ThreadedFeedAccess) { ((ThreadedFeedAccess)feedAccess).close(); } if (asynchronous) { return response; } long millis = getTimeoutMillis(request); boolean completed = sender.waitForPending(millis); if (!completed) { response.addError(Error.TIMEOUT, "Timed out after " + millis + " ms waiting for responses"); } response.done(); return response; } finally { 
busyThreads.decrementAndGet(); } }
}
public HttpResponse handle(HttpRequest request, RouteMetricSet.ProgressCallback callback, int numThreads) { if (request.getProperty("status") != null) { return new MetricResponse(context.getMetrics().getMetricSet()); } try { int busy = busyThreads.incrementAndGet(); if (busy > maxBusyThreads) return new EmptyResponse(com.yahoo.jdisc.http.HttpResponse.Status.SERVICE_UNAVAILABLE); boolean asynchronous = request.getBooleanProperty("asynchronous"); MessagePropertyProcessor.PropertySetter properties = getPropertyProcessor().buildPropertySetter(request); String route = properties.getRoute().toString(); FeedResponse response = new FeedResponse(new RouteMetricSet(route, callback)); SingleSender sender = new SingleSender(response, getSharedSender(route), !asynchronous); sender.addMessageProcessor(properties); sender.addMessageProcessor(new DocprocMessageProcessor(getDocprocChain(request), getDocprocServiceRegistry(request))); ThreadedFeedAccess feedAccess = new ThreadedFeedAccess(numThreads, sender); Feeder feeder = createFeeder(sender, request); feeder.setAbortOnDocumentError(properties.getAbortOnDocumentError()); feeder.setCreateIfNonExistent(properties.getCreateIfNonExistent()); response.setAbortOnFeedError(properties.getAbortOnFeedError()); List<String> errors = feeder.parse(); for (String s : errors) { response.addXMLParseError(s); } if (errors.size() > 0 && feeder instanceof XMLFeeder) { response.addXMLParseError("If you are trying to feed JSON, set the Content-Type header to application/json."); } sender.done(); feedAccess.close(); if (asynchronous) { return response; } long millis = getTimeoutMillis(request); boolean completed = sender.waitForPending(millis); if (!completed) { response.addError(Error.TIMEOUT, "Timed out after " + millis + " ms waiting for responses"); } response.done(); return response; } finally { busyThreads.decrementAndGet(); } }
class ThreadedFeedAccess implements SimpleFeedAccess { private final SimpleFeedAccess simpleFeedAccess; private final ExecutorService executor; ThreadedFeedAccess(int numThreads, SimpleFeedAccess simpleFeedAccess) { this.simpleFeedAccess = simpleFeedAccess; if (numThreads <= 0) { numThreads = Runtime.getRuntime().availableProcessors(); } executor = new ThreadPoolExecutor(numThreads, numThreads, 0L, TimeUnit.SECONDS, new SynchronousQueue<>(false), ThreadFactoryFactory.getDaemonThreadFactory("feeder"), new ThreadPoolExecutor.CallerRunsPolicy()); } @Override public void put(Document doc) { executor.execute(() -> simpleFeedAccess.put(doc)); } @Override public void remove(DocumentId docId) { executor.execute(() -> simpleFeedAccess.remove(docId)); } @Override public void update(DocumentUpdate update) { executor.execute(() -> simpleFeedAccess.update(update)); } @Override public void put(Document doc, TestAndSetCondition condition) { executor.execute(() -> simpleFeedAccess.put(doc, condition)); } @Override public void remove(DocumentId docId, TestAndSetCondition condition) { executor.execute(() -> simpleFeedAccess.remove(docId, condition)); } @Override public void update(DocumentUpdate update, TestAndSetCondition condition) { executor.execute(() -> simpleFeedAccess.update(update, condition)); } @Override public boolean isAborted() { return simpleFeedAccess.isAborted(); } void close() { executor.shutdown(); } }
class ThreadedFeedAccess implements SimpleFeedAccess { private final SimpleFeedAccess simpleFeedAccess; private final ExecutorService executorService; private final Executor executor; ThreadedFeedAccess(int numThreads, SimpleFeedAccess simpleFeedAccess) { this.simpleFeedAccess = simpleFeedAccess; if (numThreads <= 0) { numThreads = Runtime.getRuntime().availableProcessors(); } if (numThreads > 1) { executorService = new ThreadPoolExecutor(numThreads, numThreads, 0L, TimeUnit.SECONDS, new SynchronousQueue<>(false), ThreadFactoryFactory.getDaemonThreadFactory("feeder"), new ThreadPoolExecutor.CallerRunsPolicy()); executor = executorService; } else { executorService = null; executor = new Executor() { @Override public void execute(Runnable command) { command.run(); } }; } } @Override public void put(Document doc) { executor.execute(() -> simpleFeedAccess.put(doc)); } @Override public void remove(DocumentId docId) { executor.execute(() -> simpleFeedAccess.remove(docId)); } @Override public void update(DocumentUpdate update) { executor.execute(() -> simpleFeedAccess.update(update)); } @Override public void put(Document doc, TestAndSetCondition condition) { executor.execute(() -> simpleFeedAccess.put(doc, condition)); } @Override public void remove(DocumentId docId, TestAndSetCondition condition) { executor.execute(() -> simpleFeedAccess.remove(docId, condition)); } @Override public void update(DocumentUpdate update, TestAndSetCondition condition) { executor.execute(() -> simpleFeedAccess.update(update, condition)); } @Override public boolean isAborted() { return simpleFeedAccess.isAborted(); } @Override public void close() { if (executorService != null) { executorService.shutdown(); } } }
"case" should be indented more than "switch".
public void getConfig(DispatchConfig.Builder builder) { for (SearchNode node : getSearchNodes()) { DispatchConfig.Node.Builder nodeBuilder = new DispatchConfig.Node.Builder(); nodeBuilder.key(node.getDistributionKey()); nodeBuilder.group(node.getNodeSpec().groupIndex()); nodeBuilder.host(node.getHostName()); nodeBuilder.port(node.getRpcPort()); nodeBuilder.fs4port(node.getDispatchPort()); if (tuning.dispatch.minActiveDocsCoverage != null) builder.minActivedocsPercentage(tuning.dispatch.minActiveDocsCoverage); if (tuning.dispatch.minGroupCoverage != null) builder.minGroupCoverage(tuning.dispatch.minGroupCoverage); if (tuning.dispatch.policy != null) { switch(tuning.dispatch.policy) { case RANDOM: builder.distributionPolicy(DistributionPolicy.RANDOM); break; case ROUNDROBIN: builder.distributionPolicy(DistributionPolicy.ROUNDROBIN); break; } } builder.maxNodesDownPerGroup(rootDispatch.getMaxNodesDownPerFixedRow()); builder.node(nodeBuilder); } }
case RANDOM:
public void getConfig(DispatchConfig.Builder builder) { for (SearchNode node : getSearchNodes()) { DispatchConfig.Node.Builder nodeBuilder = new DispatchConfig.Node.Builder(); nodeBuilder.key(node.getDistributionKey()); nodeBuilder.group(node.getNodeSpec().groupIndex()); nodeBuilder.host(node.getHostName()); nodeBuilder.port(node.getRpcPort()); nodeBuilder.fs4port(node.getDispatchPort()); if (tuning.dispatch.minActiveDocsCoverage != null) builder.minActivedocsPercentage(tuning.dispatch.minActiveDocsCoverage); if (tuning.dispatch.minGroupCoverage != null) builder.minGroupCoverage(tuning.dispatch.minGroupCoverage); if (tuning.dispatch.policy != null) { switch (tuning.dispatch.policy) { case RANDOM: builder.distributionPolicy(DistributionPolicy.RANDOM); break; case ROUNDROBIN: builder.distributionPolicy(DistributionPolicy.ROUNDROBIN); break; } } builder.maxNodesDownPerGroup(rootDispatch.getMaxNodesDownPerFixedRow()); builder.node(nodeBuilder); } }
class UnionConfiguration extends AbstractConfigProducer implements AttributesConfig.Producer { private final List<DocumentDatabase> docDbs; public void getConfig(IndexInfoConfig.Builder builder) { for (DocumentDatabase docDb : docDbs) { docDb.getConfig(builder); } } public void getConfig(IlscriptsConfig.Builder builder) { for (DocumentDatabase docDb : docDbs) { docDb.getConfig(builder); } } @Override public void getConfig(AttributesConfig.Builder builder) { for (DocumentDatabase docDb : docDbs) { docDb.getConfig(builder); } } public void getConfig(RankProfilesConfig.Builder builder) { for (DocumentDatabase docDb : docDbs) { docDb.getConfig(builder); } } private UnionConfiguration(AbstractConfigProducer parent, List<DocumentDatabase> docDbs) { super(parent, "union"); this.docDbs = docDbs; } }
class UnionConfiguration extends AbstractConfigProducer implements AttributesConfig.Producer { private final List<DocumentDatabase> docDbs; public void getConfig(IndexInfoConfig.Builder builder) { for (DocumentDatabase docDb : docDbs) { docDb.getConfig(builder); } } public void getConfig(IlscriptsConfig.Builder builder) { for (DocumentDatabase docDb : docDbs) { docDb.getConfig(builder); } } @Override public void getConfig(AttributesConfig.Builder builder) { for (DocumentDatabase docDb : docDbs) { docDb.getConfig(builder); } } public void getConfig(RankProfilesConfig.Builder builder) { for (DocumentDatabase docDb : docDbs) { docDb.getConfig(builder); } } private UnionConfiguration(AbstractConfigProducer parent, List<DocumentDatabase> docDbs) { super(parent, "union"); this.docDbs = docDbs; } }
Nice. The logic of this is easy to follow now.
public void pingIterationCompleted() { int numGroups = orderedGroups.size(); if (numGroups == 1) { Group group = groups.values().iterator().next(); group.aggregateActiveDocuments(); updateSufficientCoverage(group, true); return; } long[] activeDocumentsInGroup = new long[numGroups]; long sumOfActiveDocuments = 0; for(int i = 0; i < numGroups; i++) { Group group = orderedGroups.get(i); group.aggregateActiveDocuments(); activeDocumentsInGroup[i] = group.getActiveDocuments(); sumOfActiveDocuments += activeDocumentsInGroup[i]; } for (int i = 0; i < numGroups; i++) { Group group = orderedGroups.get(i); long activeDocuments = activeDocumentsInGroup[i]; long averageDocumentsInOtherGroups = (sumOfActiveDocuments - activeDocuments) / (numGroups - 1); boolean sufficientCoverage = true; if (averageDocumentsInOtherGroups > 0) { double coverage = 100.0 * (double) activeDocuments / averageDocumentsInOtherGroups; sufficientCoverage = coverage >= minActivedocsCoveragePercentage; } if (sufficientCoverage) { sufficientCoverage = isNodeCoverageSufficient(group); } updateSufficientCoverage(group, sufficientCoverage); } }
int numGroups = orderedGroups.size();
public void pingIterationCompleted() { int numGroups = orderedGroups.size(); if (numGroups == 1) { Group group = groups.values().iterator().next(); group.aggregateActiveDocuments(); updateSufficientCoverage(group, true); return; } long[] activeDocumentsInGroup = new long[numGroups]; long sumOfActiveDocuments = 0; for(int i = 0; i < numGroups; i++) { Group group = orderedGroups.get(i); group.aggregateActiveDocuments(); activeDocumentsInGroup[i] = group.getActiveDocuments(); sumOfActiveDocuments += activeDocumentsInGroup[i]; } for (int i = 0; i < numGroups; i++) { Group group = orderedGroups.get(i); long activeDocuments = activeDocumentsInGroup[i]; long averageDocumentsInOtherGroups = (sumOfActiveDocuments - activeDocuments) / (numGroups - 1); boolean sufficientCoverage = true; if (averageDocumentsInOtherGroups > 0) { double coverage = 100.0 * (double) activeDocuments / averageDocumentsInOtherGroups; sufficientCoverage = coverage >= minActivedocsCoveragePercentage; } if (sufficientCoverage) { sufficientCoverage = isNodeCoverageSufficient(group); } updateSufficientCoverage(group, sufficientCoverage); } }
class SearchCluster implements NodeManager<SearchCluster.Node> { private static final Logger log = Logger.getLogger(SearchCluster.class.getName()); /** The min active docs a group must have to be considered up, as a % of the average active docs of the other groups */ public final double minActivedocsCoveragePercentage; public final double minGroupCoverage; public final int maxNodesDownPerGroup; private final int size; private final ImmutableMap<Integer, Group> groups; private final ImmutableMultimap<String, Node> nodesByHost; private final ImmutableList<Group> orderedGroups; private final ClusterMonitor<Node> clusterMonitor; private final VipStatus vipStatus; /** * A search node on this local machine having the entire corpus, which we therefore * should prefer to dispatch directly to, or empty if there is no such local search node. * If there is one, we also maintain the VIP status of this container based on the availability * of the corpus on this local node (up + has coverage), such that this node is taken out of rotation * if it only queries this cluster when the local node cannot be used, to avoid unnecessary * cross-node network traffic. 
*/ private final Optional<Node> directDispatchTarget; private final FS4ResourcePool fs4ResourcePool; public SearchCluster(DispatchConfig dispatchConfig, FS4ResourcePool fs4ResourcePool, int containerClusterSize, VipStatus vipStatus) { this(dispatchConfig.minActivedocsPercentage(), dispatchConfig.minGroupCoverage(), dispatchConfig.maxNodesDownPerGroup(), toNodes(dispatchConfig), fs4ResourcePool, containerClusterSize, vipStatus); } public SearchCluster(double minActivedocsCoverage, double minGroupCoverage, int maxNodesDownPerGroup, List<Node> nodes, FS4ResourcePool fs4ResourcePool, int containerClusterSize, VipStatus vipStatus) { this.minActivedocsCoveragePercentage = minActivedocsCoverage; this.minGroupCoverage = minGroupCoverage; this.maxNodesDownPerGroup = maxNodesDownPerGroup; this.size = nodes.size(); this.fs4ResourcePool = fs4ResourcePool; this.vipStatus = vipStatus; ImmutableMap.Builder<Integer, Group> groupsBuilder = new ImmutableMap.Builder<>(); for (Map.Entry<Integer, List<Node>> group : nodes.stream().collect(Collectors.groupingBy(Node::group)).entrySet()) { Group g = new Group(group.getKey(), group.getValue()); groupsBuilder.put(group.getKey(), g); } this.groups = groupsBuilder.build(); LinkedHashMap<Integer, Group> groupIntroductionOrder = new LinkedHashMap<>(); nodes.forEach(node -> groupIntroductionOrder.put(node.group(), groups.get(node.group))); this.orderedGroups = ImmutableList.<Group>builder().addAll(groupIntroductionOrder.values()).build(); ImmutableMultimap.Builder<String, Node> nodesByHostBuilder = new ImmutableMultimap.Builder<>(); for (Node node : nodes) nodesByHostBuilder.put(node.hostname(), node); this.nodesByHost = nodesByHostBuilder.build(); this.directDispatchTarget = findDirectDispatchTarget(HostName.getLocalhost(), size, containerClusterSize, nodesByHost, groups); this.clusterMonitor = new ClusterMonitor<>(this); for (Node node : nodes) { working(node); clusterMonitor.add(node, true); } } private static Optional<Node> 
findDirectDispatchTarget(String selfHostname, int searchClusterSize, int containerClusterSize, ImmutableMultimap<String, Node>nodesByHost, ImmutableMap<Integer, Group> groups) { ImmutableCollection<Node> localSearchNodes = nodesByHost.get(selfHostname); if (localSearchNodes.size() != 1) return Optional.empty(); SearchCluster.Node localSearchNode = localSearchNodes.iterator().next(); SearchCluster.Group localSearchGroup = groups.get(localSearchNode.group()); if (localSearchGroup.nodes().size() != 1) return Optional.empty(); if (containerClusterSize < searchClusterSize) return Optional.empty(); return Optional.of(localSearchNode); } private static ImmutableList<Node> toNodes(DispatchConfig dispatchConfig) { ImmutableList.Builder<Node> nodesBuilder = new ImmutableList.Builder<>(); for (DispatchConfig.Node node : dispatchConfig.node()) nodesBuilder.add(new Node(node.key(), node.host(), node.fs4port(), node.group())); return nodesBuilder.build(); } /** Returns the number of nodes in this cluster (across all groups) */ public int size() { return size; } /** Returns the groups of this cluster as an immutable map indexed by group id */ public ImmutableMap<Integer, Group> groups() { return groups; } /** Returns the groups of this cluster as an immutable list in introduction order */ public ImmutableList<Group> orderedGroups() { return orderedGroups; } /** Returns the n'th (zero-indexed) group in the cluster if possible */ public Optional<Group> group(int n) { if (orderedGroups.size() > n) { return Optional.of(orderedGroups.get(n)); } else { return Optional.empty(); } } /** Returns the number of nodes per group - size()/groups.size() */ public int groupSize() { if (groups.size() == 0) return size(); return size() / groups.size(); } /** * Returns the nodes of this cluster as an immutable map indexed by host. * One host may contain multiple nodes (on different ports), so this is a multi-map. 
*/ public ImmutableMultimap<String, Node> nodesByHost() { return nodesByHost; } /** * Returns the recipient we should dispatch queries directly to (bypassing fdispatch), * or empty if we should not dispatch directly. */ public Optional<Node> directDispatchTarget() { if ( ! directDispatchTarget.isPresent()) return Optional.empty(); SearchCluster.Group localSearchGroup = groups.get(directDispatchTarget.get().group()); if ( ! localSearchGroup.hasSufficientCoverage()) return Optional.empty(); if ( ! directDispatchTarget.get().isWorking()) return Optional.empty(); return directDispatchTarget; } /** Used by the cluster monitor to manage node status */ @Override public void working(Node node) { node.setWorking(true); if (usesDirectDispatchTo(node)) vipStatus.addToRotation(this); } /** Used by the cluster monitor to manage node status */ @Override public void failed(Node node) { node.setWorking(false); if (usesDirectDispatchTo(node)) vipStatus.removeFromRotation(this); } private void updateSufficientCoverage(Group group, boolean sufficientCoverage) { if (usesDirectDispatchTo(group) && sufficientCoverage != group.hasSufficientCoverage()) { if (sufficientCoverage) { vipStatus.addToRotation(this); } else { vipStatus.removeFromRotation(this); } } group.setHasSufficientCoverage(sufficientCoverage); } private boolean usesDirectDispatchTo(Node node) { if ( ! directDispatchTarget.isPresent()) return false; return directDispatchTarget.get().equals(node); } private boolean usesDirectDispatchTo(Group group) { if ( ! 
directDispatchTarget.isPresent()) return false; return directDispatchTarget.get().group() == group.id(); } /** Used by the cluster monitor to manage node status */ @Override public void ping(Node node, Executor executor) { Pinger pinger = new Pinger(node); FutureTask<Pong> futurePong = new FutureTask<>(pinger); executor.execute(futurePong); Pong pong = getPong(futurePong, node); futurePong.cancel(true); if (pong.badResponse()) clusterMonitor.failed(node, pong.getError(0)); else clusterMonitor.responded(node); } /** * Update statistics after a round of issuing pings. * Note that this doesn't wait for pings to return, so it will typically accumulate data from * last rounds pinging, or potentially (although unlikely) some combination of new and old data. */ @Override private boolean isNodeCoverageSufficient(Group group) { int nodesUp = 0; for (Node node : group.nodes()) { if (node.isWorking()) { nodesUp++; } } int nodes = group.nodes().size(); int nodesAllowedDown = maxNodesDownPerGroup + (int) (((double) nodes * (100.0 - minGroupCoverage)) / 100.0); return nodesUp + nodesAllowedDown >= nodes; } private Pong getPong(FutureTask<Pong> futurePong, Node node) { try { return futurePong.get(clusterMonitor.getConfiguration().getFailLimit(), TimeUnit.MILLISECONDS); } catch (InterruptedException e) { log.log(Level.WARNING, "Exception pinging " + node, e); return new Pong(ErrorMessage.createUnspecifiedError("Ping was interrupted: " + node)); } catch (ExecutionException e) { log.log(Level.WARNING, "Exception pinging " + node, e); return new Pong(ErrorMessage.createUnspecifiedError("Execution was interrupted: " + node)); } catch (TimeoutException e) { return new Pong(ErrorMessage.createNoAnswerWhenPingingNode("Ping thread timed out")); } } private class Pinger implements Callable<Pong> { private final Node node; public Pinger(Node node) { this.node = node; } public Pong call() { try { Pong pong = FastSearcher.ping(new Ping(clusterMonitor.getConfiguration().getRequestTimeout()), 
fs4ResourcePool.getBackend(node.hostname(), node.fs4port()), node.toString()); if (pong.activeDocuments().isPresent()) node.setActiveDocuments(pong.activeDocuments().get()); return pong; } catch (RuntimeException e) { return new Pong(ErrorMessage.createBackendCommunicationError("Exception when pinging " + node + ": " + Exceptions.toMessageString(e))); } } } /** A group in a search cluster. This class is multithread safe. */ public static class Group { private final int id; private final ImmutableList<Node> nodes; private final AtomicBoolean hasSufficientCoverage = new AtomicBoolean(true); private final AtomicLong activeDocuments = new AtomicLong(0); public Group(int id, List<Node> nodes) { this.id = id; this.nodes = ImmutableList.copyOf(nodes); } /** Returns the unique identity of this group */ public int id() { return id; } /** Returns the nodes in this group as an immutable list */ public ImmutableList<Node> nodes() { return nodes; } /** * Returns whether this group has sufficient active documents * (compared to other groups) that is should receive traffic */ public boolean hasSufficientCoverage() { return hasSufficientCoverage.get(); } void setHasSufficientCoverage(boolean sufficientCoverage) { hasSufficientCoverage.lazySet(sufficientCoverage); } void aggregateActiveDocuments() { long activeDocumentsInGroup = 0; for (Node node : nodes) { if (node.isWorking()) { activeDocumentsInGroup += node.getActiveDocuments(); } } activeDocuments.set(activeDocumentsInGroup); } /** Returns the active documents on this node. If unknown, 0 is returned. */ long getActiveDocuments() { return this.activeDocuments.get(); } @Override public String toString() { return "search group " + id; } @Override public int hashCode() { return id; } @Override public boolean equals(Object other) { if (other == this) return true; if (!(other instanceof Group)) return false; return ((Group) other).id == this.id; } } /** A node in a search cluster. This class is multithread safe. 
*/ public static class Node { private final int key; private final String hostname; private final int fs4port; private final int group; private final AtomicBoolean working = new AtomicBoolean(true); private final AtomicLong activeDocuments = new AtomicLong(0); public Node(int key, String hostname, int fs4port, int group) { this.key = key; this.hostname = hostname; this.fs4port = fs4port; this.group = group; } /** Returns the unique and stable distribution key of this node */ public int key() { return key; } public String hostname() { return hostname; } public int fs4port() { return fs4port; } /** Returns the id of this group this node belongs to */ public int group() { return group; } void setWorking(boolean working) { this.working.lazySet(working); } /** Returns whether this node is currently responding to requests */ public boolean isWorking() { return working.get(); } /** Updates the active documents on this node */ void setActiveDocuments(long activeDocuments) { this.activeDocuments.set(activeDocuments); } /** Returns the active documents on this node. If unknown, 0 is returned. */ public long getActiveDocuments() { return this.activeDocuments.get(); } @Override public int hashCode() { return Objects.hash(hostname, fs4port); } @Override public boolean equals(Object o) { if (o == this) return true; if ( ! (o instanceof Node)) return false; Node other = (Node)o; if ( ! Objects.equals(this.hostname, other.hostname)) return false; if ( ! Objects.equals(this.fs4port, other.fs4port)) return false; return true; } @Override public String toString() { return "search node " + hostname + ":" + fs4port + " in group " + group; } } }
class SearchCluster implements NodeManager<SearchCluster.Node> { private static final Logger log = Logger.getLogger(SearchCluster.class.getName()); /** The min active docs a group must have to be considered up, as a % of the average active docs of the other groups */ private final double minActivedocsCoveragePercentage; private final double minGroupCoverage; private final int maxNodesDownPerGroup; private final int size; private final ImmutableMap<Integer, Group> groups; private final ImmutableMultimap<String, Node> nodesByHost; private final ImmutableList<Group> orderedGroups; private final ClusterMonitor<Node> clusterMonitor; private final VipStatus vipStatus; /** * A search node on this local machine having the entire corpus, which we therefore * should prefer to dispatch directly to, or empty if there is no such local search node. * If there is one, we also maintain the VIP status of this container based on the availability * of the corpus on this local node (up + has coverage), such that this node is taken out of rotation * if it only queries this cluster when the local node cannot be used, to avoid unnecessary * cross-node network traffic. 
*/ private final Optional<Node> directDispatchTarget; private final FS4ResourcePool fs4ResourcePool; public SearchCluster(DispatchConfig dispatchConfig, FS4ResourcePool fs4ResourcePool, int containerClusterSize, VipStatus vipStatus) { this(dispatchConfig.minActivedocsPercentage(), dispatchConfig.minGroupCoverage(), dispatchConfig.maxNodesDownPerGroup(), toNodes(dispatchConfig), fs4ResourcePool, containerClusterSize, vipStatus); } public SearchCluster(double minActivedocsCoverage, double minGroupCoverage, int maxNodesDownPerGroup, List<Node> nodes, FS4ResourcePool fs4ResourcePool, int containerClusterSize, VipStatus vipStatus) { this.minActivedocsCoveragePercentage = minActivedocsCoverage; this.minGroupCoverage = minGroupCoverage; this.maxNodesDownPerGroup = maxNodesDownPerGroup; this.size = nodes.size(); this.fs4ResourcePool = fs4ResourcePool; this.vipStatus = vipStatus; ImmutableMap.Builder<Integer, Group> groupsBuilder = new ImmutableMap.Builder<>(); for (Map.Entry<Integer, List<Node>> group : nodes.stream().collect(Collectors.groupingBy(Node::group)).entrySet()) { Group g = new Group(group.getKey(), group.getValue()); groupsBuilder.put(group.getKey(), g); } this.groups = groupsBuilder.build(); LinkedHashMap<Integer, Group> groupIntroductionOrder = new LinkedHashMap<>(); nodes.forEach(node -> groupIntroductionOrder.put(node.group(), groups.get(node.group))); this.orderedGroups = ImmutableList.<Group>builder().addAll(groupIntroductionOrder.values()).build(); ImmutableMultimap.Builder<String, Node> nodesByHostBuilder = new ImmutableMultimap.Builder<>(); for (Node node : nodes) nodesByHostBuilder.put(node.hostname(), node); this.nodesByHost = nodesByHostBuilder.build(); this.directDispatchTarget = findDirectDispatchTarget(HostName.getLocalhost(), size, containerClusterSize, nodesByHost, groups); this.clusterMonitor = new ClusterMonitor<>(this); for (Node node : nodes) { working(node); clusterMonitor.add(node, true); } } private static Optional<Node> 
findDirectDispatchTarget(String selfHostname, int searchClusterSize, int containerClusterSize, ImmutableMultimap<String, Node>nodesByHost, ImmutableMap<Integer, Group> groups) { ImmutableCollection<Node> localSearchNodes = nodesByHost.get(selfHostname); if (localSearchNodes.size() != 1) return Optional.empty(); SearchCluster.Node localSearchNode = localSearchNodes.iterator().next(); SearchCluster.Group localSearchGroup = groups.get(localSearchNode.group()); if (localSearchGroup.nodes().size() != 1) return Optional.empty(); if (containerClusterSize < searchClusterSize) return Optional.empty(); return Optional.of(localSearchNode); } private static ImmutableList<Node> toNodes(DispatchConfig dispatchConfig) { ImmutableList.Builder<Node> nodesBuilder = new ImmutableList.Builder<>(); for (DispatchConfig.Node node : dispatchConfig.node()) nodesBuilder.add(new Node(node.key(), node.host(), node.fs4port(), node.group())); return nodesBuilder.build(); } /** Returns the number of nodes in this cluster (across all groups) */ public int size() { return size; } /** Returns the groups of this cluster as an immutable map indexed by group id */ public ImmutableMap<Integer, Group> groups() { return groups; } /** Returns the groups of this cluster as an immutable list in introduction order */ public ImmutableList<Group> orderedGroups() { return orderedGroups; } /** Returns the n'th (zero-indexed) group in the cluster if possible */ public Optional<Group> group(int n) { if (orderedGroups.size() > n) { return Optional.of(orderedGroups.get(n)); } else { return Optional.empty(); } } /** Returns the number of nodes per group - size()/groups.size() */ public int groupSize() { if (groups.size() == 0) return size(); return size() / groups.size(); } /** * Returns the nodes of this cluster as an immutable map indexed by host. * One host may contain multiple nodes (on different ports), so this is a multi-map. 
*/ public ImmutableMultimap<String, Node> nodesByHost() { return nodesByHost; } /** * Returns the recipient we should dispatch queries directly to (bypassing fdispatch), * or empty if we should not dispatch directly. */ public Optional<Node> directDispatchTarget() { if ( ! directDispatchTarget.isPresent()) return Optional.empty(); SearchCluster.Group localSearchGroup = groups.get(directDispatchTarget.get().group()); if ( ! localSearchGroup.hasSufficientCoverage()) return Optional.empty(); if ( ! directDispatchTarget.get().isWorking()) return Optional.empty(); return directDispatchTarget; } /** Used by the cluster monitor to manage node status */ @Override public void working(Node node) { node.setWorking(true); if (usesDirectDispatchTo(node)) vipStatus.addToRotation(this); } /** Used by the cluster monitor to manage node status */ @Override public void failed(Node node) { node.setWorking(false); if (usesDirectDispatchTo(node)) vipStatus.removeFromRotation(this); } public void groupConnectionFailure(Group group) { group.setHasSufficientCoverage(false); } private void updateSufficientCoverage(Group group, boolean sufficientCoverage) { if (usesDirectDispatchTo(group) && sufficientCoverage != group.hasSufficientCoverage()) { if (sufficientCoverage) { vipStatus.addToRotation(this); } else { vipStatus.removeFromRotation(this); } } group.setHasSufficientCoverage(sufficientCoverage); } private boolean usesDirectDispatchTo(Node node) { if ( ! directDispatchTarget.isPresent()) return false; return directDispatchTarget.get().equals(node); } private boolean usesDirectDispatchTo(Group group) { if ( ! 
directDispatchTarget.isPresent()) return false; return directDispatchTarget.get().group() == group.id(); } /** Used by the cluster monitor to manage node status */ @Override public void ping(Node node, Executor executor) { Pinger pinger = new Pinger(node); FutureTask<Pong> futurePong = new FutureTask<>(pinger); executor.execute(futurePong); Pong pong = getPong(futurePong, node); futurePong.cancel(true); if (pong.badResponse()) clusterMonitor.failed(node, pong.getError(0)); else clusterMonitor.responded(node); } /** * Update statistics after a round of issuing pings. * Note that this doesn't wait for pings to return, so it will typically accumulate data from * last rounds pinging, or potentially (although unlikely) some combination of new and old data. */ @Override private boolean isNodeCoverageSufficient(Group group) { int nodesUp = 0; for (Node node : group.nodes()) { if (node.isWorking()) { nodesUp++; } } int nodes = group.nodes().size(); int nodesAllowedDown = maxNodesDownPerGroup + (int) (((double) nodes * (100.0 - minGroupCoverage)) / 100.0); return nodesUp + nodesAllowedDown >= nodes; } private Pong getPong(FutureTask<Pong> futurePong, Node node) { try { return futurePong.get(clusterMonitor.getConfiguration().getFailLimit(), TimeUnit.MILLISECONDS); } catch (InterruptedException e) { log.log(Level.WARNING, "Exception pinging " + node, e); return new Pong(ErrorMessage.createUnspecifiedError("Ping was interrupted: " + node)); } catch (ExecutionException e) { log.log(Level.WARNING, "Exception pinging " + node, e); return new Pong(ErrorMessage.createUnspecifiedError("Execution was interrupted: " + node)); } catch (TimeoutException e) { return new Pong(ErrorMessage.createNoAnswerWhenPingingNode("Ping thread timed out")); } } private class Pinger implements Callable<Pong> { private final Node node; public Pinger(Node node) { this.node = node; } public Pong call() { try { Pong pong = FastSearcher.ping(new Ping(clusterMonitor.getConfiguration().getRequestTimeout()), 
fs4ResourcePool.getBackend(node.hostname(), node.fs4port()), node.toString()); if (pong.activeDocuments().isPresent()) node.setActiveDocuments(pong.activeDocuments().get()); return pong; } catch (RuntimeException e) { return new Pong(ErrorMessage.createBackendCommunicationError("Exception when pinging " + node + ": " + Exceptions.toMessageString(e))); } } } /** A group in a search cluster. This class is multithread safe. */ public static class Group { private final int id; private final ImmutableList<Node> nodes; private final AtomicBoolean hasSufficientCoverage = new AtomicBoolean(true); private final AtomicLong activeDocuments = new AtomicLong(0); public Group(int id, List<Node> nodes) { this.id = id; this.nodes = ImmutableList.copyOf(nodes); } /** Returns the unique identity of this group */ public int id() { return id; } /** Returns the nodes in this group as an immutable list */ public ImmutableList<Node> nodes() { return nodes; } /** * Returns whether this group has sufficient active documents * (compared to other groups) that is should receive traffic */ public boolean hasSufficientCoverage() { return hasSufficientCoverage.get(); } void setHasSufficientCoverage(boolean sufficientCoverage) { hasSufficientCoverage.lazySet(sufficientCoverage); } void aggregateActiveDocuments() { long activeDocumentsInGroup = 0; for (Node node : nodes) { if (node.isWorking()) { activeDocumentsInGroup += node.getActiveDocuments(); } } activeDocuments.set(activeDocumentsInGroup); } /** Returns the active documents on this node. If unknown, 0 is returned. */ long getActiveDocuments() { return this.activeDocuments.get(); } @Override public String toString() { return "search group " + id; } @Override public int hashCode() { return id; } @Override public boolean equals(Object other) { if (other == this) return true; if (!(other instanceof Group)) return false; return ((Group) other).id == this.id; } } /** A node in a search cluster. This class is multithread safe. 
*/ public static class Node { private final int key; private final String hostname; private final int fs4port; private final int group; private final AtomicBoolean working = new AtomicBoolean(true); private final AtomicLong activeDocuments = new AtomicLong(0); public Node(int key, String hostname, int fs4port, int group) { this.key = key; this.hostname = hostname; this.fs4port = fs4port; this.group = group; } /** Returns the unique and stable distribution key of this node */ public int key() { return key; } public String hostname() { return hostname; } public int fs4port() { return fs4port; } /** Returns the id of this group this node belongs to */ public int group() { return group; } void setWorking(boolean working) { this.working.lazySet(working); } /** Returns whether this node is currently responding to requests */ public boolean isWorking() { return working.get(); } /** Updates the active documents on this node */ void setActiveDocuments(long activeDocuments) { this.activeDocuments.set(activeDocuments); } /** Returns the active documents on this node. If unknown, 0 is returned. */ public long getActiveDocuments() { return this.activeDocuments.get(); } @Override public int hashCode() { return Objects.hash(hostname, fs4port); } @Override public boolean equals(Object o) { if (o == this) return true; if ( ! (o instanceof Node)) return false; Node other = (Node)o; if ( ! Objects.equals(this.hostname, other.hostname)) return false; if ( ! Objects.equals(this.fs4port, other.fs4port)) return false; return true; } @Override public String toString() { return "search node " + hostname + ":" + fs4port + " in group " + group; } } }
We will never choose a group with any connectivity issues? Consider the case of two large groups with some minor connectivity issues such that one node in each group is down. And the case of just one node down and as a consequence the other group receiving all the queries and thus becoming overloaded. I think that to do this correctly you need two passes, but then you start duplicating the logic where you make holistic group decisions in ping complete. Why did you want to make decisions based on connectivity here? Can't we just stick to the decisions made after pinging?
public Optional<SearchInvoker> getSearchInvoker(Query query, List<SearchCluster.Node> nodes) { Map<Integer, SearchInvoker> invokers = new HashMap<>(); for (SearchCluster.Node node : nodes) { if (node.isWorking()) { Backend backend = fs4ResourcePool.getBackend(node.hostname(), node.fs4port()); if (backend.probeConnection()) { invokers.put(node.key(), new FS4SearchInvoker(searcher, query, backend.openChannel(), node)); } else { return Optional.empty(); } } } if (invokers.size() == 1) { return Optional.of(invokers.values().iterator().next()); } else { return Optional.of(new InterleavedSearchInvoker(invokers)); } }
return Optional.empty();
public Optional<SearchInvoker> getSearchInvoker(Query query, List<SearchCluster.Node> nodes) { Map<Integer, SearchInvoker> invokers = new HashMap<>(); for (SearchCluster.Node node : nodes) { if (node.isWorking()) { Backend backend = fs4ResourcePool.getBackend(node.hostname(), node.fs4port()); if (backend.probeConnection()) { invokers.put(node.key(), new FS4SearchInvoker(searcher, query, backend.openChannel(), node)); } else { return Optional.empty(); } } } if (invokers.size() == 1) { return Optional.of(invokers.values().iterator().next()); } else { return Optional.of(new InterleavedSearchInvoker(invokers)); } }
class FS4InvokerFactory { private final FS4ResourcePool fs4ResourcePool; private final VespaBackEndSearcher searcher; private final ImmutableMap<Integer, SearchCluster.Node> nodesByKey; public FS4InvokerFactory(FS4ResourcePool fs4ResourcePool, SearchCluster searchCluster, VespaBackEndSearcher searcher) { this.fs4ResourcePool = fs4ResourcePool; this.searcher = searcher; ImmutableMap.Builder<Integer, SearchCluster.Node> builder = ImmutableMap.builder(); searchCluster.groups().values().forEach(group -> group.nodes().forEach(node -> builder.put(node.key(), node))); this.nodesByKey = builder.build(); } public SearchInvoker getSearchInvoker(Query query, SearchCluster.Node node) { Backend backend = fs4ResourcePool.getBackend(node.hostname(), node.fs4port()); return new FS4SearchInvoker(searcher, query, backend.openChannel(), node); } public FillInvoker getFillInvoker(Query query, SearchCluster.Node node) { return new FS4FillInvoker(searcher, query, fs4ResourcePool, node.hostname(), node.fs4port(), node.key()); } public Optional<FillInvoker> getFillInvoker(Result result) { Collection<Integer> requiredNodes = requiredFillNodes(result); Query query = result.getQuery(); Map<Integer, FillInvoker> invokers = new HashMap<>(); for (Integer distKey : requiredNodes) { SearchCluster.Node node = nodesByKey.get(distKey); if (node == null) { return Optional.empty(); } invokers.put(distKey, getFillInvoker(query, node)); } if (invokers.size() == 1) { return Optional.of(invokers.values().iterator().next()); } else { return Optional.of(new InterleavedFillInvoker(invokers)); } } private static Collection<Integer> requiredFillNodes(Result result) { Set<Integer> requiredNodes = new HashSet<>(); for (Iterator<Hit> i = result.hits().unorderedDeepIterator(); i.hasNext();) { Hit h = i.next(); if (h instanceof FastHit) { FastHit hit = (FastHit) h; requiredNodes.add(hit.getDistributionKey()); } } return requiredNodes; } }
class FS4InvokerFactory { private final FS4ResourcePool fs4ResourcePool; private final VespaBackEndSearcher searcher; private final ImmutableMap<Integer, SearchCluster.Node> nodesByKey; public FS4InvokerFactory(FS4ResourcePool fs4ResourcePool, SearchCluster searchCluster, VespaBackEndSearcher searcher) { this.fs4ResourcePool = fs4ResourcePool; this.searcher = searcher; ImmutableMap.Builder<Integer, SearchCluster.Node> builder = ImmutableMap.builder(); searchCluster.groups().values().forEach(group -> group.nodes().forEach(node -> builder.put(node.key(), node))); this.nodesByKey = builder.build(); } public SearchInvoker getSearchInvoker(Query query, SearchCluster.Node node) { Backend backend = fs4ResourcePool.getBackend(node.hostname(), node.fs4port()); return new FS4SearchInvoker(searcher, query, backend.openChannel(), node); } /** * Create a {@link SearchInvoker} for a list of content nodes. * * @param query the search query being processed * @param nodes pre-selected list of content nodes * @return Optional containing the SearchInvoker or <i>empty</i> if some node in the list is invalid */ public FillInvoker getFillInvoker(Query query, SearchCluster.Node node) { return new FS4FillInvoker(searcher, query, fs4ResourcePool, node.hostname(), node.fs4port(), node.key()); } /** * Create a {@link FillInvoker} for a the hits in a {@link Result}. 
* * @param result the Result containing hits that need to be filled * @return Optional containing the FillInvoker or <i>empty</i> if some hit is from an unknown content node */ public Optional<FillInvoker> getFillInvoker(Result result) { Collection<Integer> requiredNodes = requiredFillNodes(result); Query query = result.getQuery(); Map<Integer, FillInvoker> invokers = new HashMap<>(); for (Integer distKey : requiredNodes) { SearchCluster.Node node = nodesByKey.get(distKey); if (node == null) { return Optional.empty(); } invokers.put(distKey, getFillInvoker(query, node)); } if (invokers.size() == 1) { return Optional.of(invokers.values().iterator().next()); } else { return Optional.of(new InterleavedFillInvoker(invokers)); } } private static Collection<Integer> requiredFillNodes(Result result) { Set<Integer> requiredNodes = new HashSet<>(); for (Iterator<Hit> i = result.hits().unorderedDeepIterator(); i.hasNext();) { Hit h = i.next(); if (h instanceof FastHit) { FastHit hit = (FastHit) h; requiredNodes.add(hit.getDistributionKey()); } } return requiredNodes; } }
I added the probing because in the system test waiting for the pings just wasn't responsive enough to behave correctly. Bailing out here happens when the connectivity fails right now and the query would effectively already fail. The loop in Dispatcher that retries other groups is maybe more suspect -- it tries *number-of-groups* times to find a connected group and tells SearchCluster to fail a group until the next ping iteration (in <1s, though). Perhaps a better version would be to try at most N times (N being 2 or 3) and get that list from the LB in one go. The last choice could be used forcibly without regard for connectivity issues (if *some* result is better than *no* result). SearchCluster could also be extended to take the connection failure with a grain of salt and only fail the group once a threshold is hit. Assuming the connection failure is intermittent the queries-in-flight counter should take care of balancing the load.
public Optional<SearchInvoker> getSearchInvoker(Query query, List<SearchCluster.Node> nodes) { Map<Integer, SearchInvoker> invokers = new HashMap<>(); for (SearchCluster.Node node : nodes) { if (node.isWorking()) { Backend backend = fs4ResourcePool.getBackend(node.hostname(), node.fs4port()); if (backend.probeConnection()) { invokers.put(node.key(), new FS4SearchInvoker(searcher, query, backend.openChannel(), node)); } else { return Optional.empty(); } } } if (invokers.size() == 1) { return Optional.of(invokers.values().iterator().next()); } else { return Optional.of(new InterleavedSearchInvoker(invokers)); } }
return Optional.empty();
public Optional<SearchInvoker> getSearchInvoker(Query query, List<SearchCluster.Node> nodes) { Map<Integer, SearchInvoker> invokers = new HashMap<>(); for (SearchCluster.Node node : nodes) { if (node.isWorking()) { Backend backend = fs4ResourcePool.getBackend(node.hostname(), node.fs4port()); if (backend.probeConnection()) { invokers.put(node.key(), new FS4SearchInvoker(searcher, query, backend.openChannel(), node)); } else { return Optional.empty(); } } } if (invokers.size() == 1) { return Optional.of(invokers.values().iterator().next()); } else { return Optional.of(new InterleavedSearchInvoker(invokers)); } }
class FS4InvokerFactory { private final FS4ResourcePool fs4ResourcePool; private final VespaBackEndSearcher searcher; private final ImmutableMap<Integer, SearchCluster.Node> nodesByKey; public FS4InvokerFactory(FS4ResourcePool fs4ResourcePool, SearchCluster searchCluster, VespaBackEndSearcher searcher) { this.fs4ResourcePool = fs4ResourcePool; this.searcher = searcher; ImmutableMap.Builder<Integer, SearchCluster.Node> builder = ImmutableMap.builder(); searchCluster.groups().values().forEach(group -> group.nodes().forEach(node -> builder.put(node.key(), node))); this.nodesByKey = builder.build(); } public SearchInvoker getSearchInvoker(Query query, SearchCluster.Node node) { Backend backend = fs4ResourcePool.getBackend(node.hostname(), node.fs4port()); return new FS4SearchInvoker(searcher, query, backend.openChannel(), node); } public FillInvoker getFillInvoker(Query query, SearchCluster.Node node) { return new FS4FillInvoker(searcher, query, fs4ResourcePool, node.hostname(), node.fs4port(), node.key()); } public Optional<FillInvoker> getFillInvoker(Result result) { Collection<Integer> requiredNodes = requiredFillNodes(result); Query query = result.getQuery(); Map<Integer, FillInvoker> invokers = new HashMap<>(); for (Integer distKey : requiredNodes) { SearchCluster.Node node = nodesByKey.get(distKey); if (node == null) { return Optional.empty(); } invokers.put(distKey, getFillInvoker(query, node)); } if (invokers.size() == 1) { return Optional.of(invokers.values().iterator().next()); } else { return Optional.of(new InterleavedFillInvoker(invokers)); } } private static Collection<Integer> requiredFillNodes(Result result) { Set<Integer> requiredNodes = new HashSet<>(); for (Iterator<Hit> i = result.hits().unorderedDeepIterator(); i.hasNext();) { Hit h = i.next(); if (h instanceof FastHit) { FastHit hit = (FastHit) h; requiredNodes.add(hit.getDistributionKey()); } } return requiredNodes; } }
class FS4InvokerFactory { private final FS4ResourcePool fs4ResourcePool; private final VespaBackEndSearcher searcher; private final ImmutableMap<Integer, SearchCluster.Node> nodesByKey; public FS4InvokerFactory(FS4ResourcePool fs4ResourcePool, SearchCluster searchCluster, VespaBackEndSearcher searcher) { this.fs4ResourcePool = fs4ResourcePool; this.searcher = searcher; ImmutableMap.Builder<Integer, SearchCluster.Node> builder = ImmutableMap.builder(); searchCluster.groups().values().forEach(group -> group.nodes().forEach(node -> builder.put(node.key(), node))); this.nodesByKey = builder.build(); } public SearchInvoker getSearchInvoker(Query query, SearchCluster.Node node) { Backend backend = fs4ResourcePool.getBackend(node.hostname(), node.fs4port()); return new FS4SearchInvoker(searcher, query, backend.openChannel(), node); } /** * Create a {@link SearchInvoker} for a list of content nodes. * * @param query the search query being processed * @param nodes pre-selected list of content nodes * @return Optional containing the SearchInvoker or <i>empty</i> if some node in the list is invalid */ public FillInvoker getFillInvoker(Query query, SearchCluster.Node node) { return new FS4FillInvoker(searcher, query, fs4ResourcePool, node.hostname(), node.fs4port(), node.key()); } /** * Create a {@link FillInvoker} for a the hits in a {@link Result}. 
* * @param result the Result containing hits that need to be filled * @return Optional containing the FillInvoker or <i>empty</i> if some hit is from an unknown content node */ public Optional<FillInvoker> getFillInvoker(Result result) { Collection<Integer> requiredNodes = requiredFillNodes(result); Query query = result.getQuery(); Map<Integer, FillInvoker> invokers = new HashMap<>(); for (Integer distKey : requiredNodes) { SearchCluster.Node node = nodesByKey.get(distKey); if (node == null) { return Optional.empty(); } invokers.put(distKey, getFillInvoker(query, node)); } if (invokers.size() == 1) { return Optional.of(invokers.values().iterator().next()); } else { return Optional.of(new InterleavedFillInvoker(invokers)); } } private static Collection<Integer> requiredFillNodes(Result result) { Set<Integer> requiredNodes = new HashSet<>(); for (Iterator<Hit> i = result.hits().unorderedDeepIterator(); i.hasNext();) { Hit h = i.next(); if (h instanceof FastHit) { FastHit hit = (FastHit) h; requiredNodes.add(hit.getDistributionKey()); } } return requiredNodes; } }
How does this default value relate to the default from config definition?
public void getConfig(QrStartConfig.Builder qsB) { QrStartConfig.Jvm.Builder internalBuilder = new QrStartConfig.Jvm.Builder(); if (owningCluster.getMemoryPercentage().isPresent()) { internalBuilder.heapSizeAsPercentageOfPhysicalMemory(owningCluster.getMemoryPercentage().get()); } else if (owningCluster.isHostedVespa()) { internalBuilder.heapSizeAsPercentageOfPhysicalMemory(owningCluster.getHostClusterId().isPresent() ? 17 : 60); } qsB.jvm(internalBuilder.directMemorySizeCache(totalCacheSizeMb())); if (owningCluster.isHostedVespa()) { if ((owningCluster.getZone().environment() != Environment.prod) || RegionName.from("us-east-3").equals(owningCluster.getZone().region())) { qsB.jvm.gcopts("-XX:-UseConcMarkSweepGC -XX:+UseG1GC -XX:MaxTenuringThreshold=15"); } else { qsB.jvm.gcopts("-XX:+UseConcMarkSweepGC -XX:MaxTenuringThreshold=15 -XX:NewRatio=1"); } } else { qsB.jvm.gcopts("-XX:+UseConcMarkSweepGC -XX:MaxTenuringThreshold=15 -XX:NewRatio=1"); } }
qsB.jvm.gcopts("-XX:+UseConcMarkSweepGC -XX:MaxTenuringThreshold=15 -XX:NewRatio=1");
public void getConfig(QrStartConfig.Builder qsB) { QrStartConfig.Jvm.Builder internalBuilder = new QrStartConfig.Jvm.Builder(); if (owningCluster.getMemoryPercentage().isPresent()) { internalBuilder.heapSizeAsPercentageOfPhysicalMemory(owningCluster.getMemoryPercentage().get()); } else if (owningCluster.isHostedVespa()) { internalBuilder.heapSizeAsPercentageOfPhysicalMemory(owningCluster.getHostClusterId().isPresent() ? 17 : 60); } qsB.jvm(internalBuilder.directMemorySizeCache(totalCacheSizeMb())); qsB.jvm.gcopts(buildGCOpts(owningCluster.getZone())); }
class ContainerSearch extends ContainerSubsystem<SearchChains> implements IndexInfoConfig.Producer, IlscriptsConfig.Producer, QrSearchersConfig.Producer, QrStartConfig.Producer, QueryProfilesConfig.Producer, SemanticRulesConfig.Producer, PageTemplatesConfig.Producer { private final List<AbstractSearchCluster> systems = new LinkedList<>(); private final Options options; private QueryProfiles queryProfiles; private SemanticRules semanticRules; private PageTemplates pageTemplates; private final ContainerCluster owningCluster; public ContainerSearch(ContainerCluster cluster, SearchChains chains, Options options) { super(chains); this.options = options; this.owningCluster = cluster; cluster.addComponent(getFS4ResourcePool()); } private Component<?, ComponentModel> getFS4ResourcePool() { BundleInstantiationSpecification spec = BundleInstantiationSpecification. getInternalSearcherSpecificationFromStrings(FS4ResourcePool.class.getName(), null); return new Component<>(new ComponentModel(spec)); } public void connectSearchClusters(Map<String, AbstractSearchCluster> searchClusters) { systems.addAll(searchClusters.values()); initializeSearchChains(searchClusters); } public void initializeSearchChains(Map<String, ? 
extends AbstractSearchCluster> searchClusters) { getChains().initialize(searchClusters); QrsCache defaultCacheOptions = getOptions().cacheSettings.get(""); if (defaultCacheOptions != null) { for (LocalProvider localProvider: getChains().localProviders()) { localProvider.setCacheSize(defaultCacheOptions.size); } } for (LocalProvider localProvider: getChains().localProviders()) { QrsCache cacheOptions = getOptions().cacheSettings.get(localProvider.getClusterName()); if (cacheOptions != null) { localProvider.setCacheSize(cacheOptions.size); } } } public void setQueryProfiles(QueryProfiles queryProfiles) { this.queryProfiles = queryProfiles; } public void setSemanticRules(SemanticRules semanticRules) { this.semanticRules = semanticRules; } public void setPageTemplates(PageTemplates pageTemplates) { this.pageTemplates = pageTemplates; } @Override public void getConfig(QueryProfilesConfig.Builder builder) { if (queryProfiles!=null) queryProfiles.getConfig(builder); } @Override public void getConfig(SemanticRulesConfig.Builder builder) { if (semanticRules!=null) semanticRules.getConfig(builder); } @Override public void getConfig(PageTemplatesConfig.Builder builder) { if (pageTemplates!=null) pageTemplates.getConfig(builder); } @Override private int totalCacheSizeMb() { return totalHttpProviderCacheSize(); } private int totalHttpProviderCacheSize() { int totalCacheSizeMb = 0; for (HttpProvider provider: getChains().httpProviders()) totalCacheSizeMb += provider.cacheSizeMB(); return totalCacheSizeMb; } @Override public void getConfig(IndexInfoConfig.Builder builder) { for (AbstractSearchCluster sc : systems) { sc.getConfig(builder); } } @Override public void getConfig(IlscriptsConfig.Builder builder) { for (AbstractSearchCluster sc : systems) { sc.getConfig(builder); } } @Override public void getConfig(QrSearchersConfig.Builder builder) { for (int i = 0; i < systems.size(); i++) { AbstractSearchCluster sys = findClusterWithId(systems, i); 
QrSearchersConfig.Searchcluster.Builder scB = new QrSearchersConfig.Searchcluster.Builder(). name(sys.getClusterName()); for (AbstractSearchCluster.SearchDefinitionSpec spec : sys.getLocalSDS()) { scB.searchdef(spec.getSearchDefinition().getName()); } scB.rankprofiles(new QrSearchersConfig.Searchcluster.Rankprofiles.Builder().configid(sys.getConfigId())); scB.indexingmode(QrSearchersConfig.Searchcluster.Indexingmode.Enum.valueOf(sys.getIndexingModeName())); if (sys instanceof IndexedSearchCluster) { for (Dispatch tld: ((IndexedSearchCluster)sys).getTLDs()) { scB.dispatcher(new QrSearchersConfig.Searchcluster.Dispatcher.Builder(). host(tld.getHostname()). port(tld.getDispatchPort())); } } else { scB.storagecluster(new QrSearchersConfig.Searchcluster.Storagecluster.Builder(). routespec(((StreamingSearchCluster)sys).getStorageRouteSpec())); } builder.searchcluster(scB); } } private static AbstractSearchCluster findClusterWithId(List<AbstractSearchCluster> clusters, int index) { for (AbstractSearchCluster sys : clusters) { if (sys.getClusterIndex() == index) { return sys; } } throw new IllegalArgumentException("No search cluster with index " + index + " exists"); } public Options getOptions() { return options; } /** * Struct that encapsulates qrserver options. */ public static class Options { public Map<String, QrsCache> cacheSettings = new LinkedHashMap<>(); } }
class ContainerSearch extends ContainerSubsystem<SearchChains> implements IndexInfoConfig.Producer, IlscriptsConfig.Producer, QrSearchersConfig.Producer, QrStartConfig.Producer, QueryProfilesConfig.Producer, SemanticRulesConfig.Producer, PageTemplatesConfig.Producer { private final List<AbstractSearchCluster> systems = new LinkedList<>(); private final Options options; private QueryProfiles queryProfiles; private SemanticRules semanticRules; private PageTemplates pageTemplates; private final ContainerCluster owningCluster; public ContainerSearch(ContainerCluster cluster, SearchChains chains, Options options) { super(chains); this.options = options; this.owningCluster = cluster; cluster.addComponent(getFS4ResourcePool()); } private Component<?, ComponentModel> getFS4ResourcePool() { BundleInstantiationSpecification spec = BundleInstantiationSpecification. getInternalSearcherSpecificationFromStrings(FS4ResourcePool.class.getName(), null); return new Component<>(new ComponentModel(spec)); } public void connectSearchClusters(Map<String, AbstractSearchCluster> searchClusters) { systems.addAll(searchClusters.values()); initializeSearchChains(searchClusters); } public void initializeSearchChains(Map<String, ? 
extends AbstractSearchCluster> searchClusters) { getChains().initialize(searchClusters); QrsCache defaultCacheOptions = getOptions().cacheSettings.get(""); if (defaultCacheOptions != null) { for (LocalProvider localProvider: getChains().localProviders()) { localProvider.setCacheSize(defaultCacheOptions.size); } } for (LocalProvider localProvider: getChains().localProviders()) { QrsCache cacheOptions = getOptions().cacheSettings.get(localProvider.getClusterName()); if (cacheOptions != null) { localProvider.setCacheSize(cacheOptions.size); } } } public void setQueryProfiles(QueryProfiles queryProfiles) { this.queryProfiles = queryProfiles; } public void setSemanticRules(SemanticRules semanticRules) { this.semanticRules = semanticRules; } public void setPageTemplates(PageTemplates pageTemplates) { this.pageTemplates = pageTemplates; } @Override public void getConfig(QueryProfilesConfig.Builder builder) { if (queryProfiles!=null) queryProfiles.getConfig(builder); } @Override public void getConfig(SemanticRulesConfig.Builder builder) { if (semanticRules!=null) semanticRules.getConfig(builder); } @Override public void getConfig(PageTemplatesConfig.Builder builder) { if (pageTemplates!=null) pageTemplates.getConfig(builder); } private String buildGCOpts(Zone zone) { Optional<String> gcopts = owningCluster.getGCOpts(); if (gcopts.isPresent()) { return gcopts.get(); } else if (zone.system() == SystemName.dev) { return ContainerCluster.G1GC; } else if (owningCluster.isHostedVespa()) { return ((zone.environment() != Environment.prod) || RegionName.from("us-east-3").equals(zone.region())) ? 
ContainerCluster.G1GC : ContainerCluster.CMS; } else { return ContainerCluster.CMS; } } @Override private int totalCacheSizeMb() { return totalHttpProviderCacheSize(); } private int totalHttpProviderCacheSize() { int totalCacheSizeMb = 0; for (HttpProvider provider: getChains().httpProviders()) totalCacheSizeMb += provider.cacheSizeMB(); return totalCacheSizeMb; } @Override public void getConfig(IndexInfoConfig.Builder builder) { for (AbstractSearchCluster sc : systems) { sc.getConfig(builder); } } @Override public void getConfig(IlscriptsConfig.Builder builder) { for (AbstractSearchCluster sc : systems) { sc.getConfig(builder); } } @Override public void getConfig(QrSearchersConfig.Builder builder) { for (int i = 0; i < systems.size(); i++) { AbstractSearchCluster sys = findClusterWithId(systems, i); QrSearchersConfig.Searchcluster.Builder scB = new QrSearchersConfig.Searchcluster.Builder(). name(sys.getClusterName()); for (AbstractSearchCluster.SearchDefinitionSpec spec : sys.getLocalSDS()) { scB.searchdef(spec.getSearchDefinition().getName()); } scB.rankprofiles(new QrSearchersConfig.Searchcluster.Rankprofiles.Builder().configid(sys.getConfigId())); scB.indexingmode(QrSearchersConfig.Searchcluster.Indexingmode.Enum.valueOf(sys.getIndexingModeName())); if (sys instanceof IndexedSearchCluster) { for (Dispatch tld: ((IndexedSearchCluster)sys).getTLDs()) { scB.dispatcher(new QrSearchersConfig.Searchcluster.Dispatcher.Builder(). host(tld.getHostname()). port(tld.getDispatchPort())); } } else { scB.storagecluster(new QrSearchersConfig.Searchcluster.Storagecluster.Builder(). 
routespec(((StreamingSearchCluster)sys).getStorageRouteSpec())); } builder.searchcluster(scB); } } private static AbstractSearchCluster findClusterWithId(List<AbstractSearchCluster> clusters, int index) { for (AbstractSearchCluster sys : clusters) { if (sys.getClusterIndex() == index) { return sys; } } throw new IllegalArgumentException("No search cluster with index " + index + " exists"); } public Options getOptions() { return options; } /** * Struct that encapsulates qrserver options. */ public static class Options { Map<String, QrsCache> cacheSettings = new LinkedHashMap<>(); } }
Is this safe? Perhaps we should only enable G1 for dev in hosted until Vespa 7.
/**
 * Selects the JVM GC option string for the container JVM in the given zone.
 * Precedence: explicitly configured options on the cluster, then G1 for dev
 * systems, then the hosted-Vespa zone policy, and CMS as the final default.
 */
private String buildGCOpts(Zone zone) {
    Optional<String> gcopts = owningCluster.getGCOpts();
    if (gcopts.isPresent()) return gcopts.get();          // explicit configuration wins
    if (zone.system() == SystemName.dev) return ContainerCluster.G1GC;
    if (owningCluster.isHostedVespa()) {
        // Hosted: G1 outside prod, and in the us-east-3 prod region; CMS elsewhere.
        boolean useG1 = (zone.environment() != Environment.prod)
                        || RegionName.from("us-east-3").equals(zone.region());
        return useG1 ? ContainerCluster.G1GC : ContainerCluster.CMS;
    }
    return ContainerCluster.CMS;
}
} else if (zone.system() == SystemName.dev) {
/**
 * Returns the GC options to pass to the container JVM for the given zone.
 * Order of precedence: cluster-level explicit gcopts, G1 in dev systems,
 * the hosted-Vespa environment/region policy, then CMS by default.
 */
private String buildGCOpts(Zone zone) {
    // Explicitly configured GC options take precedence over any policy below.
    Optional<String> gcopts = owningCluster.getGCOpts();
    if (gcopts.isPresent()) {
        return gcopts.get();
    } else if (zone.system() == SystemName.dev) {
        // Dev systems always run G1.
        return ContainerCluster.G1GC;
    } else if (owningCluster.isHostedVespa()) {
        // Hosted: G1 in all non-prod environments, plus the us-east-3 prod region;
        // CMS in the remaining prod regions.
        return ((zone.environment() != Environment.prod) || RegionName.from("us-east-3").equals(zone.region())) ?
                ContainerCluster.G1GC : ContainerCluster.CMS;
    } else {
        // Self-hosted default.
        return ContainerCluster.CMS;
    }
}
class ContainerSearch extends ContainerSubsystem<SearchChains> implements IndexInfoConfig.Producer, IlscriptsConfig.Producer, QrSearchersConfig.Producer, QrStartConfig.Producer, QueryProfilesConfig.Producer, SemanticRulesConfig.Producer, PageTemplatesConfig.Producer { private final List<AbstractSearchCluster> systems = new LinkedList<>(); private final Options options; private QueryProfiles queryProfiles; private SemanticRules semanticRules; private PageTemplates pageTemplates; private final ContainerCluster owningCluster; public ContainerSearch(ContainerCluster cluster, SearchChains chains, Options options) { super(chains); this.options = options; this.owningCluster = cluster; cluster.addComponent(getFS4ResourcePool()); } private Component<?, ComponentModel> getFS4ResourcePool() { BundleInstantiationSpecification spec = BundleInstantiationSpecification. getInternalSearcherSpecificationFromStrings(FS4ResourcePool.class.getName(), null); return new Component<>(new ComponentModel(spec)); } public void connectSearchClusters(Map<String, AbstractSearchCluster> searchClusters) { systems.addAll(searchClusters.values()); initializeSearchChains(searchClusters); } public void initializeSearchChains(Map<String, ? 
extends AbstractSearchCluster> searchClusters) { getChains().initialize(searchClusters); QrsCache defaultCacheOptions = getOptions().cacheSettings.get(""); if (defaultCacheOptions != null) { for (LocalProvider localProvider: getChains().localProviders()) { localProvider.setCacheSize(defaultCacheOptions.size); } } for (LocalProvider localProvider: getChains().localProviders()) { QrsCache cacheOptions = getOptions().cacheSettings.get(localProvider.getClusterName()); if (cacheOptions != null) { localProvider.setCacheSize(cacheOptions.size); } } } public void setQueryProfiles(QueryProfiles queryProfiles) { this.queryProfiles = queryProfiles; } public void setSemanticRules(SemanticRules semanticRules) { this.semanticRules = semanticRules; } public void setPageTemplates(PageTemplates pageTemplates) { this.pageTemplates = pageTemplates; } @Override public void getConfig(QueryProfilesConfig.Builder builder) { if (queryProfiles!=null) queryProfiles.getConfig(builder); } @Override public void getConfig(SemanticRulesConfig.Builder builder) { if (semanticRules!=null) semanticRules.getConfig(builder); } @Override public void getConfig(PageTemplatesConfig.Builder builder) { if (pageTemplates!=null) pageTemplates.getConfig(builder); } @Override public void getConfig(QrStartConfig.Builder qsB) { QrStartConfig.Jvm.Builder internalBuilder = new QrStartConfig.Jvm.Builder(); if (owningCluster.getMemoryPercentage().isPresent()) { internalBuilder.heapSizeAsPercentageOfPhysicalMemory(owningCluster.getMemoryPercentage().get()); } else if (owningCluster.isHostedVespa()) { internalBuilder.heapSizeAsPercentageOfPhysicalMemory(owningCluster.getHostClusterId().isPresent() ? 
17 : 60); } qsB.jvm(internalBuilder.directMemorySizeCache(totalCacheSizeMb())); qsB.jvm.gcopts(buildGCOpts(owningCluster.getZone())); } private int totalCacheSizeMb() { return totalHttpProviderCacheSize(); } private int totalHttpProviderCacheSize() { int totalCacheSizeMb = 0; for (HttpProvider provider: getChains().httpProviders()) totalCacheSizeMb += provider.cacheSizeMB(); return totalCacheSizeMb; } @Override public void getConfig(IndexInfoConfig.Builder builder) { for (AbstractSearchCluster sc : systems) { sc.getConfig(builder); } } @Override public void getConfig(IlscriptsConfig.Builder builder) { for (AbstractSearchCluster sc : systems) { sc.getConfig(builder); } } @Override public void getConfig(QrSearchersConfig.Builder builder) { for (int i = 0; i < systems.size(); i++) { AbstractSearchCluster sys = findClusterWithId(systems, i); QrSearchersConfig.Searchcluster.Builder scB = new QrSearchersConfig.Searchcluster.Builder(). name(sys.getClusterName()); for (AbstractSearchCluster.SearchDefinitionSpec spec : sys.getLocalSDS()) { scB.searchdef(spec.getSearchDefinition().getName()); } scB.rankprofiles(new QrSearchersConfig.Searchcluster.Rankprofiles.Builder().configid(sys.getConfigId())); scB.indexingmode(QrSearchersConfig.Searchcluster.Indexingmode.Enum.valueOf(sys.getIndexingModeName())); if (sys instanceof IndexedSearchCluster) { for (Dispatch tld: ((IndexedSearchCluster)sys).getTLDs()) { scB.dispatcher(new QrSearchersConfig.Searchcluster.Dispatcher.Builder(). host(tld.getHostname()). port(tld.getDispatchPort())); } } else { scB.storagecluster(new QrSearchersConfig.Searchcluster.Storagecluster.Builder(). 
routespec(((StreamingSearchCluster)sys).getStorageRouteSpec())); } builder.searchcluster(scB); } } private static AbstractSearchCluster findClusterWithId(List<AbstractSearchCluster> clusters, int index) { for (AbstractSearchCluster sys : clusters) { if (sys.getClusterIndex() == index) { return sys; } } throw new IllegalArgumentException("No search cluster with index " + index + " exists"); } public Options getOptions() { return options; } /** * Struct that encapsulates qrserver options. */ public static class Options { Map<String, QrsCache> cacheSettings = new LinkedHashMap<>(); } }
class ContainerSearch extends ContainerSubsystem<SearchChains> implements IndexInfoConfig.Producer, IlscriptsConfig.Producer, QrSearchersConfig.Producer, QrStartConfig.Producer, QueryProfilesConfig.Producer, SemanticRulesConfig.Producer, PageTemplatesConfig.Producer { private final List<AbstractSearchCluster> systems = new LinkedList<>(); private final Options options; private QueryProfiles queryProfiles; private SemanticRules semanticRules; private PageTemplates pageTemplates; private final ContainerCluster owningCluster; public ContainerSearch(ContainerCluster cluster, SearchChains chains, Options options) { super(chains); this.options = options; this.owningCluster = cluster; cluster.addComponent(getFS4ResourcePool()); } private Component<?, ComponentModel> getFS4ResourcePool() { BundleInstantiationSpecification spec = BundleInstantiationSpecification. getInternalSearcherSpecificationFromStrings(FS4ResourcePool.class.getName(), null); return new Component<>(new ComponentModel(spec)); } public void connectSearchClusters(Map<String, AbstractSearchCluster> searchClusters) { systems.addAll(searchClusters.values()); initializeSearchChains(searchClusters); } public void initializeSearchChains(Map<String, ? 
extends AbstractSearchCluster> searchClusters) { getChains().initialize(searchClusters); QrsCache defaultCacheOptions = getOptions().cacheSettings.get(""); if (defaultCacheOptions != null) { for (LocalProvider localProvider: getChains().localProviders()) { localProvider.setCacheSize(defaultCacheOptions.size); } } for (LocalProvider localProvider: getChains().localProviders()) { QrsCache cacheOptions = getOptions().cacheSettings.get(localProvider.getClusterName()); if (cacheOptions != null) { localProvider.setCacheSize(cacheOptions.size); } } } public void setQueryProfiles(QueryProfiles queryProfiles) { this.queryProfiles = queryProfiles; } public void setSemanticRules(SemanticRules semanticRules) { this.semanticRules = semanticRules; } public void setPageTemplates(PageTemplates pageTemplates) { this.pageTemplates = pageTemplates; } @Override public void getConfig(QueryProfilesConfig.Builder builder) { if (queryProfiles!=null) queryProfiles.getConfig(builder); } @Override public void getConfig(SemanticRulesConfig.Builder builder) { if (semanticRules!=null) semanticRules.getConfig(builder); } @Override public void getConfig(PageTemplatesConfig.Builder builder) { if (pageTemplates!=null) pageTemplates.getConfig(builder); } @Override public void getConfig(QrStartConfig.Builder qsB) { QrStartConfig.Jvm.Builder internalBuilder = new QrStartConfig.Jvm.Builder(); if (owningCluster.getMemoryPercentage().isPresent()) { internalBuilder.heapSizeAsPercentageOfPhysicalMemory(owningCluster.getMemoryPercentage().get()); } else if (owningCluster.isHostedVespa()) { internalBuilder.heapSizeAsPercentageOfPhysicalMemory(owningCluster.getHostClusterId().isPresent() ? 
17 : 60); } qsB.jvm(internalBuilder.directMemorySizeCache(totalCacheSizeMb())); qsB.jvm.gcopts(buildGCOpts(owningCluster.getZone())); } private int totalCacheSizeMb() { return totalHttpProviderCacheSize(); } private int totalHttpProviderCacheSize() { int totalCacheSizeMb = 0; for (HttpProvider provider: getChains().httpProviders()) totalCacheSizeMb += provider.cacheSizeMB(); return totalCacheSizeMb; } @Override public void getConfig(IndexInfoConfig.Builder builder) { for (AbstractSearchCluster sc : systems) { sc.getConfig(builder); } } @Override public void getConfig(IlscriptsConfig.Builder builder) { for (AbstractSearchCluster sc : systems) { sc.getConfig(builder); } } @Override public void getConfig(QrSearchersConfig.Builder builder) { for (int i = 0; i < systems.size(); i++) { AbstractSearchCluster sys = findClusterWithId(systems, i); QrSearchersConfig.Searchcluster.Builder scB = new QrSearchersConfig.Searchcluster.Builder(). name(sys.getClusterName()); for (AbstractSearchCluster.SearchDefinitionSpec spec : sys.getLocalSDS()) { scB.searchdef(spec.getSearchDefinition().getName()); } scB.rankprofiles(new QrSearchersConfig.Searchcluster.Rankprofiles.Builder().configid(sys.getConfigId())); scB.indexingmode(QrSearchersConfig.Searchcluster.Indexingmode.Enum.valueOf(sys.getIndexingModeName())); if (sys instanceof IndexedSearchCluster) { for (Dispatch tld: ((IndexedSearchCluster)sys).getTLDs()) { scB.dispatcher(new QrSearchersConfig.Searchcluster.Dispatcher.Builder(). host(tld.getHostname()). port(tld.getDispatchPort())); } } else { scB.storagecluster(new QrSearchersConfig.Searchcluster.Storagecluster.Builder(). 
routespec(((StreamingSearchCluster)sys).getStorageRouteSpec())); } builder.searchcluster(scB); } } private static AbstractSearchCluster findClusterWithId(List<AbstractSearchCluster> clusters, int index) { for (AbstractSearchCluster sys : clusters) { if (sys.getClusterIndex() == index) { return sys; } } throw new IllegalArgumentException("No search cluster with index " + index + " exists"); } public Options getOptions() { return options; } /** * Struct that encapsulates qrserver options. */ public static class Options { Map<String, QrsCache> cacheSettings = new LinkedHashMap<>(); } }
It's okay to have this logic here since it's temporary; otherwise it should be moved to config-model-fat-amended.
/**
 * Determines the container JVM's GC options for a zone: explicit cluster
 * configuration first, G1 in dev systems, the hosted environment/region
 * policy next, and CMS otherwise.
 */
private String buildGCOpts(Zone zone) {
    Optional<String> gcopts = owningCluster.getGCOpts();
    if (gcopts.isPresent()) {
        // An explicitly configured value overrides every policy.
        return gcopts.get();
    }
    if (zone.system() == SystemName.dev) {
        return ContainerCluster.G1GC;
    }
    if (!owningCluster.isHostedVespa()) {
        return ContainerCluster.CMS;
    }
    // Hosted: CMS only in prod regions other than us-east-3.
    boolean cms = (zone.environment() == Environment.prod)
                  && !RegionName.from("us-east-3").equals(zone.region());
    return cms ? ContainerCluster.CMS : ContainerCluster.G1GC;
}
} else if (owningCluster.isHostedVespa()) {
/**
 * Returns the GC options for the container JVM in the given zone.
 * Precedence: explicit cluster gcopts, G1 for dev systems, the hosted
 * environment/region policy, then CMS as the default.
 */
private String buildGCOpts(Zone zone) {
    // Respect explicitly configured GC options if present.
    Optional<String> gcopts = owningCluster.getGCOpts();
    if (gcopts.isPresent()) {
        return gcopts.get();
    } else if (zone.system() == SystemName.dev) {
        // All dev-system zones use G1.
        return ContainerCluster.G1GC;
    } else if (owningCluster.isHostedVespa()) {
        // Hosted: G1 everywhere except prod, with us-east-3 prod also on G1.
        return ((zone.environment() != Environment.prod) || RegionName.from("us-east-3").equals(zone.region())) ?
                ContainerCluster.G1GC : ContainerCluster.CMS;
    } else {
        return ContainerCluster.CMS;
    }
}
class ContainerSearch extends ContainerSubsystem<SearchChains> implements IndexInfoConfig.Producer, IlscriptsConfig.Producer, QrSearchersConfig.Producer, QrStartConfig.Producer, QueryProfilesConfig.Producer, SemanticRulesConfig.Producer, PageTemplatesConfig.Producer { private final List<AbstractSearchCluster> systems = new LinkedList<>(); private final Options options; private QueryProfiles queryProfiles; private SemanticRules semanticRules; private PageTemplates pageTemplates; private final ContainerCluster owningCluster; public ContainerSearch(ContainerCluster cluster, SearchChains chains, Options options) { super(chains); this.options = options; this.owningCluster = cluster; cluster.addComponent(getFS4ResourcePool()); } private Component<?, ComponentModel> getFS4ResourcePool() { BundleInstantiationSpecification spec = BundleInstantiationSpecification. getInternalSearcherSpecificationFromStrings(FS4ResourcePool.class.getName(), null); return new Component<>(new ComponentModel(spec)); } public void connectSearchClusters(Map<String, AbstractSearchCluster> searchClusters) { systems.addAll(searchClusters.values()); initializeSearchChains(searchClusters); } public void initializeSearchChains(Map<String, ? 
extends AbstractSearchCluster> searchClusters) { getChains().initialize(searchClusters); QrsCache defaultCacheOptions = getOptions().cacheSettings.get(""); if (defaultCacheOptions != null) { for (LocalProvider localProvider: getChains().localProviders()) { localProvider.setCacheSize(defaultCacheOptions.size); } } for (LocalProvider localProvider: getChains().localProviders()) { QrsCache cacheOptions = getOptions().cacheSettings.get(localProvider.getClusterName()); if (cacheOptions != null) { localProvider.setCacheSize(cacheOptions.size); } } } public void setQueryProfiles(QueryProfiles queryProfiles) { this.queryProfiles = queryProfiles; } public void setSemanticRules(SemanticRules semanticRules) { this.semanticRules = semanticRules; } public void setPageTemplates(PageTemplates pageTemplates) { this.pageTemplates = pageTemplates; } @Override public void getConfig(QueryProfilesConfig.Builder builder) { if (queryProfiles!=null) queryProfiles.getConfig(builder); } @Override public void getConfig(SemanticRulesConfig.Builder builder) { if (semanticRules!=null) semanticRules.getConfig(builder); } @Override public void getConfig(PageTemplatesConfig.Builder builder) { if (pageTemplates!=null) pageTemplates.getConfig(builder); } @Override public void getConfig(QrStartConfig.Builder qsB) { QrStartConfig.Jvm.Builder internalBuilder = new QrStartConfig.Jvm.Builder(); if (owningCluster.getMemoryPercentage().isPresent()) { internalBuilder.heapSizeAsPercentageOfPhysicalMemory(owningCluster.getMemoryPercentage().get()); } else if (owningCluster.isHostedVespa()) { internalBuilder.heapSizeAsPercentageOfPhysicalMemory(owningCluster.getHostClusterId().isPresent() ? 
17 : 60); } qsB.jvm(internalBuilder.directMemorySizeCache(totalCacheSizeMb())); qsB.jvm.gcopts(buildGCOpts(owningCluster.getZone())); } private int totalCacheSizeMb() { return totalHttpProviderCacheSize(); } private int totalHttpProviderCacheSize() { int totalCacheSizeMb = 0; for (HttpProvider provider: getChains().httpProviders()) totalCacheSizeMb += provider.cacheSizeMB(); return totalCacheSizeMb; } @Override public void getConfig(IndexInfoConfig.Builder builder) { for (AbstractSearchCluster sc : systems) { sc.getConfig(builder); } } @Override public void getConfig(IlscriptsConfig.Builder builder) { for (AbstractSearchCluster sc : systems) { sc.getConfig(builder); } } @Override public void getConfig(QrSearchersConfig.Builder builder) { for (int i = 0; i < systems.size(); i++) { AbstractSearchCluster sys = findClusterWithId(systems, i); QrSearchersConfig.Searchcluster.Builder scB = new QrSearchersConfig.Searchcluster.Builder(). name(sys.getClusterName()); for (AbstractSearchCluster.SearchDefinitionSpec spec : sys.getLocalSDS()) { scB.searchdef(spec.getSearchDefinition().getName()); } scB.rankprofiles(new QrSearchersConfig.Searchcluster.Rankprofiles.Builder().configid(sys.getConfigId())); scB.indexingmode(QrSearchersConfig.Searchcluster.Indexingmode.Enum.valueOf(sys.getIndexingModeName())); if (sys instanceof IndexedSearchCluster) { for (Dispatch tld: ((IndexedSearchCluster)sys).getTLDs()) { scB.dispatcher(new QrSearchersConfig.Searchcluster.Dispatcher.Builder(). host(tld.getHostname()). port(tld.getDispatchPort())); } } else { scB.storagecluster(new QrSearchersConfig.Searchcluster.Storagecluster.Builder(). 
routespec(((StreamingSearchCluster)sys).getStorageRouteSpec())); } builder.searchcluster(scB); } } private static AbstractSearchCluster findClusterWithId(List<AbstractSearchCluster> clusters, int index) { for (AbstractSearchCluster sys : clusters) { if (sys.getClusterIndex() == index) { return sys; } } throw new IllegalArgumentException("No search cluster with index " + index + " exists"); } public Options getOptions() { return options; } /** * Struct that encapsulates qrserver options. */ public static class Options { Map<String, QrsCache> cacheSettings = new LinkedHashMap<>(); } }
class ContainerSearch extends ContainerSubsystem<SearchChains> implements IndexInfoConfig.Producer, IlscriptsConfig.Producer, QrSearchersConfig.Producer, QrStartConfig.Producer, QueryProfilesConfig.Producer, SemanticRulesConfig.Producer, PageTemplatesConfig.Producer { private final List<AbstractSearchCluster> systems = new LinkedList<>(); private final Options options; private QueryProfiles queryProfiles; private SemanticRules semanticRules; private PageTemplates pageTemplates; private final ContainerCluster owningCluster; public ContainerSearch(ContainerCluster cluster, SearchChains chains, Options options) { super(chains); this.options = options; this.owningCluster = cluster; cluster.addComponent(getFS4ResourcePool()); } private Component<?, ComponentModel> getFS4ResourcePool() { BundleInstantiationSpecification spec = BundleInstantiationSpecification. getInternalSearcherSpecificationFromStrings(FS4ResourcePool.class.getName(), null); return new Component<>(new ComponentModel(spec)); } public void connectSearchClusters(Map<String, AbstractSearchCluster> searchClusters) { systems.addAll(searchClusters.values()); initializeSearchChains(searchClusters); } public void initializeSearchChains(Map<String, ? 
extends AbstractSearchCluster> searchClusters) { getChains().initialize(searchClusters); QrsCache defaultCacheOptions = getOptions().cacheSettings.get(""); if (defaultCacheOptions != null) { for (LocalProvider localProvider: getChains().localProviders()) { localProvider.setCacheSize(defaultCacheOptions.size); } } for (LocalProvider localProvider: getChains().localProviders()) { QrsCache cacheOptions = getOptions().cacheSettings.get(localProvider.getClusterName()); if (cacheOptions != null) { localProvider.setCacheSize(cacheOptions.size); } } } public void setQueryProfiles(QueryProfiles queryProfiles) { this.queryProfiles = queryProfiles; } public void setSemanticRules(SemanticRules semanticRules) { this.semanticRules = semanticRules; } public void setPageTemplates(PageTemplates pageTemplates) { this.pageTemplates = pageTemplates; } @Override public void getConfig(QueryProfilesConfig.Builder builder) { if (queryProfiles!=null) queryProfiles.getConfig(builder); } @Override public void getConfig(SemanticRulesConfig.Builder builder) { if (semanticRules!=null) semanticRules.getConfig(builder); } @Override public void getConfig(PageTemplatesConfig.Builder builder) { if (pageTemplates!=null) pageTemplates.getConfig(builder); } @Override public void getConfig(QrStartConfig.Builder qsB) { QrStartConfig.Jvm.Builder internalBuilder = new QrStartConfig.Jvm.Builder(); if (owningCluster.getMemoryPercentage().isPresent()) { internalBuilder.heapSizeAsPercentageOfPhysicalMemory(owningCluster.getMemoryPercentage().get()); } else if (owningCluster.isHostedVespa()) { internalBuilder.heapSizeAsPercentageOfPhysicalMemory(owningCluster.getHostClusterId().isPresent() ? 
17 : 60); } qsB.jvm(internalBuilder.directMemorySizeCache(totalCacheSizeMb())); qsB.jvm.gcopts(buildGCOpts(owningCluster.getZone())); } private int totalCacheSizeMb() { return totalHttpProviderCacheSize(); } private int totalHttpProviderCacheSize() { int totalCacheSizeMb = 0; for (HttpProvider provider: getChains().httpProviders()) totalCacheSizeMb += provider.cacheSizeMB(); return totalCacheSizeMb; } @Override public void getConfig(IndexInfoConfig.Builder builder) { for (AbstractSearchCluster sc : systems) { sc.getConfig(builder); } } @Override public void getConfig(IlscriptsConfig.Builder builder) { for (AbstractSearchCluster sc : systems) { sc.getConfig(builder); } } @Override public void getConfig(QrSearchersConfig.Builder builder) { for (int i = 0; i < systems.size(); i++) { AbstractSearchCluster sys = findClusterWithId(systems, i); QrSearchersConfig.Searchcluster.Builder scB = new QrSearchersConfig.Searchcluster.Builder(). name(sys.getClusterName()); for (AbstractSearchCluster.SearchDefinitionSpec spec : sys.getLocalSDS()) { scB.searchdef(spec.getSearchDefinition().getName()); } scB.rankprofiles(new QrSearchersConfig.Searchcluster.Rankprofiles.Builder().configid(sys.getConfigId())); scB.indexingmode(QrSearchersConfig.Searchcluster.Indexingmode.Enum.valueOf(sys.getIndexingModeName())); if (sys instanceof IndexedSearchCluster) { for (Dispatch tld: ((IndexedSearchCluster)sys).getTLDs()) { scB.dispatcher(new QrSearchersConfig.Searchcluster.Dispatcher.Builder(). host(tld.getHostname()). port(tld.getDispatchPort())); } } else { scB.storagecluster(new QrSearchersConfig.Searchcluster.Storagecluster.Builder(). 
routespec(((StreamingSearchCluster)sys).getStorageRouteSpec())); } builder.searchcluster(scB); } } private static AbstractSearchCluster findClusterWithId(List<AbstractSearchCluster> clusters, int index) { for (AbstractSearchCluster sys : clusters) { if (sys.getClusterIndex() == index) { return sys; } } throw new IllegalArgumentException("No search cluster with index " + index + " exists"); } public Options getOptions() { return options; } /** * Struct that encapsulates qrserver options. */ public static class Options { Map<String, QrsCache> cacheSettings = new LinkedHashMap<>(); } }
Oops, forgot to remove this thing.
/**
 * Verifies that maintenance runs are staggered evenly across a config server
 * cluster: each host gets a distinct delay within the interval, the delays
 * rotate as time advances, and a host not in the cluster falls back to the
 * full interval. Replaces a leftover debug body (System.err.println) that
 * tested nothing.
 */
public void staggering() {
    List<HostName> cluster = Arrays.asList(HostName.from("cfg1"), HostName.from("cfg2"), HostName.from("cfg3"));
    Instant now = Instant.ofEpochMilli(1001);
    Duration interval = Duration.ofMillis(300);
    // Each of the three hosts gets a distinct slot within the 300 ms interval.
    assertEquals(299, Maintainer.staggeredDelay(cluster, HostName.from("cfg1"), now, interval));
    assertEquals(399, Maintainer.staggeredDelay(cluster, HostName.from("cfg2"), now, interval));
    assertEquals(199, Maintainer.staggeredDelay(cluster, HostName.from("cfg3"), now, interval));
    // Advancing the clock by 100 ms rotates the delays accordingly.
    now = Instant.ofEpochMilli(1101);
    assertEquals(199, Maintainer.staggeredDelay(cluster, HostName.from("cfg1"), now, interval));
    assertEquals(299, Maintainer.staggeredDelay(cluster, HostName.from("cfg2"), now, interval));
    assertEquals(399, Maintainer.staggeredDelay(cluster, HostName.from("cfg3"), now, interval));
    // A host outside the cluster simply waits the whole interval.
    assertEquals(300, Maintainer.staggeredDelay(cluster, HostName.from("cfg0"), now, interval));
}
System.err.println(3 * 2 / 3);
/**
 * Verifies staggered maintenance delays across a three-host config server
 * cluster: distinct per-host slots within the interval, rotation of the
 * slots as the clock advances, and the full interval for an unknown host.
 */
public void staggering() {
    List<HostName> cluster = Arrays.asList(HostName.from("cfg1"), HostName.from("cfg2"), HostName.from("cfg3"));
    Instant now = Instant.ofEpochMilli(1001);
    Duration interval = Duration.ofMillis(300);
    // At t=1001 ms the three hosts are assigned distinct delays inside the interval.
    assertEquals(299, Maintainer.staggeredDelay(cluster, HostName.from("cfg1"), now, interval));
    assertEquals(399, Maintainer.staggeredDelay(cluster, HostName.from("cfg2"), now, interval));
    assertEquals(199, Maintainer.staggeredDelay(cluster, HostName.from("cfg3"), now, interval));
    // 100 ms later the assignments rotate by one slot.
    now = Instant.ofEpochMilli(1101);
    assertEquals(199, Maintainer.staggeredDelay(cluster, HostName.from("cfg1"), now, interval));
    assertEquals(299, Maintainer.staggeredDelay(cluster, HostName.from("cfg2"), now, interval));
    assertEquals(399, Maintainer.staggeredDelay(cluster, HostName.from("cfg3"), now, interval));
    // A host not in the cluster gets the full interval as its delay.
    assertEquals(300, Maintainer.staggeredDelay(cluster, HostName.from("cfg0"), now, interval));
}
class MaintainerTest { private ControllerTester tester; @Before public void before() { tester = new ControllerTester(); } @Test public void only_runs_in_permitted_systems() { AtomicInteger executions = new AtomicInteger(); maintainerIn(SystemName.cd, executions).run(); maintainerIn(SystemName.main, executions).run(); assertEquals(1, executions.get()); } @Test private Maintainer maintainerIn(SystemName system, AtomicInteger executions) { return new Maintainer(tester.controller(), Duration.ofDays(1), new JobControl(tester.controller().curator()), "MockMaintainer", EnumSet.of(system)) { @Override protected void maintain() { executions.incrementAndGet(); } }; } }
class MaintainerTest { private ControllerTester tester; @Before public void before() { tester = new ControllerTester(); } @Test public void only_runs_in_permitted_systems() { AtomicInteger executions = new AtomicInteger(); maintainerIn(SystemName.cd, executions).run(); maintainerIn(SystemName.main, executions).run(); assertEquals(1, executions.get()); } @Test private Maintainer maintainerIn(SystemName system, AtomicInteger executions) { return new Maintainer(tester.controller(), Duration.ofDays(1), new JobControl(tester.controller().curator()), "MockMaintainer", EnumSet.of(system)) { @Override protected void maintain() { executions.incrementAndGet(); } }; } }
Currently identical.
/**
 * Produces qr-start config for this container cluster: JVM heap sizing
 * (explicit percentage, or hosted defaults) and GC options.
 * Fix: the outer else branch and the hosted-prod branch emitted the byte-identical
 * CMS option string ("Currently identical." review note) — the duplication is
 * collapsed into a single gcopts call selected by one boolean.
 */
public void getConfig(QrStartConfig.Builder qsB) {
    QrStartConfig.Jvm.Builder internalBuilder = new QrStartConfig.Jvm.Builder();
    if (owningCluster.getMemoryPercentage().isPresent()) {
        internalBuilder.heapSizeAsPercentageOfPhysicalMemory(owningCluster.getMemoryPercentage().get());
    } else if (owningCluster.isHostedVespa()) {
        // Containers co-located with another host cluster get a smaller heap share (17% vs 60%).
        internalBuilder.heapSizeAsPercentageOfPhysicalMemory(owningCluster.getHostClusterId().isPresent() ?
                                                             17 : 60);
    }
    qsB.jvm(internalBuilder.directMemorySizeCache(totalCacheSizeMb()));
    // G1 only in hosted non-prod zones and the us-east-3 prod region; CMS everywhere else.
    boolean useG1 = owningCluster.isHostedVespa()
                    && ((owningCluster.getZone().environment() != Environment.prod)
                        || RegionName.from("us-east-3").equals(owningCluster.getZone().region()));
    qsB.jvm.gcopts(useG1 ? "-XX:-UseConcMarkSweepGC -XX:+UseG1GC -XX:MaxTenuringThreshold=15"
                         : "-XX:+UseConcMarkSweepGC -XX:MaxTenuringThreshold=15 -XX:NewRatio=1");
}
qsB.jvm.gcopts("-XX:+UseConcMarkSweepGC -XX:MaxTenuringThreshold=15 -XX:NewRatio=1");
/**
 * Produces qr-start config for this container cluster: JVM heap sizing
 * (explicit percentage if configured, otherwise hosted defaults), direct
 * memory for the cache, and GC options delegated to buildGCOpts.
 */
public void getConfig(QrStartConfig.Builder qsB) {
    QrStartConfig.Jvm.Builder internalBuilder = new QrStartConfig.Jvm.Builder();
    if (owningCluster.getMemoryPercentage().isPresent()) {
        // An explicitly configured heap percentage always wins.
        internalBuilder.heapSizeAsPercentageOfPhysicalMemory(owningCluster.getMemoryPercentage().get());
    } else if (owningCluster.isHostedVespa()) {
        // Hosted default: 17% when sharing the host with another cluster, 60% otherwise.
        internalBuilder.heapSizeAsPercentageOfPhysicalMemory(owningCluster.getHostClusterId().isPresent() ?
                                                             17 : 60);
    }
    qsB.jvm(internalBuilder.directMemorySizeCache(totalCacheSizeMb()));
    // GC option selection (explicit config / dev / hosted policy / CMS) lives in buildGCOpts.
    qsB.jvm.gcopts(buildGCOpts(owningCluster.getZone()));
}
class ContainerSearch extends ContainerSubsystem<SearchChains> implements IndexInfoConfig.Producer, IlscriptsConfig.Producer, QrSearchersConfig.Producer, QrStartConfig.Producer, QueryProfilesConfig.Producer, SemanticRulesConfig.Producer, PageTemplatesConfig.Producer { private final List<AbstractSearchCluster> systems = new LinkedList<>(); private final Options options; private QueryProfiles queryProfiles; private SemanticRules semanticRules; private PageTemplates pageTemplates; private final ContainerCluster owningCluster; public ContainerSearch(ContainerCluster cluster, SearchChains chains, Options options) { super(chains); this.options = options; this.owningCluster = cluster; cluster.addComponent(getFS4ResourcePool()); } private Component<?, ComponentModel> getFS4ResourcePool() { BundleInstantiationSpecification spec = BundleInstantiationSpecification. getInternalSearcherSpecificationFromStrings(FS4ResourcePool.class.getName(), null); return new Component<>(new ComponentModel(spec)); } public void connectSearchClusters(Map<String, AbstractSearchCluster> searchClusters) { systems.addAll(searchClusters.values()); initializeSearchChains(searchClusters); } public void initializeSearchChains(Map<String, ? 
extends AbstractSearchCluster> searchClusters) { getChains().initialize(searchClusters); QrsCache defaultCacheOptions = getOptions().cacheSettings.get(""); if (defaultCacheOptions != null) { for (LocalProvider localProvider: getChains().localProviders()) { localProvider.setCacheSize(defaultCacheOptions.size); } } for (LocalProvider localProvider: getChains().localProviders()) { QrsCache cacheOptions = getOptions().cacheSettings.get(localProvider.getClusterName()); if (cacheOptions != null) { localProvider.setCacheSize(cacheOptions.size); } } } public void setQueryProfiles(QueryProfiles queryProfiles) { this.queryProfiles = queryProfiles; } public void setSemanticRules(SemanticRules semanticRules) { this.semanticRules = semanticRules; } public void setPageTemplates(PageTemplates pageTemplates) { this.pageTemplates = pageTemplates; } @Override public void getConfig(QueryProfilesConfig.Builder builder) { if (queryProfiles!=null) queryProfiles.getConfig(builder); } @Override public void getConfig(SemanticRulesConfig.Builder builder) { if (semanticRules!=null) semanticRules.getConfig(builder); } @Override public void getConfig(PageTemplatesConfig.Builder builder) { if (pageTemplates!=null) pageTemplates.getConfig(builder); } @Override private int totalCacheSizeMb() { return totalHttpProviderCacheSize(); } private int totalHttpProviderCacheSize() { int totalCacheSizeMb = 0; for (HttpProvider provider: getChains().httpProviders()) totalCacheSizeMb += provider.cacheSizeMB(); return totalCacheSizeMb; } @Override public void getConfig(IndexInfoConfig.Builder builder) { for (AbstractSearchCluster sc : systems) { sc.getConfig(builder); } } @Override public void getConfig(IlscriptsConfig.Builder builder) { for (AbstractSearchCluster sc : systems) { sc.getConfig(builder); } } @Override public void getConfig(QrSearchersConfig.Builder builder) { for (int i = 0; i < systems.size(); i++) { AbstractSearchCluster sys = findClusterWithId(systems, i); 
QrSearchersConfig.Searchcluster.Builder scB = new QrSearchersConfig.Searchcluster.Builder(). name(sys.getClusterName()); for (AbstractSearchCluster.SearchDefinitionSpec spec : sys.getLocalSDS()) { scB.searchdef(spec.getSearchDefinition().getName()); } scB.rankprofiles(new QrSearchersConfig.Searchcluster.Rankprofiles.Builder().configid(sys.getConfigId())); scB.indexingmode(QrSearchersConfig.Searchcluster.Indexingmode.Enum.valueOf(sys.getIndexingModeName())); if (sys instanceof IndexedSearchCluster) { for (Dispatch tld: ((IndexedSearchCluster)sys).getTLDs()) { scB.dispatcher(new QrSearchersConfig.Searchcluster.Dispatcher.Builder(). host(tld.getHostname()). port(tld.getDispatchPort())); } } else { scB.storagecluster(new QrSearchersConfig.Searchcluster.Storagecluster.Builder(). routespec(((StreamingSearchCluster)sys).getStorageRouteSpec())); } builder.searchcluster(scB); } } private static AbstractSearchCluster findClusterWithId(List<AbstractSearchCluster> clusters, int index) { for (AbstractSearchCluster sys : clusters) { if (sys.getClusterIndex() == index) { return sys; } } throw new IllegalArgumentException("No search cluster with index " + index + " exists"); } public Options getOptions() { return options; } /** * Struct that encapsulates qrserver options. */ public static class Options { public Map<String, QrsCache> cacheSettings = new LinkedHashMap<>(); } }
class ContainerSearch extends ContainerSubsystem<SearchChains> implements IndexInfoConfig.Producer, IlscriptsConfig.Producer, QrSearchersConfig.Producer, QrStartConfig.Producer, QueryProfilesConfig.Producer, SemanticRulesConfig.Producer, PageTemplatesConfig.Producer { private final List<AbstractSearchCluster> systems = new LinkedList<>(); private final Options options; private QueryProfiles queryProfiles; private SemanticRules semanticRules; private PageTemplates pageTemplates; private final ContainerCluster owningCluster; public ContainerSearch(ContainerCluster cluster, SearchChains chains, Options options) { super(chains); this.options = options; this.owningCluster = cluster; cluster.addComponent(getFS4ResourcePool()); } private Component<?, ComponentModel> getFS4ResourcePool() { BundleInstantiationSpecification spec = BundleInstantiationSpecification. getInternalSearcherSpecificationFromStrings(FS4ResourcePool.class.getName(), null); return new Component<>(new ComponentModel(spec)); } public void connectSearchClusters(Map<String, AbstractSearchCluster> searchClusters) { systems.addAll(searchClusters.values()); initializeSearchChains(searchClusters); } public void initializeSearchChains(Map<String, ? 
extends AbstractSearchCluster> searchClusters) { getChains().initialize(searchClusters); QrsCache defaultCacheOptions = getOptions().cacheSettings.get(""); if (defaultCacheOptions != null) { for (LocalProvider localProvider: getChains().localProviders()) { localProvider.setCacheSize(defaultCacheOptions.size); } } for (LocalProvider localProvider: getChains().localProviders()) { QrsCache cacheOptions = getOptions().cacheSettings.get(localProvider.getClusterName()); if (cacheOptions != null) { localProvider.setCacheSize(cacheOptions.size); } } } public void setQueryProfiles(QueryProfiles queryProfiles) { this.queryProfiles = queryProfiles; } public void setSemanticRules(SemanticRules semanticRules) { this.semanticRules = semanticRules; } public void setPageTemplates(PageTemplates pageTemplates) { this.pageTemplates = pageTemplates; } @Override public void getConfig(QueryProfilesConfig.Builder builder) { if (queryProfiles!=null) queryProfiles.getConfig(builder); } @Override public void getConfig(SemanticRulesConfig.Builder builder) { if (semanticRules!=null) semanticRules.getConfig(builder); } @Override public void getConfig(PageTemplatesConfig.Builder builder) { if (pageTemplates!=null) pageTemplates.getConfig(builder); } private String buildGCOpts(Zone zone) { Optional<String> gcopts = owningCluster.getGCOpts(); if (gcopts.isPresent()) { return gcopts.get(); } else if (zone.system() == SystemName.dev) { return ContainerCluster.G1GC; } else if (owningCluster.isHostedVespa()) { return ((zone.environment() != Environment.prod) || RegionName.from("us-east-3").equals(zone.region())) ? 
ContainerCluster.G1GC : ContainerCluster.CMS; } else { return ContainerCluster.CMS; } } @Override private int totalCacheSizeMb() { return totalHttpProviderCacheSize(); } private int totalHttpProviderCacheSize() { int totalCacheSizeMb = 0; for (HttpProvider provider: getChains().httpProviders()) totalCacheSizeMb += provider.cacheSizeMB(); return totalCacheSizeMb; } @Override public void getConfig(IndexInfoConfig.Builder builder) { for (AbstractSearchCluster sc : systems) { sc.getConfig(builder); } } @Override public void getConfig(IlscriptsConfig.Builder builder) { for (AbstractSearchCluster sc : systems) { sc.getConfig(builder); } } @Override public void getConfig(QrSearchersConfig.Builder builder) { for (int i = 0; i < systems.size(); i++) { AbstractSearchCluster sys = findClusterWithId(systems, i); QrSearchersConfig.Searchcluster.Builder scB = new QrSearchersConfig.Searchcluster.Builder(). name(sys.getClusterName()); for (AbstractSearchCluster.SearchDefinitionSpec spec : sys.getLocalSDS()) { scB.searchdef(spec.getSearchDefinition().getName()); } scB.rankprofiles(new QrSearchersConfig.Searchcluster.Rankprofiles.Builder().configid(sys.getConfigId())); scB.indexingmode(QrSearchersConfig.Searchcluster.Indexingmode.Enum.valueOf(sys.getIndexingModeName())); if (sys instanceof IndexedSearchCluster) { for (Dispatch tld: ((IndexedSearchCluster)sys).getTLDs()) { scB.dispatcher(new QrSearchersConfig.Searchcluster.Dispatcher.Builder(). host(tld.getHostname()). port(tld.getDispatchPort())); } } else { scB.storagecluster(new QrSearchersConfig.Searchcluster.Storagecluster.Builder(). 
routespec(((StreamingSearchCluster)sys).getStorageRouteSpec())); } builder.searchcluster(scB); } } private static AbstractSearchCluster findClusterWithId(List<AbstractSearchCluster> clusters, int index) { for (AbstractSearchCluster sys : clusters) { if (sys.getClusterIndex() == index) { return sys; } } throw new IllegalArgumentException("No search cluster with index " + index + " exists"); } public Options getOptions() { return options; } /** * Struct that encapsulates qrserver options. */ public static class Options { Map<String, QrsCache> cacheSettings = new LinkedHashMap<>(); } }
No, we do not want to wait. For the applications there shall be no difference going from 6.x to 7 compared to going from 6.x to 6.x+1, unless you are using deprecated features. Then it will break.
private String buildGCOpts(Zone zone) { Optional<String> gcopts = owningCluster.getGCOpts(); if (gcopts.isPresent()) { return gcopts.get(); } else if (zone.system() == SystemName.dev) { return ContainerCluster.G1GC; } else if (owningCluster.isHostedVespa()) { return ((zone.environment() != Environment.prod) || RegionName.from("us-east-3").equals(zone.region())) ? ContainerCluster.G1GC : ContainerCluster.CMS; } else { return ContainerCluster.CMS; } }
} else if (zone.system() == SystemName.dev) {
private String buildGCOpts(Zone zone) { Optional<String> gcopts = owningCluster.getGCOpts(); if (gcopts.isPresent()) { return gcopts.get(); } else if (zone.system() == SystemName.dev) { return ContainerCluster.G1GC; } else if (owningCluster.isHostedVespa()) { return ((zone.environment() != Environment.prod) || RegionName.from("us-east-3").equals(zone.region())) ? ContainerCluster.G1GC : ContainerCluster.CMS; } else { return ContainerCluster.CMS; } }
class ContainerSearch extends ContainerSubsystem<SearchChains> implements IndexInfoConfig.Producer, IlscriptsConfig.Producer, QrSearchersConfig.Producer, QrStartConfig.Producer, QueryProfilesConfig.Producer, SemanticRulesConfig.Producer, PageTemplatesConfig.Producer { private final List<AbstractSearchCluster> systems = new LinkedList<>(); private final Options options; private QueryProfiles queryProfiles; private SemanticRules semanticRules; private PageTemplates pageTemplates; private final ContainerCluster owningCluster; public ContainerSearch(ContainerCluster cluster, SearchChains chains, Options options) { super(chains); this.options = options; this.owningCluster = cluster; cluster.addComponent(getFS4ResourcePool()); } private Component<?, ComponentModel> getFS4ResourcePool() { BundleInstantiationSpecification spec = BundleInstantiationSpecification. getInternalSearcherSpecificationFromStrings(FS4ResourcePool.class.getName(), null); return new Component<>(new ComponentModel(spec)); } public void connectSearchClusters(Map<String, AbstractSearchCluster> searchClusters) { systems.addAll(searchClusters.values()); initializeSearchChains(searchClusters); } public void initializeSearchChains(Map<String, ? 
extends AbstractSearchCluster> searchClusters) { getChains().initialize(searchClusters); QrsCache defaultCacheOptions = getOptions().cacheSettings.get(""); if (defaultCacheOptions != null) { for (LocalProvider localProvider: getChains().localProviders()) { localProvider.setCacheSize(defaultCacheOptions.size); } } for (LocalProvider localProvider: getChains().localProviders()) { QrsCache cacheOptions = getOptions().cacheSettings.get(localProvider.getClusterName()); if (cacheOptions != null) { localProvider.setCacheSize(cacheOptions.size); } } } public void setQueryProfiles(QueryProfiles queryProfiles) { this.queryProfiles = queryProfiles; } public void setSemanticRules(SemanticRules semanticRules) { this.semanticRules = semanticRules; } public void setPageTemplates(PageTemplates pageTemplates) { this.pageTemplates = pageTemplates; } @Override public void getConfig(QueryProfilesConfig.Builder builder) { if (queryProfiles!=null) queryProfiles.getConfig(builder); } @Override public void getConfig(SemanticRulesConfig.Builder builder) { if (semanticRules!=null) semanticRules.getConfig(builder); } @Override public void getConfig(PageTemplatesConfig.Builder builder) { if (pageTemplates!=null) pageTemplates.getConfig(builder); } @Override public void getConfig(QrStartConfig.Builder qsB) { QrStartConfig.Jvm.Builder internalBuilder = new QrStartConfig.Jvm.Builder(); if (owningCluster.getMemoryPercentage().isPresent()) { internalBuilder.heapSizeAsPercentageOfPhysicalMemory(owningCluster.getMemoryPercentage().get()); } else if (owningCluster.isHostedVespa()) { internalBuilder.heapSizeAsPercentageOfPhysicalMemory(owningCluster.getHostClusterId().isPresent() ? 
17 : 60); } qsB.jvm(internalBuilder.directMemorySizeCache(totalCacheSizeMb())); qsB.jvm.gcopts(buildGCOpts(owningCluster.getZone())); } private int totalCacheSizeMb() { return totalHttpProviderCacheSize(); } private int totalHttpProviderCacheSize() { int totalCacheSizeMb = 0; for (HttpProvider provider: getChains().httpProviders()) totalCacheSizeMb += provider.cacheSizeMB(); return totalCacheSizeMb; } @Override public void getConfig(IndexInfoConfig.Builder builder) { for (AbstractSearchCluster sc : systems) { sc.getConfig(builder); } } @Override public void getConfig(IlscriptsConfig.Builder builder) { for (AbstractSearchCluster sc : systems) { sc.getConfig(builder); } } @Override public void getConfig(QrSearchersConfig.Builder builder) { for (int i = 0; i < systems.size(); i++) { AbstractSearchCluster sys = findClusterWithId(systems, i); QrSearchersConfig.Searchcluster.Builder scB = new QrSearchersConfig.Searchcluster.Builder(). name(sys.getClusterName()); for (AbstractSearchCluster.SearchDefinitionSpec spec : sys.getLocalSDS()) { scB.searchdef(spec.getSearchDefinition().getName()); } scB.rankprofiles(new QrSearchersConfig.Searchcluster.Rankprofiles.Builder().configid(sys.getConfigId())); scB.indexingmode(QrSearchersConfig.Searchcluster.Indexingmode.Enum.valueOf(sys.getIndexingModeName())); if (sys instanceof IndexedSearchCluster) { for (Dispatch tld: ((IndexedSearchCluster)sys).getTLDs()) { scB.dispatcher(new QrSearchersConfig.Searchcluster.Dispatcher.Builder(). host(tld.getHostname()). port(tld.getDispatchPort())); } } else { scB.storagecluster(new QrSearchersConfig.Searchcluster.Storagecluster.Builder(). 
routespec(((StreamingSearchCluster)sys).getStorageRouteSpec())); } builder.searchcluster(scB); } } private static AbstractSearchCluster findClusterWithId(List<AbstractSearchCluster> clusters, int index) { for (AbstractSearchCluster sys : clusters) { if (sys.getClusterIndex() == index) { return sys; } } throw new IllegalArgumentException("No search cluster with index " + index + " exists"); } public Options getOptions() { return options; } /** * Struct that encapsulates qrserver options. */ public static class Options { Map<String, QrsCache> cacheSettings = new LinkedHashMap<>(); } }
class ContainerSearch extends ContainerSubsystem<SearchChains> implements IndexInfoConfig.Producer, IlscriptsConfig.Producer, QrSearchersConfig.Producer, QrStartConfig.Producer, QueryProfilesConfig.Producer, SemanticRulesConfig.Producer, PageTemplatesConfig.Producer { private final List<AbstractSearchCluster> systems = new LinkedList<>(); private final Options options; private QueryProfiles queryProfiles; private SemanticRules semanticRules; private PageTemplates pageTemplates; private final ContainerCluster owningCluster; public ContainerSearch(ContainerCluster cluster, SearchChains chains, Options options) { super(chains); this.options = options; this.owningCluster = cluster; cluster.addComponent(getFS4ResourcePool()); } private Component<?, ComponentModel> getFS4ResourcePool() { BundleInstantiationSpecification spec = BundleInstantiationSpecification. getInternalSearcherSpecificationFromStrings(FS4ResourcePool.class.getName(), null); return new Component<>(new ComponentModel(spec)); } public void connectSearchClusters(Map<String, AbstractSearchCluster> searchClusters) { systems.addAll(searchClusters.values()); initializeSearchChains(searchClusters); } public void initializeSearchChains(Map<String, ? 
extends AbstractSearchCluster> searchClusters) { getChains().initialize(searchClusters); QrsCache defaultCacheOptions = getOptions().cacheSettings.get(""); if (defaultCacheOptions != null) { for (LocalProvider localProvider: getChains().localProviders()) { localProvider.setCacheSize(defaultCacheOptions.size); } } for (LocalProvider localProvider: getChains().localProviders()) { QrsCache cacheOptions = getOptions().cacheSettings.get(localProvider.getClusterName()); if (cacheOptions != null) { localProvider.setCacheSize(cacheOptions.size); } } } public void setQueryProfiles(QueryProfiles queryProfiles) { this.queryProfiles = queryProfiles; } public void setSemanticRules(SemanticRules semanticRules) { this.semanticRules = semanticRules; } public void setPageTemplates(PageTemplates pageTemplates) { this.pageTemplates = pageTemplates; } @Override public void getConfig(QueryProfilesConfig.Builder builder) { if (queryProfiles!=null) queryProfiles.getConfig(builder); } @Override public void getConfig(SemanticRulesConfig.Builder builder) { if (semanticRules!=null) semanticRules.getConfig(builder); } @Override public void getConfig(PageTemplatesConfig.Builder builder) { if (pageTemplates!=null) pageTemplates.getConfig(builder); } @Override public void getConfig(QrStartConfig.Builder qsB) { QrStartConfig.Jvm.Builder internalBuilder = new QrStartConfig.Jvm.Builder(); if (owningCluster.getMemoryPercentage().isPresent()) { internalBuilder.heapSizeAsPercentageOfPhysicalMemory(owningCluster.getMemoryPercentage().get()); } else if (owningCluster.isHostedVespa()) { internalBuilder.heapSizeAsPercentageOfPhysicalMemory(owningCluster.getHostClusterId().isPresent() ? 
17 : 60); } qsB.jvm(internalBuilder.directMemorySizeCache(totalCacheSizeMb())); qsB.jvm.gcopts(buildGCOpts(owningCluster.getZone())); } private int totalCacheSizeMb() { return totalHttpProviderCacheSize(); } private int totalHttpProviderCacheSize() { int totalCacheSizeMb = 0; for (HttpProvider provider: getChains().httpProviders()) totalCacheSizeMb += provider.cacheSizeMB(); return totalCacheSizeMb; } @Override public void getConfig(IndexInfoConfig.Builder builder) { for (AbstractSearchCluster sc : systems) { sc.getConfig(builder); } } @Override public void getConfig(IlscriptsConfig.Builder builder) { for (AbstractSearchCluster sc : systems) { sc.getConfig(builder); } } @Override public void getConfig(QrSearchersConfig.Builder builder) { for (int i = 0; i < systems.size(); i++) { AbstractSearchCluster sys = findClusterWithId(systems, i); QrSearchersConfig.Searchcluster.Builder scB = new QrSearchersConfig.Searchcluster.Builder(). name(sys.getClusterName()); for (AbstractSearchCluster.SearchDefinitionSpec spec : sys.getLocalSDS()) { scB.searchdef(spec.getSearchDefinition().getName()); } scB.rankprofiles(new QrSearchersConfig.Searchcluster.Rankprofiles.Builder().configid(sys.getConfigId())); scB.indexingmode(QrSearchersConfig.Searchcluster.Indexingmode.Enum.valueOf(sys.getIndexingModeName())); if (sys instanceof IndexedSearchCluster) { for (Dispatch tld: ((IndexedSearchCluster)sys).getTLDs()) { scB.dispatcher(new QrSearchersConfig.Searchcluster.Dispatcher.Builder(). host(tld.getHostname()). port(tld.getDispatchPort())); } } else { scB.storagecluster(new QrSearchersConfig.Searchcluster.Storagecluster.Builder(). 
routespec(((StreamingSearchCluster)sys).getStorageRouteSpec())); } builder.searchcluster(scB); } } private static AbstractSearchCluster findClusterWithId(List<AbstractSearchCluster> clusters, int index) { for (AbstractSearchCluster sys : clusters) { if (sys.getClusterIndex() == index) { return sys; } } throw new IllegalArgumentException("No search cluster with index " + index + " exists"); } public Options getOptions() { return options; } /** * Struct that encapsulates qrserver options. */ public static class Options { Map<String, QrsCache> cacheSettings = new LinkedHashMap<>(); } }
Agree, hopefully it can go away on Tuesday.
private String buildGCOpts(Zone zone) { Optional<String> gcopts = owningCluster.getGCOpts(); if (gcopts.isPresent()) { return gcopts.get(); } else if (zone.system() == SystemName.dev) { return ContainerCluster.G1GC; } else if (owningCluster.isHostedVespa()) { return ((zone.environment() != Environment.prod) || RegionName.from("us-east-3").equals(zone.region())) ? ContainerCluster.G1GC : ContainerCluster.CMS; } else { return ContainerCluster.CMS; } }
} else if (owningCluster.isHostedVespa()) {
private String buildGCOpts(Zone zone) { Optional<String> gcopts = owningCluster.getGCOpts(); if (gcopts.isPresent()) { return gcopts.get(); } else if (zone.system() == SystemName.dev) { return ContainerCluster.G1GC; } else if (owningCluster.isHostedVespa()) { return ((zone.environment() != Environment.prod) || RegionName.from("us-east-3").equals(zone.region())) ? ContainerCluster.G1GC : ContainerCluster.CMS; } else { return ContainerCluster.CMS; } }
class ContainerSearch extends ContainerSubsystem<SearchChains> implements IndexInfoConfig.Producer, IlscriptsConfig.Producer, QrSearchersConfig.Producer, QrStartConfig.Producer, QueryProfilesConfig.Producer, SemanticRulesConfig.Producer, PageTemplatesConfig.Producer { private final List<AbstractSearchCluster> systems = new LinkedList<>(); private final Options options; private QueryProfiles queryProfiles; private SemanticRules semanticRules; private PageTemplates pageTemplates; private final ContainerCluster owningCluster; public ContainerSearch(ContainerCluster cluster, SearchChains chains, Options options) { super(chains); this.options = options; this.owningCluster = cluster; cluster.addComponent(getFS4ResourcePool()); } private Component<?, ComponentModel> getFS4ResourcePool() { BundleInstantiationSpecification spec = BundleInstantiationSpecification. getInternalSearcherSpecificationFromStrings(FS4ResourcePool.class.getName(), null); return new Component<>(new ComponentModel(spec)); } public void connectSearchClusters(Map<String, AbstractSearchCluster> searchClusters) { systems.addAll(searchClusters.values()); initializeSearchChains(searchClusters); } public void initializeSearchChains(Map<String, ? 
extends AbstractSearchCluster> searchClusters) { getChains().initialize(searchClusters); QrsCache defaultCacheOptions = getOptions().cacheSettings.get(""); if (defaultCacheOptions != null) { for (LocalProvider localProvider: getChains().localProviders()) { localProvider.setCacheSize(defaultCacheOptions.size); } } for (LocalProvider localProvider: getChains().localProviders()) { QrsCache cacheOptions = getOptions().cacheSettings.get(localProvider.getClusterName()); if (cacheOptions != null) { localProvider.setCacheSize(cacheOptions.size); } } } public void setQueryProfiles(QueryProfiles queryProfiles) { this.queryProfiles = queryProfiles; } public void setSemanticRules(SemanticRules semanticRules) { this.semanticRules = semanticRules; } public void setPageTemplates(PageTemplates pageTemplates) { this.pageTemplates = pageTemplates; } @Override public void getConfig(QueryProfilesConfig.Builder builder) { if (queryProfiles!=null) queryProfiles.getConfig(builder); } @Override public void getConfig(SemanticRulesConfig.Builder builder) { if (semanticRules!=null) semanticRules.getConfig(builder); } @Override public void getConfig(PageTemplatesConfig.Builder builder) { if (pageTemplates!=null) pageTemplates.getConfig(builder); } @Override public void getConfig(QrStartConfig.Builder qsB) { QrStartConfig.Jvm.Builder internalBuilder = new QrStartConfig.Jvm.Builder(); if (owningCluster.getMemoryPercentage().isPresent()) { internalBuilder.heapSizeAsPercentageOfPhysicalMemory(owningCluster.getMemoryPercentage().get()); } else if (owningCluster.isHostedVespa()) { internalBuilder.heapSizeAsPercentageOfPhysicalMemory(owningCluster.getHostClusterId().isPresent() ? 
17 : 60); } qsB.jvm(internalBuilder.directMemorySizeCache(totalCacheSizeMb())); qsB.jvm.gcopts(buildGCOpts(owningCluster.getZone())); } private int totalCacheSizeMb() { return totalHttpProviderCacheSize(); } private int totalHttpProviderCacheSize() { int totalCacheSizeMb = 0; for (HttpProvider provider: getChains().httpProviders()) totalCacheSizeMb += provider.cacheSizeMB(); return totalCacheSizeMb; } @Override public void getConfig(IndexInfoConfig.Builder builder) { for (AbstractSearchCluster sc : systems) { sc.getConfig(builder); } } @Override public void getConfig(IlscriptsConfig.Builder builder) { for (AbstractSearchCluster sc : systems) { sc.getConfig(builder); } } @Override public void getConfig(QrSearchersConfig.Builder builder) { for (int i = 0; i < systems.size(); i++) { AbstractSearchCluster sys = findClusterWithId(systems, i); QrSearchersConfig.Searchcluster.Builder scB = new QrSearchersConfig.Searchcluster.Builder(). name(sys.getClusterName()); for (AbstractSearchCluster.SearchDefinitionSpec spec : sys.getLocalSDS()) { scB.searchdef(spec.getSearchDefinition().getName()); } scB.rankprofiles(new QrSearchersConfig.Searchcluster.Rankprofiles.Builder().configid(sys.getConfigId())); scB.indexingmode(QrSearchersConfig.Searchcluster.Indexingmode.Enum.valueOf(sys.getIndexingModeName())); if (sys instanceof IndexedSearchCluster) { for (Dispatch tld: ((IndexedSearchCluster)sys).getTLDs()) { scB.dispatcher(new QrSearchersConfig.Searchcluster.Dispatcher.Builder(). host(tld.getHostname()). port(tld.getDispatchPort())); } } else { scB.storagecluster(new QrSearchersConfig.Searchcluster.Storagecluster.Builder(). 
routespec(((StreamingSearchCluster)sys).getStorageRouteSpec())); } builder.searchcluster(scB); } } private static AbstractSearchCluster findClusterWithId(List<AbstractSearchCluster> clusters, int index) { for (AbstractSearchCluster sys : clusters) { if (sys.getClusterIndex() == index) { return sys; } } throw new IllegalArgumentException("No search cluster with index " + index + " exists"); } public Options getOptions() { return options; } /** * Struct that encapsulates qrserver options. */ public static class Options { Map<String, QrsCache> cacheSettings = new LinkedHashMap<>(); } }
class ContainerSearch extends ContainerSubsystem<SearchChains> implements IndexInfoConfig.Producer, IlscriptsConfig.Producer, QrSearchersConfig.Producer, QrStartConfig.Producer, QueryProfilesConfig.Producer, SemanticRulesConfig.Producer, PageTemplatesConfig.Producer { private final List<AbstractSearchCluster> systems = new LinkedList<>(); private final Options options; private QueryProfiles queryProfiles; private SemanticRules semanticRules; private PageTemplates pageTemplates; private final ContainerCluster owningCluster; public ContainerSearch(ContainerCluster cluster, SearchChains chains, Options options) { super(chains); this.options = options; this.owningCluster = cluster; cluster.addComponent(getFS4ResourcePool()); } private Component<?, ComponentModel> getFS4ResourcePool() { BundleInstantiationSpecification spec = BundleInstantiationSpecification. getInternalSearcherSpecificationFromStrings(FS4ResourcePool.class.getName(), null); return new Component<>(new ComponentModel(spec)); } public void connectSearchClusters(Map<String, AbstractSearchCluster> searchClusters) { systems.addAll(searchClusters.values()); initializeSearchChains(searchClusters); } public void initializeSearchChains(Map<String, ? 
extends AbstractSearchCluster> searchClusters) { getChains().initialize(searchClusters); QrsCache defaultCacheOptions = getOptions().cacheSettings.get(""); if (defaultCacheOptions != null) { for (LocalProvider localProvider: getChains().localProviders()) { localProvider.setCacheSize(defaultCacheOptions.size); } } for (LocalProvider localProvider: getChains().localProviders()) { QrsCache cacheOptions = getOptions().cacheSettings.get(localProvider.getClusterName()); if (cacheOptions != null) { localProvider.setCacheSize(cacheOptions.size); } } } public void setQueryProfiles(QueryProfiles queryProfiles) { this.queryProfiles = queryProfiles; } public void setSemanticRules(SemanticRules semanticRules) { this.semanticRules = semanticRules; } public void setPageTemplates(PageTemplates pageTemplates) { this.pageTemplates = pageTemplates; } @Override public void getConfig(QueryProfilesConfig.Builder builder) { if (queryProfiles!=null) queryProfiles.getConfig(builder); } @Override public void getConfig(SemanticRulesConfig.Builder builder) { if (semanticRules!=null) semanticRules.getConfig(builder); } @Override public void getConfig(PageTemplatesConfig.Builder builder) { if (pageTemplates!=null) pageTemplates.getConfig(builder); } @Override public void getConfig(QrStartConfig.Builder qsB) { QrStartConfig.Jvm.Builder internalBuilder = new QrStartConfig.Jvm.Builder(); if (owningCluster.getMemoryPercentage().isPresent()) { internalBuilder.heapSizeAsPercentageOfPhysicalMemory(owningCluster.getMemoryPercentage().get()); } else if (owningCluster.isHostedVespa()) { internalBuilder.heapSizeAsPercentageOfPhysicalMemory(owningCluster.getHostClusterId().isPresent() ? 
17 : 60); } qsB.jvm(internalBuilder.directMemorySizeCache(totalCacheSizeMb())); qsB.jvm.gcopts(buildGCOpts(owningCluster.getZone())); } private int totalCacheSizeMb() { return totalHttpProviderCacheSize(); } private int totalHttpProviderCacheSize() { int totalCacheSizeMb = 0; for (HttpProvider provider: getChains().httpProviders()) totalCacheSizeMb += provider.cacheSizeMB(); return totalCacheSizeMb; } @Override public void getConfig(IndexInfoConfig.Builder builder) { for (AbstractSearchCluster sc : systems) { sc.getConfig(builder); } } @Override public void getConfig(IlscriptsConfig.Builder builder) { for (AbstractSearchCluster sc : systems) { sc.getConfig(builder); } } @Override public void getConfig(QrSearchersConfig.Builder builder) { for (int i = 0; i < systems.size(); i++) { AbstractSearchCluster sys = findClusterWithId(systems, i); QrSearchersConfig.Searchcluster.Builder scB = new QrSearchersConfig.Searchcluster.Builder(). name(sys.getClusterName()); for (AbstractSearchCluster.SearchDefinitionSpec spec : sys.getLocalSDS()) { scB.searchdef(spec.getSearchDefinition().getName()); } scB.rankprofiles(new QrSearchersConfig.Searchcluster.Rankprofiles.Builder().configid(sys.getConfigId())); scB.indexingmode(QrSearchersConfig.Searchcluster.Indexingmode.Enum.valueOf(sys.getIndexingModeName())); if (sys instanceof IndexedSearchCluster) { for (Dispatch tld: ((IndexedSearchCluster)sys).getTLDs()) { scB.dispatcher(new QrSearchersConfig.Searchcluster.Dispatcher.Builder(). host(tld.getHostname()). port(tld.getDispatchPort())); } } else { scB.storagecluster(new QrSearchersConfig.Searchcluster.Storagecluster.Builder(). 
routespec(((StreamingSearchCluster)sys).getStorageRouteSpec())); } builder.searchcluster(scB); } } private static AbstractSearchCluster findClusterWithId(List<AbstractSearchCluster> clusters, int index) { for (AbstractSearchCluster sys : clusters) { if (sys.getClusterIndex() == index) { return sys; } } throw new IllegalArgumentException("No search cluster with index " + index + " exists"); } public Options getOptions() { return options; } /** * Struct that encapsulates qrserver options. */ public static class Options { Map<String, QrsCache> cacheSettings = new LinkedHashMap<>(); } }
A line break has run off into the night
public FieldUpdate removeFieldUpdate(Field field) { for (Iterator<FieldUpdate> it = fieldUpdates.iterator(); it.hasNext();) { FieldUpdate fieldUpdate = it.next(); if (fieldUpdate.getField().equals(field)) { it.remove(); return fieldUpdate; } } return null; }
return null; }
public FieldUpdate removeFieldUpdate(Field field) { for (Iterator<FieldUpdate> it = fieldUpdates.iterator(); it.hasNext();) { FieldUpdate fieldUpdate = it.next(); if (fieldUpdate.getField().equals(field)) { it.remove(); return fieldUpdate; } } return null; }
class DocumentUpdate extends DocumentOperation implements Iterable<FieldPathUpdate> { public static final int CLASSID = 0x1000 + 6; private DocumentId docId; private final List<FieldUpdate> fieldUpdates; private final List<FieldPathUpdate> fieldPathUpdates; private DocumentType documentType; private Boolean createIfNonExistent; /** * Creates a DocumentUpdate. * * @param docId the ID of the update * @param docType the document type that this update is valid for */ public DocumentUpdate(DocumentType docType, DocumentId docId) { this.docId = docId; this.documentType = docType; this.fieldUpdates = new ArrayList<>(); this.fieldPathUpdates = new ArrayList<>(); } /** * Creates a new document update using a reader */ public DocumentUpdate(DocumentUpdateReader reader) { docId = null; documentType = null; fieldUpdates = new ArrayList<>(); fieldPathUpdates = new ArrayList<>(); reader.read(this); } /** * Creates a DocumentUpdate. * * @param docId the ID of the update * @param docType the document type that this update is valid for */ public DocumentUpdate(DocumentType docType, String docId) { this(docType, new DocumentId(docId)); } public DocumentId getId() { return docId; } /** * Sets the document id of the document to update. * Use only while deserializing - changing the document id after creation has undefined behaviour. */ public void setId(DocumentId id) { docId = id; } private void verifyType(Document doc) { if (!documentType.equals(doc.getDataType())) { throw new IllegalArgumentException( "Document " + doc.getId() + " with type " + doc.getDataType() + " must have same type as update, which is type " + documentType); } } /** * Applies this document update. 
* * @param doc the document to apply the update to * @return a reference to itself * @throws IllegalArgumentException if the document does not have the same document type as this update */ public DocumentUpdate applyTo(Document doc) { verifyType(doc); for (FieldUpdate fieldUpdate : fieldUpdates) { fieldUpdate.applyTo(doc); } for (FieldPathUpdate fieldPathUpdate : fieldPathUpdates) { fieldPathUpdate.applyTo(doc); } return this; } /** * Prune away any field update that will not modify any field in the document. * @param doc document to check against * @return a reference to itself * @throws IllegalArgumentException if the document does not have the same document type as this update */ public DocumentUpdate prune(Document doc) { verifyType(doc); for (Iterator<FieldUpdate> iter = fieldUpdates.iterator(); iter.hasNext();) { FieldUpdate update = iter.next(); if (!update.isEmpty()) { ValueUpdate last = update.getValueUpdate(update.size() - 1); if (last instanceof AssignValueUpdate) { FieldValue currentValue = doc.getFieldValue(update.getField()); if ((currentValue != null) && currentValue.equals(last.getValue())) { iter.remove(); } } else if (last instanceof ClearValueUpdate) { FieldValue currentValue = doc.getFieldValue(update.getField()); if (currentValue == null) { iter.remove(); } else { FieldValue copy = currentValue.clone(); copy.clear(); if (currentValue.equals(copy)) { iter.remove(); } } } } } return this; } /** * Get an unmodifiable list of all field updates that this document update specifies. * * @return a list of all FieldUpdates in this DocumentUpdate * @deprecated Use fieldUpdates() instead. */ @Deprecated public List<FieldUpdate> getFieldUpdates() { return Collections.unmodifiableList(fieldUpdates); } /** * Get an unmodifiable collection of all field updates that this document update specifies. 
* * @return a collection of all FieldUpdates in this DocumentUpdate */ public Collection<FieldUpdate> fieldUpdates() { return Collections.unmodifiableCollection(fieldUpdates); } /** * Get an unmodifiable list of all field path updates this document update specifies. * * @return Returns a list of all field path updates in this document update. * @deprecated Use fieldPathUpdates() instead. */ @Deprecated public List<FieldPathUpdate> getFieldPathUpdates() { return Collections.unmodifiableList(fieldPathUpdates); } /** * Get an unmodifiable collection of all field path updates that this document update specifies. * * @return a collection of all FieldPathUpdates in this DocumentUpdate */ public Collection<FieldPathUpdate> fieldPathUpdates() { return Collections.unmodifiableCollection(fieldPathUpdates); } /** Returns the type of the document this updates * * @return The documentype of the document */ public DocumentType getDocumentType() { return documentType; } /** * Sets the document type. Use only while deserializing - changing the document type after creation * has undefined behaviour. */ public void setDocumentType(DocumentType type) { documentType = type; } /** * Get the field update at the specified index in the list of field updates. * * @param index the index of the FieldUpdate to return * @return the FieldUpdate at the specified index * @throws IndexOutOfBoundsException if index is out of range * @deprecated use getFieldUpdate(Field field) instead. */ @Deprecated public FieldUpdate getFieldUpdate(int index) { return fieldUpdates.get(index); } /** * Replaces the field update at the specified index in the list of field updates. 
* * @param index index of the FieldUpdate to replace * @param upd the FieldUpdate to be stored at the specified position * @return the FieldUpdate previously at the specified position * @throws IndexOutOfBoundsException if index is out of range * @deprecated Use removeFieldUpdate/addFieldUpdate instead */ @Deprecated public FieldUpdate setFieldUpdate(int index, FieldUpdate upd) { FieldUpdate old = fieldUpdates.get(index); fieldUpdates.set(index, upd); return old; } /** * Returns the update for a field * * @param field the field to return the update of * @return the update for the field, or null if that field has no update in this */ public FieldUpdate getFieldUpdate(Field field) { return getFieldUpdateById(field.getId()); } /** Removes all field updates from the list for field updates. */ public void clearFieldUpdates() { fieldUpdates.clear(); } /** * Returns the update for a field name * * @param fieldName the field name to return the update of * @return the update for the field, or null if that field has no update in this */ public FieldUpdate getFieldUpdate(String fieldName) { Field field = documentType.getField(fieldName); return field != null ? getFieldUpdate(field) : null; } private FieldUpdate getFieldUpdateById(Integer fieldId) { for (FieldUpdate fieldUpdate : fieldUpdates) { if (fieldUpdate.getField().getId() == fieldId) { return fieldUpdate; } } return null; } /** * Assigns the field updates of this document update. * This document update receives ownership of the list - it can not be subsequently used * by the caller. Also note that there no assumptions can be made on the order of items * after this call. They might have been joined if for the same field or reordered. 
* * @param fieldUpdates the new list of updates of this * @throws NullPointerException if the argument passed is null */ public void setFieldUpdates(Collection<FieldUpdate> fieldUpdates) { if (fieldUpdates == null) { throw new NullPointerException("The field updates of a document update can not be null"); } clearFieldUpdates(); addFieldUpdates(fieldUpdates); } public void addFieldUpdates(Collection<FieldUpdate> fieldUpdates) { for (FieldUpdate fieldUpdate : fieldUpdates) { addFieldUpdate(fieldUpdate); } } /** * Get the number of field updates in this document update. * * @return the size of the List of FieldUpdates */ public int size() { return fieldUpdates.size(); } /** * Adds the given {@link FieldUpdate} to this DocumentUpdate. If this DocumentUpdate already contains a FieldUpdate * for the named field, the content of the given FieldUpdate is added to the existing one. * * @param update The FieldUpdate to add to this DocumentUpdate. * @return This, to allow chaining. * @throws IllegalArgumentException If the {@link DocumentType} of this DocumentUpdate does not have a corresponding * field. */ public DocumentUpdate addFieldUpdate(FieldUpdate update) { int fieldId = update.getField().getId(); if (documentType.getField(fieldId) == null) { throw new IllegalArgumentException("Document type '" + documentType.getName() + "' does not have field '" + update.getField().getName() + "'."); } FieldUpdate prevUpdate = getFieldUpdateById(fieldId); if (prevUpdate != update) { if (prevUpdate != null) { prevUpdate.addAll(update); } else { fieldUpdates.add(update); } } return this; } /** * Adds a field path update to perform on the document. * * @return a reference to itself. */ public DocumentUpdate addFieldPathUpdate(FieldPathUpdate fieldPathUpdate) { fieldPathUpdates.add(fieldPathUpdate); return this; } /** * Adds all the field- and field path updates of the given document update to this. 
If the given update refers to a * different document or document type than this, this method throws an exception. * * @param update The update whose content to add to this. * @throws IllegalArgumentException If the {@link DocumentId} or {@link DocumentType} of the given DocumentUpdate * does not match the content of this. */ public void addAll(DocumentUpdate update) { if (update == null) { return; } if (!docId.equals(update.docId)) { throw new IllegalArgumentException("Expected " + docId + ", got " + update.docId + "."); } if (!documentType.equals(update.documentType)) { throw new IllegalArgumentException("Expected " + documentType + ", got " + update.documentType + "."); } addFieldUpdates(update.fieldUpdates()); for (FieldPathUpdate pathUpd : update.fieldPathUpdates) { addFieldPathUpdate(pathUpd); } } /** * Removes the field update at the specified position in the list of field updates. * * @param index the index of the FieldUpdate to remove * @return the FieldUpdate previously at the specified position * @throws IndexOutOfBoundsException if index is out of range * @deprecated use removeFieldUpdate(Field field) instead. */ @Deprecated public FieldUpdate removeFieldUpdate(int index) { FieldUpdate prev = getFieldUpdate(index); fieldUpdates.remove(index); return removeFieldUpdate(prev.getField()); } public FieldUpdate removeFieldUpdate(String fieldName) { Field field = documentType.getField(fieldName); return field != null ? removeFieldUpdate(field) : null; } /** * Returns the document type of this document update. 
* * @return the document type of this document update */ public DocumentType getType() { return documentType; } public final void serialize(GrowableByteBuffer buf) { serialize(DocumentSerializerFactory.create42(buf)); } public void serialize(DocumentUpdateWriter data) { data.write(this); } @Override public boolean equals(Object o) { if (this == o) return true; if (!(o instanceof DocumentUpdate)) return false; DocumentUpdate that = (DocumentUpdate) o; if (docId != null ? !docId.equals(that.docId) : that.docId != null) return false; if (documentType != null ? !documentType.equals(that.documentType) : that.documentType != null) return false; if (fieldPathUpdates != null ? !fieldPathUpdates.equals(that.fieldPathUpdates) : that.fieldPathUpdates != null) return false; if (fieldUpdates != null ? !fieldUpdates.equals(that.fieldUpdates) : that.fieldUpdates != null) return false; if (this.getCreateIfNonExistent() != ((DocumentUpdate) o).getCreateIfNonExistent()) return false; return true; } @Override public int hashCode() { int result = docId != null ? docId.hashCode() : 0; result = 31 * result + (fieldUpdates != null ? fieldUpdates.hashCode() : 0); result = 31 * result + (fieldPathUpdates != null ? fieldPathUpdates.hashCode() : 0); result = 31 * result + (documentType != null ? 
documentType.hashCode() : 0); return result; } @Override public String toString() { StringBuilder string = new StringBuilder(); string.append("update of document '"); string.append(docId); string.append("': "); string.append("create-if-non-existent="); string.append(createIfNonExistent); string.append(": "); string.append("["); for (FieldUpdate fieldUpdate : fieldUpdates) { string.append(fieldUpdate).append(" "); } string.append("]"); if (fieldPathUpdates.size() > 0) { string.append(" [ "); for (FieldPathUpdate up : fieldPathUpdates) { string.append(up.toString()).append(" "); } string.append(" ]"); } return string.toString(); } @Override public Iterator<FieldPathUpdate> iterator() { return fieldPathUpdates.iterator(); } /** * Returns whether or not this field update contains any field- or field path updates. * * @return True if this update is empty. */ public boolean isEmpty() { return fieldUpdates.isEmpty() && fieldPathUpdates.isEmpty(); } /** * Sets whether this update should create the document it updates if that document does not exist. * In this case an empty document is created before the update is applied. * * @since 5.17 * @param value Whether the document it updates should be created. */ public void setCreateIfNonExistent(boolean value) { createIfNonExistent = value; } /** * Gets whether this update should create the document it updates if that document does not exist. * * @since 5.17 * @return Whether the document it updates should be created. */ public boolean getCreateIfNonExistent() { return createIfNonExistent != null && createIfNonExistent; } public Optional<Boolean> getOptionalCreateIfNonExistent() { return Optional.ofNullable(createIfNonExistent); } }
class DocumentUpdate extends DocumentOperation implements Iterable<FieldPathUpdate> { public static final int CLASSID = 0x1000 + 6; private DocumentId docId; private final List<FieldUpdate> fieldUpdates; private final List<FieldPathUpdate> fieldPathUpdates; private DocumentType documentType; private Boolean createIfNonExistent; /** * Creates a DocumentUpdate. * * @param docId the ID of the update * @param docType the document type that this update is valid for */ public DocumentUpdate(DocumentType docType, DocumentId docId) { this.docId = docId; this.documentType = docType; this.fieldUpdates = new ArrayList<>(); this.fieldPathUpdates = new ArrayList<>(); } /** * Creates a new document update using a reader */ public DocumentUpdate(DocumentUpdateReader reader) { docId = null; documentType = null; fieldUpdates = new ArrayList<>(); fieldPathUpdates = new ArrayList<>(); reader.read(this); } /** * Creates a DocumentUpdate. * * @param docId the ID of the update * @param docType the document type that this update is valid for */ public DocumentUpdate(DocumentType docType, String docId) { this(docType, new DocumentId(docId)); } public DocumentId getId() { return docId; } /** * Sets the document id of the document to update. * Use only while deserializing - changing the document id after creation has undefined behaviour. */ public void setId(DocumentId id) { docId = id; } private void verifyType(Document doc) { if (!documentType.equals(doc.getDataType())) { throw new IllegalArgumentException( "Document " + doc.getId() + " with type " + doc.getDataType() + " must have same type as update, which is type " + documentType); } } /** * Applies this document update. 
* * @param doc the document to apply the update to * @return a reference to itself * @throws IllegalArgumentException if the document does not have the same document type as this update */ public DocumentUpdate applyTo(Document doc) { verifyType(doc); for (FieldUpdate fieldUpdate : fieldUpdates) { fieldUpdate.applyTo(doc); } for (FieldPathUpdate fieldPathUpdate : fieldPathUpdates) { fieldPathUpdate.applyTo(doc); } return this; } /** * Prune away any field update that will not modify any field in the document. * @param doc document to check against * @return a reference to itself * @throws IllegalArgumentException if the document does not have the same document type as this update */ public DocumentUpdate prune(Document doc) { verifyType(doc); for (Iterator<FieldUpdate> iter = fieldUpdates.iterator(); iter.hasNext();) { FieldUpdate update = iter.next(); if (!update.isEmpty()) { ValueUpdate last = update.getValueUpdate(update.size() - 1); if (last instanceof AssignValueUpdate) { FieldValue currentValue = doc.getFieldValue(update.getField()); if ((currentValue != null) && currentValue.equals(last.getValue())) { iter.remove(); } } else if (last instanceof ClearValueUpdate) { FieldValue currentValue = doc.getFieldValue(update.getField()); if (currentValue == null) { iter.remove(); } else { FieldValue copy = currentValue.clone(); copy.clear(); if (currentValue.equals(copy)) { iter.remove(); } } } } } return this; } /** * Get an unmodifiable list of all field updates that this document update specifies. * * @return a list of all FieldUpdates in this DocumentUpdate * @deprecated Use fieldUpdates() instead. */ @Deprecated public List<FieldUpdate> getFieldUpdates() { return Collections.unmodifiableList(fieldUpdates); } /** * Get an unmodifiable collection of all field updates that this document update specifies. 
* * @return a collection of all FieldUpdates in this DocumentUpdate */ public Collection<FieldUpdate> fieldUpdates() { return Collections.unmodifiableCollection(fieldUpdates); } /** * Get an unmodifiable list of all field path updates this document update specifies. * * @return Returns a list of all field path updates in this document update. * @deprecated Use fieldPathUpdates() instead. */ @Deprecated public List<FieldPathUpdate> getFieldPathUpdates() { return Collections.unmodifiableList(fieldPathUpdates); } /** * Get an unmodifiable collection of all field path updates that this document update specifies. * * @return a collection of all FieldPathUpdates in this DocumentUpdate */ public Collection<FieldPathUpdate> fieldPathUpdates() { return Collections.unmodifiableCollection(fieldPathUpdates); } /** Returns the type of the document this updates * * @return The documentype of the document */ public DocumentType getDocumentType() { return documentType; } /** * Sets the document type. Use only while deserializing - changing the document type after creation * has undefined behaviour. */ public void setDocumentType(DocumentType type) { documentType = type; } /** * Get the field update at the specified index in the list of field updates. * * @param index the index of the FieldUpdate to return * @return the FieldUpdate at the specified index * @throws IndexOutOfBoundsException if index is out of range * @deprecated use getFieldUpdate(Field field) instead. */ @Deprecated public FieldUpdate getFieldUpdate(int index) { return fieldUpdates.get(index); } /** * Replaces the field update at the specified index in the list of field updates. 
* * @param index index of the FieldUpdate to replace * @param upd the FieldUpdate to be stored at the specified position * @return the FieldUpdate previously at the specified position * @throws IndexOutOfBoundsException if index is out of range * @deprecated Use removeFieldUpdate/addFieldUpdate instead */ @Deprecated public FieldUpdate setFieldUpdate(int index, FieldUpdate upd) { FieldUpdate old = fieldUpdates.get(index); fieldUpdates.set(index, upd); return old; } /** * Returns the update for a field * * @param field the field to return the update of * @return the update for the field, or null if that field has no update in this */ public FieldUpdate getFieldUpdate(Field field) { return getFieldUpdateById(field.getId()); } /** Removes all field updates from the list for field updates. */ public void clearFieldUpdates() { fieldUpdates.clear(); } /** * Returns the update for a field name * * @param fieldName the field name to return the update of * @return the update for the field, or null if that field has no update in this */ public FieldUpdate getFieldUpdate(String fieldName) { Field field = documentType.getField(fieldName); return field != null ? getFieldUpdate(field) : null; } private FieldUpdate getFieldUpdateById(Integer fieldId) { for (FieldUpdate fieldUpdate : fieldUpdates) { if (fieldUpdate.getField().getId() == fieldId) { return fieldUpdate; } } return null; } /** * Assigns the field updates of this document update. * This document update receives ownership of the list - it can not be subsequently used * by the caller. Also note that there no assumptions can be made on the order of items * after this call. They might have been joined if for the same field or reordered. 
* * @param fieldUpdates the new list of updates of this * @throws NullPointerException if the argument passed is null */ public void setFieldUpdates(Collection<FieldUpdate> fieldUpdates) { if (fieldUpdates == null) { throw new NullPointerException("The field updates of a document update can not be null"); } clearFieldUpdates(); addFieldUpdates(fieldUpdates); } public void addFieldUpdates(Collection<FieldUpdate> fieldUpdates) { for (FieldUpdate fieldUpdate : fieldUpdates) { addFieldUpdate(fieldUpdate); } } /** * Get the number of field updates in this document update. * * @return the size of the List of FieldUpdates */ public int size() { return fieldUpdates.size(); } /** * Adds the given {@link FieldUpdate} to this DocumentUpdate. If this DocumentUpdate already contains a FieldUpdate * for the named field, the content of the given FieldUpdate is added to the existing one. * * @param update The FieldUpdate to add to this DocumentUpdate. * @return This, to allow chaining. * @throws IllegalArgumentException If the {@link DocumentType} of this DocumentUpdate does not have a corresponding * field. */ public DocumentUpdate addFieldUpdate(FieldUpdate update) { int fieldId = update.getField().getId(); if (documentType.getField(fieldId) == null) { throw new IllegalArgumentException("Document type '" + documentType.getName() + "' does not have field '" + update.getField().getName() + "'."); } FieldUpdate prevUpdate = getFieldUpdateById(fieldId); if (prevUpdate != update) { if (prevUpdate != null) { prevUpdate.addAll(update); } else { fieldUpdates.add(update); } } return this; } /** * Adds a field path update to perform on the document. * * @return a reference to itself. */ public DocumentUpdate addFieldPathUpdate(FieldPathUpdate fieldPathUpdate) { fieldPathUpdates.add(fieldPathUpdate); return this; } /** * Adds all the field- and field path updates of the given document update to this. 
If the given update refers to a * different document or document type than this, this method throws an exception. * * @param update The update whose content to add to this. * @throws IllegalArgumentException If the {@link DocumentId} or {@link DocumentType} of the given DocumentUpdate * does not match the content of this. */ public void addAll(DocumentUpdate update) { if (update == null) { return; } if (!docId.equals(update.docId)) { throw new IllegalArgumentException("Expected " + docId + ", got " + update.docId + "."); } if (!documentType.equals(update.documentType)) { throw new IllegalArgumentException("Expected " + documentType + ", got " + update.documentType + "."); } addFieldUpdates(update.fieldUpdates()); for (FieldPathUpdate pathUpd : update.fieldPathUpdates) { addFieldPathUpdate(pathUpd); } } /** * Removes the field update at the specified position in the list of field updates. * * @param index the index of the FieldUpdate to remove * @return the FieldUpdate previously at the specified position * @throws IndexOutOfBoundsException if index is out of range * @deprecated use removeFieldUpdate(Field field) instead. */ @Deprecated public FieldUpdate removeFieldUpdate(int index) { FieldUpdate prev = getFieldUpdate(index); fieldUpdates.remove(index); return removeFieldUpdate(prev.getField()); } public FieldUpdate removeFieldUpdate(String fieldName) { Field field = documentType.getField(fieldName); return field != null ? removeFieldUpdate(field) : null; } /** * Returns the document type of this document update. 
* * @return the document type of this document update */ public DocumentType getType() { return documentType; } public final void serialize(GrowableByteBuffer buf) { serialize(DocumentSerializerFactory.create42(buf)); } public void serialize(DocumentUpdateWriter data) { data.write(this); } @Override public boolean equals(Object o) { if (this == o) return true; if (!(o instanceof DocumentUpdate)) return false; DocumentUpdate that = (DocumentUpdate) o; if (docId != null ? !docId.equals(that.docId) : that.docId != null) return false; if (documentType != null ? !documentType.equals(that.documentType) : that.documentType != null) return false; if (fieldPathUpdates != null ? !fieldPathUpdates.equals(that.fieldPathUpdates) : that.fieldPathUpdates != null) return false; if (fieldUpdates != null ? !fieldUpdates.equals(that.fieldUpdates) : that.fieldUpdates != null) return false; if (this.getCreateIfNonExistent() != ((DocumentUpdate) o).getCreateIfNonExistent()) return false; return true; } @Override public int hashCode() { int result = docId != null ? docId.hashCode() : 0; result = 31 * result + (fieldUpdates != null ? fieldUpdates.hashCode() : 0); result = 31 * result + (fieldPathUpdates != null ? fieldPathUpdates.hashCode() : 0); result = 31 * result + (documentType != null ? 
documentType.hashCode() : 0); return result; } @Override public String toString() { StringBuilder string = new StringBuilder(); string.append("update of document '"); string.append(docId); string.append("': "); string.append("create-if-non-existent="); string.append(createIfNonExistent); string.append(": "); string.append("["); for (FieldUpdate fieldUpdate : fieldUpdates) { string.append(fieldUpdate).append(" "); } string.append("]"); if (fieldPathUpdates.size() > 0) { string.append(" [ "); for (FieldPathUpdate up : fieldPathUpdates) { string.append(up.toString()).append(" "); } string.append(" ]"); } return string.toString(); } @Override public Iterator<FieldPathUpdate> iterator() { return fieldPathUpdates.iterator(); } /** * Returns whether or not this field update contains any field- or field path updates. * * @return True if this update is empty. */ public boolean isEmpty() { return fieldUpdates.isEmpty() && fieldPathUpdates.isEmpty(); } /** * Sets whether this update should create the document it updates if that document does not exist. * In this case an empty document is created before the update is applied. * * @since 5.17 * @param value Whether the document it updates should be created. */ public void setCreateIfNonExistent(boolean value) { createIfNonExistent = value; } /** * Gets whether this update should create the document it updates if that document does not exist. * * @since 5.17 * @return Whether the document it updates should be created. */ public boolean getCreateIfNonExistent() { return createIfNonExistent != null && createIfNonExistent; } public Optional<Boolean> getOptionalCreateIfNonExistent() { return Optional.ofNullable(createIfNonExistent); } }
Won't this always return `null` instead of `prev` since the update has already been removed from `fieldUpdates`?
public FieldUpdate removeFieldUpdate(int index) { FieldUpdate prev = getFieldUpdate(index); fieldUpdates.remove(index); return removeFieldUpdate(prev.getField()); }
return removeFieldUpdate(prev.getField());
public FieldUpdate removeFieldUpdate(int index) { FieldUpdate prev = getFieldUpdate(index); fieldUpdates.remove(index); return removeFieldUpdate(prev.getField()); }
class DocumentUpdate extends DocumentOperation implements Iterable<FieldPathUpdate> { public static final int CLASSID = 0x1000 + 6; private DocumentId docId; private final List<FieldUpdate> fieldUpdates; private final List<FieldPathUpdate> fieldPathUpdates; private DocumentType documentType; private Boolean createIfNonExistent; /** * Creates a DocumentUpdate. * * @param docId the ID of the update * @param docType the document type that this update is valid for */ public DocumentUpdate(DocumentType docType, DocumentId docId) { this.docId = docId; this.documentType = docType; this.fieldUpdates = new ArrayList<>(); this.fieldPathUpdates = new ArrayList<>(); } /** * Creates a new document update using a reader */ public DocumentUpdate(DocumentUpdateReader reader) { docId = null; documentType = null; fieldUpdates = new ArrayList<>(); fieldPathUpdates = new ArrayList<>(); reader.read(this); } /** * Creates a DocumentUpdate. * * @param docId the ID of the update * @param docType the document type that this update is valid for */ public DocumentUpdate(DocumentType docType, String docId) { this(docType, new DocumentId(docId)); } public DocumentId getId() { return docId; } /** * Sets the document id of the document to update. * Use only while deserializing - changing the document id after creation has undefined behaviour. */ public void setId(DocumentId id) { docId = id; } private void verifyType(Document doc) { if (!documentType.equals(doc.getDataType())) { throw new IllegalArgumentException( "Document " + doc.getId() + " with type " + doc.getDataType() + " must have same type as update, which is type " + documentType); } } /** * Applies this document update. 
* * @param doc the document to apply the update to * @return a reference to itself * @throws IllegalArgumentException if the document does not have the same document type as this update */ public DocumentUpdate applyTo(Document doc) { verifyType(doc); for (FieldUpdate fieldUpdate : fieldUpdates) { fieldUpdate.applyTo(doc); } for (FieldPathUpdate fieldPathUpdate : fieldPathUpdates) { fieldPathUpdate.applyTo(doc); } return this; } /** * Prune away any field update that will not modify any field in the document. * @param doc document to check against * @return a reference to itself * @throws IllegalArgumentException if the document does not have the same document type as this update */ public DocumentUpdate prune(Document doc) { verifyType(doc); for (Iterator<FieldUpdate> iter = fieldUpdates.iterator(); iter.hasNext();) { FieldUpdate update = iter.next(); if (!update.isEmpty()) { ValueUpdate last = update.getValueUpdate(update.size() - 1); if (last instanceof AssignValueUpdate) { FieldValue currentValue = doc.getFieldValue(update.getField()); if ((currentValue != null) && currentValue.equals(last.getValue())) { iter.remove(); } } else if (last instanceof ClearValueUpdate) { FieldValue currentValue = doc.getFieldValue(update.getField()); if (currentValue == null) { iter.remove(); } else { FieldValue copy = currentValue.clone(); copy.clear(); if (currentValue.equals(copy)) { iter.remove(); } } } } } return this; } /** * Get an unmodifiable list of all field updates that this document update specifies. * * @return a list of all FieldUpdates in this DocumentUpdate * @deprecated Use fieldUpdates() instead. */ @Deprecated public List<FieldUpdate> getFieldUpdates() { return Collections.unmodifiableList(fieldUpdates); } /** * Get an unmodifiable collection of all field updates that this document update specifies. 
* * @return a collection of all FieldUpdates in this DocumentUpdate */ public Collection<FieldUpdate> fieldUpdates() { return Collections.unmodifiableCollection(fieldUpdates); } /** * Get an unmodifiable list of all field path updates this document update specifies. * * @return Returns a list of all field path updates in this document update. * @deprecated Use fieldPathUpdates() instead. */ @Deprecated public List<FieldPathUpdate> getFieldPathUpdates() { return Collections.unmodifiableList(fieldPathUpdates); } /** * Get an unmodifiable collection of all field path updates that this document update specifies. * * @return a collection of all FieldPathUpdates in this DocumentUpdate */ public Collection<FieldPathUpdate> fieldPathUpdates() { return Collections.unmodifiableCollection(fieldPathUpdates); } /** Returns the type of the document this updates * * @return The documentype of the document */ public DocumentType getDocumentType() { return documentType; } /** * Sets the document type. Use only while deserializing - changing the document type after creation * has undefined behaviour. */ public void setDocumentType(DocumentType type) { documentType = type; } /** * Get the field update at the specified index in the list of field updates. * * @param index the index of the FieldUpdate to return * @return the FieldUpdate at the specified index * @throws IndexOutOfBoundsException if index is out of range * @deprecated use getFieldUpdate(Field field) instead. */ @Deprecated public FieldUpdate getFieldUpdate(int index) { return fieldUpdates.get(index); } /** * Replaces the field update at the specified index in the list of field updates. 
* * @param index index of the FieldUpdate to replace * @param upd the FieldUpdate to be stored at the specified position * @return the FieldUpdate previously at the specified position * @throws IndexOutOfBoundsException if index is out of range * @deprecated Use removeFieldUpdate/addFieldUpdate instead */ @Deprecated public FieldUpdate setFieldUpdate(int index, FieldUpdate upd) { FieldUpdate old = fieldUpdates.get(index); fieldUpdates.set(index, upd); return old; } /** * Returns the update for a field * * @param field the field to return the update of * @return the update for the field, or null if that field has no update in this */ public FieldUpdate getFieldUpdate(Field field) { return getFieldUpdateById(field.getId()); } /** Removes all field updates from the list for field updates. */ public void clearFieldUpdates() { fieldUpdates.clear(); } /** * Returns the update for a field name * * @param fieldName the field name to return the update of * @return the update for the field, or null if that field has no update in this */ public FieldUpdate getFieldUpdate(String fieldName) { Field field = documentType.getField(fieldName); return field != null ? getFieldUpdate(field) : null; } private FieldUpdate getFieldUpdateById(Integer fieldId) { for (FieldUpdate fieldUpdate : fieldUpdates) { if (fieldUpdate.getField().getId() == fieldId) { return fieldUpdate; } } return null; } /** * Assigns the field updates of this document update. * This document update receives ownership of the list - it can not be subsequently used * by the caller. Also note that there no assumptions can be made on the order of items * after this call. They might have been joined if for the same field or reordered. 
* * @param fieldUpdates the new list of updates of this * @throws NullPointerException if the argument passed is null */ public void setFieldUpdates(Collection<FieldUpdate> fieldUpdates) { if (fieldUpdates == null) { throw new NullPointerException("The field updates of a document update can not be null"); } clearFieldUpdates(); addFieldUpdates(fieldUpdates); } public void addFieldUpdates(Collection<FieldUpdate> fieldUpdates) { for (FieldUpdate fieldUpdate : fieldUpdates) { addFieldUpdate(fieldUpdate); } } /** * Get the number of field updates in this document update. * * @return the size of the List of FieldUpdates */ public int size() { return fieldUpdates.size(); } /** * Adds the given {@link FieldUpdate} to this DocumentUpdate. If this DocumentUpdate already contains a FieldUpdate * for the named field, the content of the given FieldUpdate is added to the existing one. * * @param update The FieldUpdate to add to this DocumentUpdate. * @return This, to allow chaining. * @throws IllegalArgumentException If the {@link DocumentType} of this DocumentUpdate does not have a corresponding * field. */ public DocumentUpdate addFieldUpdate(FieldUpdate update) { int fieldId = update.getField().getId(); if (documentType.getField(fieldId) == null) { throw new IllegalArgumentException("Document type '" + documentType.getName() + "' does not have field '" + update.getField().getName() + "'."); } FieldUpdate prevUpdate = getFieldUpdateById(fieldId); if (prevUpdate != update) { if (prevUpdate != null) { prevUpdate.addAll(update); } else { fieldUpdates.add(update); } } return this; } /** * Adds a field path update to perform on the document. * * @return a reference to itself. */ public DocumentUpdate addFieldPathUpdate(FieldPathUpdate fieldPathUpdate) { fieldPathUpdates.add(fieldPathUpdate); return this; } /** * Adds all the field- and field path updates of the given document update to this. 
If the given update refers to a * different document or document type than this, this method throws an exception. * * @param update The update whose content to add to this. * @throws IllegalArgumentException If the {@link DocumentId} or {@link DocumentType} of the given DocumentUpdate * does not match the content of this. */ public void addAll(DocumentUpdate update) { if (update == null) { return; } if (!docId.equals(update.docId)) { throw new IllegalArgumentException("Expected " + docId + ", got " + update.docId + "."); } if (!documentType.equals(update.documentType)) { throw new IllegalArgumentException("Expected " + documentType + ", got " + update.documentType + "."); } addFieldUpdates(update.fieldUpdates()); for (FieldPathUpdate pathUpd : update.fieldPathUpdates) { addFieldPathUpdate(pathUpd); } } /** * Removes the field update at the specified position in the list of field updates. * * @param index the index of the FieldUpdate to remove * @return the FieldUpdate previously at the specified position * @throws IndexOutOfBoundsException if index is out of range * @deprecated use removeFieldUpdate(Field field) instead. */ @Deprecated public FieldUpdate removeFieldUpdate(Field field) { for (Iterator<FieldUpdate> it = fieldUpdates.iterator(); it.hasNext();) { FieldUpdate fieldUpdate = it.next(); if (fieldUpdate.getField().equals(field)) { it.remove(); return fieldUpdate; } } return null; } public FieldUpdate removeFieldUpdate(String fieldName) { Field field = documentType.getField(fieldName); return field != null ? removeFieldUpdate(field) : null; } /** * Returns the document type of this document update. 
* * @return the document type of this document update */ public DocumentType getType() { return documentType; } public final void serialize(GrowableByteBuffer buf) { serialize(DocumentSerializerFactory.create42(buf)); } public void serialize(DocumentUpdateWriter data) { data.write(this); } @Override public boolean equals(Object o) { if (this == o) return true; if (!(o instanceof DocumentUpdate)) return false; DocumentUpdate that = (DocumentUpdate) o; if (docId != null ? !docId.equals(that.docId) : that.docId != null) return false; if (documentType != null ? !documentType.equals(that.documentType) : that.documentType != null) return false; if (fieldPathUpdates != null ? !fieldPathUpdates.equals(that.fieldPathUpdates) : that.fieldPathUpdates != null) return false; if (fieldUpdates != null ? !fieldUpdates.equals(that.fieldUpdates) : that.fieldUpdates != null) return false; if (this.getCreateIfNonExistent() != ((DocumentUpdate) o).getCreateIfNonExistent()) return false; return true; } @Override public int hashCode() { int result = docId != null ? docId.hashCode() : 0; result = 31 * result + (fieldUpdates != null ? fieldUpdates.hashCode() : 0); result = 31 * result + (fieldPathUpdates != null ? fieldPathUpdates.hashCode() : 0); result = 31 * result + (documentType != null ? 
documentType.hashCode() : 0); return result; } @Override public String toString() { StringBuilder string = new StringBuilder(); string.append("update of document '"); string.append(docId); string.append("': "); string.append("create-if-non-existent="); string.append(createIfNonExistent); string.append(": "); string.append("["); for (FieldUpdate fieldUpdate : fieldUpdates) { string.append(fieldUpdate).append(" "); } string.append("]"); if (fieldPathUpdates.size() > 0) { string.append(" [ "); for (FieldPathUpdate up : fieldPathUpdates) { string.append(up.toString()).append(" "); } string.append(" ]"); } return string.toString(); } @Override public Iterator<FieldPathUpdate> iterator() { return fieldPathUpdates.iterator(); } /** * Returns whether or not this field update contains any field- or field path updates. * * @return True if this update is empty. */ public boolean isEmpty() { return fieldUpdates.isEmpty() && fieldPathUpdates.isEmpty(); } /** * Sets whether this update should create the document it updates if that document does not exist. * In this case an empty document is created before the update is applied. * * @since 5.17 * @param value Whether the document it updates should be created. */ public void setCreateIfNonExistent(boolean value) { createIfNonExistent = value; } /** * Gets whether this update should create the document it updates if that document does not exist. * * @since 5.17 * @return Whether the document it updates should be created. */ public boolean getCreateIfNonExistent() { return createIfNonExistent != null && createIfNonExistent; } public Optional<Boolean> getOptionalCreateIfNonExistent() { return Optional.ofNullable(createIfNonExistent); } }
class DocumentUpdate extends DocumentOperation implements Iterable<FieldPathUpdate> { public static final int CLASSID = 0x1000 + 6; private DocumentId docId; private final List<FieldUpdate> fieldUpdates; private final List<FieldPathUpdate> fieldPathUpdates; private DocumentType documentType; private Boolean createIfNonExistent; /** * Creates a DocumentUpdate. * * @param docId the ID of the update * @param docType the document type that this update is valid for */ public DocumentUpdate(DocumentType docType, DocumentId docId) { this.docId = docId; this.documentType = docType; this.fieldUpdates = new ArrayList<>(); this.fieldPathUpdates = new ArrayList<>(); } /** * Creates a new document update using a reader */ public DocumentUpdate(DocumentUpdateReader reader) { docId = null; documentType = null; fieldUpdates = new ArrayList<>(); fieldPathUpdates = new ArrayList<>(); reader.read(this); } /** * Creates a DocumentUpdate. * * @param docId the ID of the update * @param docType the document type that this update is valid for */ public DocumentUpdate(DocumentType docType, String docId) { this(docType, new DocumentId(docId)); } public DocumentId getId() { return docId; } /** * Sets the document id of the document to update. * Use only while deserializing - changing the document id after creation has undefined behaviour. */ public void setId(DocumentId id) { docId = id; } private void verifyType(Document doc) { if (!documentType.equals(doc.getDataType())) { throw new IllegalArgumentException( "Document " + doc.getId() + " with type " + doc.getDataType() + " must have same type as update, which is type " + documentType); } } /** * Applies this document update. 
* * @param doc the document to apply the update to * @return a reference to itself * @throws IllegalArgumentException if the document does not have the same document type as this update */ public DocumentUpdate applyTo(Document doc) { verifyType(doc); for (FieldUpdate fieldUpdate : fieldUpdates) { fieldUpdate.applyTo(doc); } for (FieldPathUpdate fieldPathUpdate : fieldPathUpdates) { fieldPathUpdate.applyTo(doc); } return this; } /** * Prune away any field update that will not modify any field in the document. * @param doc document to check against * @return a reference to itself * @throws IllegalArgumentException if the document does not have the same document type as this update */ public DocumentUpdate prune(Document doc) { verifyType(doc); for (Iterator<FieldUpdate> iter = fieldUpdates.iterator(); iter.hasNext();) { FieldUpdate update = iter.next(); if (!update.isEmpty()) { ValueUpdate last = update.getValueUpdate(update.size() - 1); if (last instanceof AssignValueUpdate) { FieldValue currentValue = doc.getFieldValue(update.getField()); if ((currentValue != null) && currentValue.equals(last.getValue())) { iter.remove(); } } else if (last instanceof ClearValueUpdate) { FieldValue currentValue = doc.getFieldValue(update.getField()); if (currentValue == null) { iter.remove(); } else { FieldValue copy = currentValue.clone(); copy.clear(); if (currentValue.equals(copy)) { iter.remove(); } } } } } return this; } /** * Get an unmodifiable list of all field updates that this document update specifies. * * @return a list of all FieldUpdates in this DocumentUpdate * @deprecated Use fieldUpdates() instead. */ @Deprecated public List<FieldUpdate> getFieldUpdates() { return Collections.unmodifiableList(fieldUpdates); } /** * Get an unmodifiable collection of all field updates that this document update specifies. 
* * @return a collection of all FieldUpdates in this DocumentUpdate */ public Collection<FieldUpdate> fieldUpdates() { return Collections.unmodifiableCollection(fieldUpdates); } /** * Get an unmodifiable list of all field path updates this document update specifies. * * @return Returns a list of all field path updates in this document update. * @deprecated Use fieldPathUpdates() instead. */ @Deprecated public List<FieldPathUpdate> getFieldPathUpdates() { return Collections.unmodifiableList(fieldPathUpdates); } /** * Get an unmodifiable collection of all field path updates that this document update specifies. * * @return a collection of all FieldPathUpdates in this DocumentUpdate */ public Collection<FieldPathUpdate> fieldPathUpdates() { return Collections.unmodifiableCollection(fieldPathUpdates); } /** Returns the type of the document this updates * * @return The documentype of the document */ public DocumentType getDocumentType() { return documentType; } /** * Sets the document type. Use only while deserializing - changing the document type after creation * has undefined behaviour. */ public void setDocumentType(DocumentType type) { documentType = type; } /** * Get the field update at the specified index in the list of field updates. * * @param index the index of the FieldUpdate to return * @return the FieldUpdate at the specified index * @throws IndexOutOfBoundsException if index is out of range * @deprecated use getFieldUpdate(Field field) instead. */ @Deprecated public FieldUpdate getFieldUpdate(int index) { return fieldUpdates.get(index); } /** * Replaces the field update at the specified index in the list of field updates. 
* * @param index index of the FieldUpdate to replace * @param upd the FieldUpdate to be stored at the specified position * @return the FieldUpdate previously at the specified position * @throws IndexOutOfBoundsException if index is out of range * @deprecated Use removeFieldUpdate/addFieldUpdate instead */ @Deprecated public FieldUpdate setFieldUpdate(int index, FieldUpdate upd) { FieldUpdate old = fieldUpdates.get(index); fieldUpdates.set(index, upd); return old; } /** * Returns the update for a field * * @param field the field to return the update of * @return the update for the field, or null if that field has no update in this */ public FieldUpdate getFieldUpdate(Field field) { return getFieldUpdateById(field.getId()); } /** Removes all field updates from the list for field updates. */ public void clearFieldUpdates() { fieldUpdates.clear(); } /** * Returns the update for a field name * * @param fieldName the field name to return the update of * @return the update for the field, or null if that field has no update in this */ public FieldUpdate getFieldUpdate(String fieldName) { Field field = documentType.getField(fieldName); return field != null ? getFieldUpdate(field) : null; } private FieldUpdate getFieldUpdateById(Integer fieldId) { for (FieldUpdate fieldUpdate : fieldUpdates) { if (fieldUpdate.getField().getId() == fieldId) { return fieldUpdate; } } return null; } /** * Assigns the field updates of this document update. * This document update receives ownership of the list - it can not be subsequently used * by the caller. Also note that there no assumptions can be made on the order of items * after this call. They might have been joined if for the same field or reordered. 
* * @param fieldUpdates the new list of updates of this * @throws NullPointerException if the argument passed is null */ public void setFieldUpdates(Collection<FieldUpdate> fieldUpdates) { if (fieldUpdates == null) { throw new NullPointerException("The field updates of a document update can not be null"); } clearFieldUpdates(); addFieldUpdates(fieldUpdates); } public void addFieldUpdates(Collection<FieldUpdate> fieldUpdates) { for (FieldUpdate fieldUpdate : fieldUpdates) { addFieldUpdate(fieldUpdate); } } /** * Get the number of field updates in this document update. * * @return the size of the List of FieldUpdates */ public int size() { return fieldUpdates.size(); } /** * Adds the given {@link FieldUpdate} to this DocumentUpdate. If this DocumentUpdate already contains a FieldUpdate * for the named field, the content of the given FieldUpdate is added to the existing one. * * @param update The FieldUpdate to add to this DocumentUpdate. * @return This, to allow chaining. * @throws IllegalArgumentException If the {@link DocumentType} of this DocumentUpdate does not have a corresponding * field. */ public DocumentUpdate addFieldUpdate(FieldUpdate update) { int fieldId = update.getField().getId(); if (documentType.getField(fieldId) == null) { throw new IllegalArgumentException("Document type '" + documentType.getName() + "' does not have field '" + update.getField().getName() + "'."); } FieldUpdate prevUpdate = getFieldUpdateById(fieldId); if (prevUpdate != update) { if (prevUpdate != null) { prevUpdate.addAll(update); } else { fieldUpdates.add(update); } } return this; } /** * Adds a field path update to perform on the document. * * @return a reference to itself. */ public DocumentUpdate addFieldPathUpdate(FieldPathUpdate fieldPathUpdate) { fieldPathUpdates.add(fieldPathUpdate); return this; } /** * Adds all the field- and field path updates of the given document update to this. 
If the given update refers to a * different document or document type than this, this method throws an exception. * * @param update The update whose content to add to this. * @throws IllegalArgumentException If the {@link DocumentId} or {@link DocumentType} of the given DocumentUpdate * does not match the content of this. */ public void addAll(DocumentUpdate update) { if (update == null) { return; } if (!docId.equals(update.docId)) { throw new IllegalArgumentException("Expected " + docId + ", got " + update.docId + "."); } if (!documentType.equals(update.documentType)) { throw new IllegalArgumentException("Expected " + documentType + ", got " + update.documentType + "."); } addFieldUpdates(update.fieldUpdates()); for (FieldPathUpdate pathUpd : update.fieldPathUpdates) { addFieldPathUpdate(pathUpd); } } /** * Removes the field update at the specified position in the list of field updates. * * @param index the index of the FieldUpdate to remove * @return the FieldUpdate previously at the specified position * @throws IndexOutOfBoundsException if index is out of range * @deprecated use removeFieldUpdate(Field field) instead. */ @Deprecated public FieldUpdate removeFieldUpdate(Field field) { for (Iterator<FieldUpdate> it = fieldUpdates.iterator(); it.hasNext();) { FieldUpdate fieldUpdate = it.next(); if (fieldUpdate.getField().equals(field)) { it.remove(); return fieldUpdate; } } return null; } public FieldUpdate removeFieldUpdate(String fieldName) { Field field = documentType.getField(fieldName); return field != null ? removeFieldUpdate(field) : null; } /** * Returns the document type of this document update. 
* * @return the document type of this document update */ public DocumentType getType() { return documentType; } public final void serialize(GrowableByteBuffer buf) { serialize(DocumentSerializerFactory.create42(buf)); } public void serialize(DocumentUpdateWriter data) { data.write(this); } @Override public boolean equals(Object o) { if (this == o) return true; if (!(o instanceof DocumentUpdate)) return false; DocumentUpdate that = (DocumentUpdate) o; if (docId != null ? !docId.equals(that.docId) : that.docId != null) return false; if (documentType != null ? !documentType.equals(that.documentType) : that.documentType != null) return false; if (fieldPathUpdates != null ? !fieldPathUpdates.equals(that.fieldPathUpdates) : that.fieldPathUpdates != null) return false; if (fieldUpdates != null ? !fieldUpdates.equals(that.fieldUpdates) : that.fieldUpdates != null) return false; if (this.getCreateIfNonExistent() != ((DocumentUpdate) o).getCreateIfNonExistent()) return false; return true; } @Override public int hashCode() { int result = docId != null ? docId.hashCode() : 0; result = 31 * result + (fieldUpdates != null ? fieldUpdates.hashCode() : 0); result = 31 * result + (fieldPathUpdates != null ? fieldPathUpdates.hashCode() : 0); result = 31 * result + (documentType != null ? 
documentType.hashCode() : 0); return result; } @Override public String toString() { StringBuilder string = new StringBuilder(); string.append("update of document '"); string.append(docId); string.append("': "); string.append("create-if-non-existent="); string.append(createIfNonExistent); string.append(": "); string.append("["); for (FieldUpdate fieldUpdate : fieldUpdates) { string.append(fieldUpdate).append(" "); } string.append("]"); if (fieldPathUpdates.size() > 0) { string.append(" [ "); for (FieldPathUpdate up : fieldPathUpdates) { string.append(up.toString()).append(" "); } string.append(" ]"); } return string.toString(); } @Override public Iterator<FieldPathUpdate> iterator() { return fieldPathUpdates.iterator(); } /** * Returns whether or not this field update contains any field- or field path updates. * * @return True if this update is empty. */ public boolean isEmpty() { return fieldUpdates.isEmpty() && fieldPathUpdates.isEmpty(); } /** * Sets whether this update should create the document it updates if that document does not exist. * In this case an empty document is created before the update is applied. * * @since 5.17 * @param value Whether the document it updates should be created. */ public void setCreateIfNonExistent(boolean value) { createIfNonExistent = value; } /** * Gets whether this update should create the document it updates if that document does not exist. * * @since 5.17 * @return Whether the document it updates should be created. */ public boolean getCreateIfNonExistent() { return createIfNonExistent != null && createIfNonExistent; } public Optional<Boolean> getOptionalCreateIfNonExistent() { return Optional.ofNullable(createIfNonExistent); } }
Will `createIfNonExistent` always have a set `Boolean` at this point? Otherwise, this will now print `null` instead of `false` when not set explicitly. Could use `getCreateIfNonExistent()` instead.
public String toString() { StringBuilder string = new StringBuilder(); string.append("update of document '"); string.append(docId); string.append("': "); string.append("create-if-non-existent="); string.append(createIfNonExistent); string.append(": "); string.append("["); for (FieldUpdate fieldUpdate : fieldUpdates) { string.append(fieldUpdate).append(" "); } string.append("]"); if (fieldPathUpdates.size() > 0) { string.append(" [ "); for (FieldPathUpdate up : fieldPathUpdates) { string.append(up.toString()).append(" "); } string.append(" ]"); } return string.toString(); }
string.append(createIfNonExistent);
public String toString() { StringBuilder string = new StringBuilder(); string.append("update of document '"); string.append(docId); string.append("': "); string.append("create-if-non-existent="); string.append(createIfNonExistent); string.append(": "); string.append("["); for (FieldUpdate fieldUpdate : fieldUpdates) { string.append(fieldUpdate).append(" "); } string.append("]"); if (fieldPathUpdates.size() > 0) { string.append(" [ "); for (FieldPathUpdate up : fieldPathUpdates) { string.append(up.toString()).append(" "); } string.append(" ]"); } return string.toString(); }
class DocumentUpdate extends DocumentOperation implements Iterable<FieldPathUpdate> { public static final int CLASSID = 0x1000 + 6; private DocumentId docId; private final List<FieldUpdate> fieldUpdates; private final List<FieldPathUpdate> fieldPathUpdates; private DocumentType documentType; private Boolean createIfNonExistent; /** * Creates a DocumentUpdate. * * @param docId the ID of the update * @param docType the document type that this update is valid for */ public DocumentUpdate(DocumentType docType, DocumentId docId) { this.docId = docId; this.documentType = docType; this.fieldUpdates = new ArrayList<>(); this.fieldPathUpdates = new ArrayList<>(); } /** * Creates a new document update using a reader */ public DocumentUpdate(DocumentUpdateReader reader) { docId = null; documentType = null; fieldUpdates = new ArrayList<>(); fieldPathUpdates = new ArrayList<>(); reader.read(this); } /** * Creates a DocumentUpdate. * * @param docId the ID of the update * @param docType the document type that this update is valid for */ public DocumentUpdate(DocumentType docType, String docId) { this(docType, new DocumentId(docId)); } public DocumentId getId() { return docId; } /** * Sets the document id of the document to update. * Use only while deserializing - changing the document id after creation has undefined behaviour. */ public void setId(DocumentId id) { docId = id; } private void verifyType(Document doc) { if (!documentType.equals(doc.getDataType())) { throw new IllegalArgumentException( "Document " + doc.getId() + " with type " + doc.getDataType() + " must have same type as update, which is type " + documentType); } } /** * Applies this document update. 
* * @param doc the document to apply the update to * @return a reference to itself * @throws IllegalArgumentException if the document does not have the same document type as this update */ public DocumentUpdate applyTo(Document doc) { verifyType(doc); for (FieldUpdate fieldUpdate : fieldUpdates) { fieldUpdate.applyTo(doc); } for (FieldPathUpdate fieldPathUpdate : fieldPathUpdates) { fieldPathUpdate.applyTo(doc); } return this; } /** * Prune away any field update that will not modify any field in the document. * @param doc document to check against * @return a reference to itself * @throws IllegalArgumentException if the document does not have the same document type as this update */ public DocumentUpdate prune(Document doc) { verifyType(doc); for (Iterator<FieldUpdate> iter = fieldUpdates.iterator(); iter.hasNext();) { FieldUpdate update = iter.next(); if (!update.isEmpty()) { ValueUpdate last = update.getValueUpdate(update.size() - 1); if (last instanceof AssignValueUpdate) { FieldValue currentValue = doc.getFieldValue(update.getField()); if ((currentValue != null) && currentValue.equals(last.getValue())) { iter.remove(); } } else if (last instanceof ClearValueUpdate) { FieldValue currentValue = doc.getFieldValue(update.getField()); if (currentValue == null) { iter.remove(); } else { FieldValue copy = currentValue.clone(); copy.clear(); if (currentValue.equals(copy)) { iter.remove(); } } } } } return this; } /** * Get an unmodifiable list of all field updates that this document update specifies. * * @return a list of all FieldUpdates in this DocumentUpdate * @deprecated Use fieldUpdates() instead. */ @Deprecated public List<FieldUpdate> getFieldUpdates() { return Collections.unmodifiableList(fieldUpdates); } /** * Get an unmodifiable collection of all field updates that this document update specifies. 
* * @return a collection of all FieldUpdates in this DocumentUpdate */ public Collection<FieldUpdate> fieldUpdates() { return Collections.unmodifiableCollection(fieldUpdates); } /** * Get an unmodifiable list of all field path updates this document update specifies. * * @return Returns a list of all field path updates in this document update. * @deprecated Use fieldPathUpdates() instead. */ @Deprecated public List<FieldPathUpdate> getFieldPathUpdates() { return Collections.unmodifiableList(fieldPathUpdates); } /** * Get an unmodifiable collection of all field path updates that this document update specifies. * * @return a collection of all FieldPathUpdates in this DocumentUpdate */ public Collection<FieldPathUpdate> fieldPathUpdates() { return Collections.unmodifiableCollection(fieldPathUpdates); } /** Returns the type of the document this updates * * @return The documentype of the document */ public DocumentType getDocumentType() { return documentType; } /** * Sets the document type. Use only while deserializing - changing the document type after creation * has undefined behaviour. */ public void setDocumentType(DocumentType type) { documentType = type; } /** * Get the field update at the specified index in the list of field updates. * * @param index the index of the FieldUpdate to return * @return the FieldUpdate at the specified index * @throws IndexOutOfBoundsException if index is out of range * @deprecated use getFieldUpdate(Field field) instead. */ @Deprecated public FieldUpdate getFieldUpdate(int index) { return fieldUpdates.get(index); } /** * Replaces the field update at the specified index in the list of field updates. 
* * @param index index of the FieldUpdate to replace * @param upd the FieldUpdate to be stored at the specified position * @return the FieldUpdate previously at the specified position * @throws IndexOutOfBoundsException if index is out of range * @deprecated Use removeFieldUpdate/addFieldUpdate instead */ @Deprecated public FieldUpdate setFieldUpdate(int index, FieldUpdate upd) { FieldUpdate old = fieldUpdates.get(index); fieldUpdates.set(index, upd); return old; } /** * Returns the update for a field * * @param field the field to return the update of * @return the update for the field, or null if that field has no update in this */ public FieldUpdate getFieldUpdate(Field field) { return getFieldUpdateById(field.getId()); } /** Removes all field updates from the list for field updates. */ public void clearFieldUpdates() { fieldUpdates.clear(); } /** * Returns the update for a field name * * @param fieldName the field name to return the update of * @return the update for the field, or null if that field has no update in this */ public FieldUpdate getFieldUpdate(String fieldName) { Field field = documentType.getField(fieldName); return field != null ? getFieldUpdate(field) : null; } private FieldUpdate getFieldUpdateById(Integer fieldId) { for (FieldUpdate fieldUpdate : fieldUpdates) { if (fieldUpdate.getField().getId() == fieldId) { return fieldUpdate; } } return null; } /** * Assigns the field updates of this document update. * This document update receives ownership of the list - it can not be subsequently used * by the caller. Also note that there no assumptions can be made on the order of items * after this call. They might have been joined if for the same field or reordered. 
* * @param fieldUpdates the new list of updates of this * @throws NullPointerException if the argument passed is null */ public void setFieldUpdates(Collection<FieldUpdate> fieldUpdates) { if (fieldUpdates == null) { throw new NullPointerException("The field updates of a document update can not be null"); } clearFieldUpdates(); addFieldUpdates(fieldUpdates); } public void addFieldUpdates(Collection<FieldUpdate> fieldUpdates) { for (FieldUpdate fieldUpdate : fieldUpdates) { addFieldUpdate(fieldUpdate); } } /** * Get the number of field updates in this document update. * * @return the size of the List of FieldUpdates */ public int size() { return fieldUpdates.size(); } /** * Adds the given {@link FieldUpdate} to this DocumentUpdate. If this DocumentUpdate already contains a FieldUpdate * for the named field, the content of the given FieldUpdate is added to the existing one. * * @param update The FieldUpdate to add to this DocumentUpdate. * @return This, to allow chaining. * @throws IllegalArgumentException If the {@link DocumentType} of this DocumentUpdate does not have a corresponding * field. */ public DocumentUpdate addFieldUpdate(FieldUpdate update) { int fieldId = update.getField().getId(); if (documentType.getField(fieldId) == null) { throw new IllegalArgumentException("Document type '" + documentType.getName() + "' does not have field '" + update.getField().getName() + "'."); } FieldUpdate prevUpdate = getFieldUpdateById(fieldId); if (prevUpdate != update) { if (prevUpdate != null) { prevUpdate.addAll(update); } else { fieldUpdates.add(update); } } return this; } /** * Adds a field path update to perform on the document. * * @return a reference to itself. */ public DocumentUpdate addFieldPathUpdate(FieldPathUpdate fieldPathUpdate) { fieldPathUpdates.add(fieldPathUpdate); return this; } /** * Adds all the field- and field path updates of the given document update to this. 
If the given update refers to a * different document or document type than this, this method throws an exception. * * @param update The update whose content to add to this. * @throws IllegalArgumentException If the {@link DocumentId} or {@link DocumentType} of the given DocumentUpdate * does not match the content of this. */ public void addAll(DocumentUpdate update) { if (update == null) { return; } if (!docId.equals(update.docId)) { throw new IllegalArgumentException("Expected " + docId + ", got " + update.docId + "."); } if (!documentType.equals(update.documentType)) { throw new IllegalArgumentException("Expected " + documentType + ", got " + update.documentType + "."); } addFieldUpdates(update.fieldUpdates()); for (FieldPathUpdate pathUpd : update.fieldPathUpdates) { addFieldPathUpdate(pathUpd); } } /** * Removes the field update at the specified position in the list of field updates. * * @param index the index of the FieldUpdate to remove * @return the FieldUpdate previously at the specified position * @throws IndexOutOfBoundsException if index is out of range * @deprecated use removeFieldUpdate(Field field) instead. */ @Deprecated public FieldUpdate removeFieldUpdate(int index) { FieldUpdate prev = getFieldUpdate(index); fieldUpdates.remove(index); return removeFieldUpdate(prev.getField()); } public FieldUpdate removeFieldUpdate(Field field) { for (Iterator<FieldUpdate> it = fieldUpdates.iterator(); it.hasNext();) { FieldUpdate fieldUpdate = it.next(); if (fieldUpdate.getField().equals(field)) { it.remove(); return fieldUpdate; } } return null; } public FieldUpdate removeFieldUpdate(String fieldName) { Field field = documentType.getField(fieldName); return field != null ? removeFieldUpdate(field) : null; } /** * Returns the document type of this document update. 
* * @return the document type of this document update */ public DocumentType getType() { return documentType; } public final void serialize(GrowableByteBuffer buf) { serialize(DocumentSerializerFactory.create42(buf)); } public void serialize(DocumentUpdateWriter data) { data.write(this); } @Override public boolean equals(Object o) { if (this == o) return true; if (!(o instanceof DocumentUpdate)) return false; DocumentUpdate that = (DocumentUpdate) o; if (docId != null ? !docId.equals(that.docId) : that.docId != null) return false; if (documentType != null ? !documentType.equals(that.documentType) : that.documentType != null) return false; if (fieldPathUpdates != null ? !fieldPathUpdates.equals(that.fieldPathUpdates) : that.fieldPathUpdates != null) return false; if (fieldUpdates != null ? !fieldUpdates.equals(that.fieldUpdates) : that.fieldUpdates != null) return false; if (this.getCreateIfNonExistent() != ((DocumentUpdate) o).getCreateIfNonExistent()) return false; return true; } @Override public int hashCode() { int result = docId != null ? docId.hashCode() : 0; result = 31 * result + (fieldUpdates != null ? fieldUpdates.hashCode() : 0); result = 31 * result + (fieldPathUpdates != null ? fieldPathUpdates.hashCode() : 0); result = 31 * result + (documentType != null ? documentType.hashCode() : 0); return result; } @Override @Override public Iterator<FieldPathUpdate> iterator() { return fieldPathUpdates.iterator(); } /** * Returns whether or not this field update contains any field- or field path updates. * * @return True if this update is empty. */ public boolean isEmpty() { return fieldUpdates.isEmpty() && fieldPathUpdates.isEmpty(); } /** * Sets whether this update should create the document it updates if that document does not exist. * In this case an empty document is created before the update is applied. * * @since 5.17 * @param value Whether the document it updates should be created. 
*/ public void setCreateIfNonExistent(boolean value) { createIfNonExistent = value; } /** * Gets whether this update should create the document it updates if that document does not exist. * * @since 5.17 * @return Whether the document it updates should be created. */ public boolean getCreateIfNonExistent() { return createIfNonExistent != null && createIfNonExistent; } public Optional<Boolean> getOptionalCreateIfNonExistent() { return Optional.ofNullable(createIfNonExistent); } }
class DocumentUpdate extends DocumentOperation implements Iterable<FieldPathUpdate> { public static final int CLASSID = 0x1000 + 6; private DocumentId docId; private final List<FieldUpdate> fieldUpdates; private final List<FieldPathUpdate> fieldPathUpdates; private DocumentType documentType; private Boolean createIfNonExistent; /** * Creates a DocumentUpdate. * * @param docId the ID of the update * @param docType the document type that this update is valid for */ public DocumentUpdate(DocumentType docType, DocumentId docId) { this.docId = docId; this.documentType = docType; this.fieldUpdates = new ArrayList<>(); this.fieldPathUpdates = new ArrayList<>(); } /** * Creates a new document update using a reader */ public DocumentUpdate(DocumentUpdateReader reader) { docId = null; documentType = null; fieldUpdates = new ArrayList<>(); fieldPathUpdates = new ArrayList<>(); reader.read(this); } /** * Creates a DocumentUpdate. * * @param docId the ID of the update * @param docType the document type that this update is valid for */ public DocumentUpdate(DocumentType docType, String docId) { this(docType, new DocumentId(docId)); } public DocumentId getId() { return docId; } /** * Sets the document id of the document to update. * Use only while deserializing - changing the document id after creation has undefined behaviour. */ public void setId(DocumentId id) { docId = id; } private void verifyType(Document doc) { if (!documentType.equals(doc.getDataType())) { throw new IllegalArgumentException( "Document " + doc.getId() + " with type " + doc.getDataType() + " must have same type as update, which is type " + documentType); } } /** * Applies this document update. 
* * @param doc the document to apply the update to * @return a reference to itself * @throws IllegalArgumentException if the document does not have the same document type as this update */ public DocumentUpdate applyTo(Document doc) { verifyType(doc); for (FieldUpdate fieldUpdate : fieldUpdates) { fieldUpdate.applyTo(doc); } for (FieldPathUpdate fieldPathUpdate : fieldPathUpdates) { fieldPathUpdate.applyTo(doc); } return this; } /** * Prune away any field update that will not modify any field in the document. * @param doc document to check against * @return a reference to itself * @throws IllegalArgumentException if the document does not have the same document type as this update */ public DocumentUpdate prune(Document doc) { verifyType(doc); for (Iterator<FieldUpdate> iter = fieldUpdates.iterator(); iter.hasNext();) { FieldUpdate update = iter.next(); if (!update.isEmpty()) { ValueUpdate last = update.getValueUpdate(update.size() - 1); if (last instanceof AssignValueUpdate) { FieldValue currentValue = doc.getFieldValue(update.getField()); if ((currentValue != null) && currentValue.equals(last.getValue())) { iter.remove(); } } else if (last instanceof ClearValueUpdate) { FieldValue currentValue = doc.getFieldValue(update.getField()); if (currentValue == null) { iter.remove(); } else { FieldValue copy = currentValue.clone(); copy.clear(); if (currentValue.equals(copy)) { iter.remove(); } } } } } return this; } /** * Get an unmodifiable list of all field updates that this document update specifies. * * @return a list of all FieldUpdates in this DocumentUpdate * @deprecated Use fieldUpdates() instead. */ @Deprecated public List<FieldUpdate> getFieldUpdates() { return Collections.unmodifiableList(fieldUpdates); } /** * Get an unmodifiable collection of all field updates that this document update specifies. 
* * @return a collection of all FieldUpdates in this DocumentUpdate */ public Collection<FieldUpdate> fieldUpdates() { return Collections.unmodifiableCollection(fieldUpdates); } /** * Get an unmodifiable list of all field path updates this document update specifies. * * @return Returns a list of all field path updates in this document update. * @deprecated Use fieldPathUpdates() instead. */ @Deprecated public List<FieldPathUpdate> getFieldPathUpdates() { return Collections.unmodifiableList(fieldPathUpdates); } /** * Get an unmodifiable collection of all field path updates that this document update specifies. * * @return a collection of all FieldPathUpdates in this DocumentUpdate */ public Collection<FieldPathUpdate> fieldPathUpdates() { return Collections.unmodifiableCollection(fieldPathUpdates); } /** Returns the type of the document this updates * * @return The documentype of the document */ public DocumentType getDocumentType() { return documentType; } /** * Sets the document type. Use only while deserializing - changing the document type after creation * has undefined behaviour. */ public void setDocumentType(DocumentType type) { documentType = type; } /** * Get the field update at the specified index in the list of field updates. * * @param index the index of the FieldUpdate to return * @return the FieldUpdate at the specified index * @throws IndexOutOfBoundsException if index is out of range * @deprecated use getFieldUpdate(Field field) instead. */ @Deprecated public FieldUpdate getFieldUpdate(int index) { return fieldUpdates.get(index); } /** * Replaces the field update at the specified index in the list of field updates. 
* * @param index index of the FieldUpdate to replace * @param upd the FieldUpdate to be stored at the specified position * @return the FieldUpdate previously at the specified position * @throws IndexOutOfBoundsException if index is out of range * @deprecated Use removeFieldUpdate/addFieldUpdate instead */ @Deprecated public FieldUpdate setFieldUpdate(int index, FieldUpdate upd) { FieldUpdate old = fieldUpdates.get(index); fieldUpdates.set(index, upd); return old; } /** * Returns the update for a field * * @param field the field to return the update of * @return the update for the field, or null if that field has no update in this */ public FieldUpdate getFieldUpdate(Field field) { return getFieldUpdateById(field.getId()); } /** Removes all field updates from the list for field updates. */ public void clearFieldUpdates() { fieldUpdates.clear(); } /** * Returns the update for a field name * * @param fieldName the field name to return the update of * @return the update for the field, or null if that field has no update in this */ public FieldUpdate getFieldUpdate(String fieldName) { Field field = documentType.getField(fieldName); return field != null ? getFieldUpdate(field) : null; } private FieldUpdate getFieldUpdateById(Integer fieldId) { for (FieldUpdate fieldUpdate : fieldUpdates) { if (fieldUpdate.getField().getId() == fieldId) { return fieldUpdate; } } return null; } /** * Assigns the field updates of this document update. * This document update receives ownership of the list - it can not be subsequently used * by the caller. Also note that there no assumptions can be made on the order of items * after this call. They might have been joined if for the same field or reordered. 
* * @param fieldUpdates the new list of updates of this * @throws NullPointerException if the argument passed is null */ public void setFieldUpdates(Collection<FieldUpdate> fieldUpdates) { if (fieldUpdates == null) { throw new NullPointerException("The field updates of a document update can not be null"); } clearFieldUpdates(); addFieldUpdates(fieldUpdates); } public void addFieldUpdates(Collection<FieldUpdate> fieldUpdates) { for (FieldUpdate fieldUpdate : fieldUpdates) { addFieldUpdate(fieldUpdate); } } /** * Get the number of field updates in this document update. * * @return the size of the List of FieldUpdates */ public int size() { return fieldUpdates.size(); } /** * Adds the given {@link FieldUpdate} to this DocumentUpdate. If this DocumentUpdate already contains a FieldUpdate * for the named field, the content of the given FieldUpdate is added to the existing one. * * @param update The FieldUpdate to add to this DocumentUpdate. * @return This, to allow chaining. * @throws IllegalArgumentException If the {@link DocumentType} of this DocumentUpdate does not have a corresponding * field. */ public DocumentUpdate addFieldUpdate(FieldUpdate update) { int fieldId = update.getField().getId(); if (documentType.getField(fieldId) == null) { throw new IllegalArgumentException("Document type '" + documentType.getName() + "' does not have field '" + update.getField().getName() + "'."); } FieldUpdate prevUpdate = getFieldUpdateById(fieldId); if (prevUpdate != update) { if (prevUpdate != null) { prevUpdate.addAll(update); } else { fieldUpdates.add(update); } } return this; } /** * Adds a field path update to perform on the document. * * @return a reference to itself. */ public DocumentUpdate addFieldPathUpdate(FieldPathUpdate fieldPathUpdate) { fieldPathUpdates.add(fieldPathUpdate); return this; } /** * Adds all the field- and field path updates of the given document update to this. 
If the given update refers to a * different document or document type than this, this method throws an exception. * * @param update The update whose content to add to this. * @throws IllegalArgumentException If the {@link DocumentId} or {@link DocumentType} of the given DocumentUpdate * does not match the content of this. */ public void addAll(DocumentUpdate update) { if (update == null) { return; } if (!docId.equals(update.docId)) { throw new IllegalArgumentException("Expected " + docId + ", got " + update.docId + "."); } if (!documentType.equals(update.documentType)) { throw new IllegalArgumentException("Expected " + documentType + ", got " + update.documentType + "."); } addFieldUpdates(update.fieldUpdates()); for (FieldPathUpdate pathUpd : update.fieldPathUpdates) { addFieldPathUpdate(pathUpd); } } /** * Removes the field update at the specified position in the list of field updates. * * @param index the index of the FieldUpdate to remove * @return the FieldUpdate previously at the specified position * @throws IndexOutOfBoundsException if index is out of range * @deprecated use removeFieldUpdate(Field field) instead. */ @Deprecated public FieldUpdate removeFieldUpdate(int index) { FieldUpdate prev = getFieldUpdate(index); fieldUpdates.remove(index); return removeFieldUpdate(prev.getField()); } public FieldUpdate removeFieldUpdate(Field field) { for (Iterator<FieldUpdate> it = fieldUpdates.iterator(); it.hasNext();) { FieldUpdate fieldUpdate = it.next(); if (fieldUpdate.getField().equals(field)) { it.remove(); return fieldUpdate; } } return null; } public FieldUpdate removeFieldUpdate(String fieldName) { Field field = documentType.getField(fieldName); return field != null ? removeFieldUpdate(field) : null; } /** * Returns the document type of this document update. 
* * @return the document type of this document update */ public DocumentType getType() { return documentType; } public final void serialize(GrowableByteBuffer buf) { serialize(DocumentSerializerFactory.create42(buf)); } public void serialize(DocumentUpdateWriter data) { data.write(this); } @Override public boolean equals(Object o) { if (this == o) return true; if (!(o instanceof DocumentUpdate)) return false; DocumentUpdate that = (DocumentUpdate) o; if (docId != null ? !docId.equals(that.docId) : that.docId != null) return false; if (documentType != null ? !documentType.equals(that.documentType) : that.documentType != null) return false; if (fieldPathUpdates != null ? !fieldPathUpdates.equals(that.fieldPathUpdates) : that.fieldPathUpdates != null) return false; if (fieldUpdates != null ? !fieldUpdates.equals(that.fieldUpdates) : that.fieldUpdates != null) return false; if (this.getCreateIfNonExistent() != ((DocumentUpdate) o).getCreateIfNonExistent()) return false; return true; } @Override public int hashCode() { int result = docId != null ? docId.hashCode() : 0; result = 31 * result + (fieldUpdates != null ? fieldUpdates.hashCode() : 0); result = 31 * result + (fieldPathUpdates != null ? fieldPathUpdates.hashCode() : 0); result = 31 * result + (documentType != null ? documentType.hashCode() : 0); return result; } @Override @Override public Iterator<FieldPathUpdate> iterator() { return fieldPathUpdates.iterator(); } /** * Returns whether or not this field update contains any field- or field path updates. * * @return True if this update is empty. */ public boolean isEmpty() { return fieldUpdates.isEmpty() && fieldPathUpdates.isEmpty(); } /** * Sets whether this update should create the document it updates if that document does not exist. * In this case an empty document is created before the update is applied. * * @since 5.17 * @param value Whether the document it updates should be created. 
*/ public void setCreateIfNonExistent(boolean value) { createIfNonExistent = value; } /** * Gets whether this update should create the document it updates if that document does not exist. * * @since 5.17 * @return Whether the document it updates should be created. */ public boolean getCreateIfNonExistent() { return createIfNonExistent != null && createIfNonExistent; } public Optional<Boolean> getOptionalCreateIfNonExistent() { return Optional.ofNullable(createIfNonExistent); } }
Remove (now presumably outdated) TODO?
public void read(DocumentUpdate update) { short serializationVersion = getShort(null); update.setId(new DocumentId(this)); byte contents = getByte(null); if ((contents & 0x1) == 0) { throw new DeserializationException("Cannot deserialize DocumentUpdate without doctype"); } update.setDocumentType(readDocumentType()); int size = getInt(null); for (int i = 0; i < size; i++) { update.addFieldUpdate(new FieldUpdate(this, update.getDocumentType(), serializationVersion)); } }
update.addFieldUpdate(new FieldUpdate(this, update.getDocumentType(), serializationVersion));
public void read(DocumentUpdate update) { short serializationVersion = getShort(null); update.setId(new DocumentId(this)); byte contents = getByte(null); if ((contents & 0x1) == 0) { throw new DeserializationException("Cannot deserialize DocumentUpdate without doctype"); } update.setDocumentType(readDocumentType()); int size = getInt(null); for (int i = 0; i < size; i++) { update.addFieldUpdate(new FieldUpdate(this, update.getDocumentType(), serializationVersion)); } }
class VespaDocumentDeserializer42 extends VespaDocumentSerializer42 implements DocumentDeserializer { private final Compressor compressor = new Compressor(); private DocumentTypeManager manager; GrowableByteBuffer body; private short version; private List<SpanNode> spanNodes; private List<Annotation> annotations; private int[] stringPositions; VespaDocumentDeserializer42(DocumentTypeManager manager, GrowableByteBuffer header, GrowableByteBuffer body, short version) { super(header); this.manager = manager; this.body = body; this.version = version; } VespaDocumentDeserializer42(DocumentTypeManager manager, GrowableByteBuffer buf) { this(manager, buf, null, Document.SERIALIZED_VERSION); } VespaDocumentDeserializer42(DocumentTypeManager manager, GrowableByteBuffer buf, GrowableByteBuffer body) { this(manager, buf, body, Document.SERIALIZED_VERSION); } final public DocumentTypeManager getDocumentTypeManager() { return manager; } public void read(Document document) { read(null, document); } public void read(FieldBase field, Document doc) { version = getShort(null); if (version < 6 || version > Document.SERIALIZED_VERSION) { throw new DeserializationException("Unknown version " + version + ", expected " + Document.SERIALIZED_VERSION + "."); } int dataLength = 0; int dataPos = 0; if (version < 7) { getInt2_4_8Bytes(null); } else { dataLength = getInt(null); dataPos = position(); } doc.setId(readDocumentId()); Byte content = getByte(null); doc.setDataType(readDocumentType()); if ((content & 0x2) != 0) { doc.getHeader().deserialize(new Field("header"),this); } if ((content & 0x4) != 0) { doc.getBody().deserialize(new Field("body"),this); } else if (body != null) { GrowableByteBuffer header = getBuf(); setBuf(body); body = null; doc.getBody().deserialize(new Field("body"), this); body = getBuf(); setBuf(header); } if (version < 8) { int crcVal = getInt(null); } if (version > 6) { if (dataLength != (position() - dataPos)) { throw new DeserializationException("Length 
mismatch"); } } } public void read(FieldBase field, FieldValue value) { throw new IllegalArgumentException("read not implemented yet."); } public <T extends FieldValue> void read(FieldBase field, Array<T> array) { int numElements = getNumCollectionElems(); ArrayList<T> list = new ArrayList<T>(numElements); ArrayDataType type = array.getDataType(); for (int i = 0; i < numElements; i++) { if (version < 7) { getInt(null); } FieldValue fv = type.getNestedType().createFieldValue(); fv.deserialize(null, this); list.add((T) fv); } array.clear(); array.addAll(list); } public <K extends FieldValue, V extends FieldValue> void read(FieldBase field, MapFieldValue<K, V> map) { int numElements = getNumCollectionElems(); Map<K,V> hash = new HashMap<>(); MapDataType type = map.getDataType(); for (int i = 0; i < numElements; i++) { if (version < 7) { getInt(null); } K key = (K) type.getKeyType().createFieldValue(); V val = (V) type.getValueType().createFieldValue(); key.deserialize(null, this); val.deserialize(null, this); hash.put(key, val); } map.clear(); map.putAll(hash); } private int getNumCollectionElems() { int numElements; if (version < 7) { getInt(null); numElements = getInt(null); } else { numElements = getInt1_2_4Bytes(null); } if (numElements < 0) { throw new DeserializationException("Bad number of array/map elements, " + numElements); } return numElements; } public <T extends FieldValue> void read(FieldBase field, CollectionFieldValue<T> value) { throw new IllegalArgumentException("read not implemented yet."); } public void read(FieldBase field, ByteFieldValue value) { value.assign(getByte(null)); } public void read(FieldBase field, DoubleFieldValue value) { value.assign(getDouble(null)); } public void read(FieldBase field, FloatFieldValue value) { value.assign(getFloat(null)); } public void read(FieldBase field, IntegerFieldValue value) { value.assign(getInt(null)); } public void read(FieldBase field, LongFieldValue value) { value.assign(getLong(null)); } public void 
read(FieldBase field, Raw value) { int rawsize = getInt(null); byte[] rawBytes = getBytes(null, rawsize); value.assign(rawBytes); } @Override public void read(FieldBase field, PredicateFieldValue value) { int len = getInt(null); byte[] buf = getBytes(null, len); value.assign(BinaryFormat.decode(buf)); } public void read(FieldBase field, StringFieldValue value) { byte coding = getByte(null); int length = getInt1_4Bytes(null); byte[] stringArray = new byte[length - 1]; buf.get(stringArray); buf.get(); value.setUnChecked(Utf8.toString(stringArray)); if ((coding & 64) == 64) { try { stringPositions = calculateStringPositions(stringArray); int size = buf.getInt(); int startPos = buf.position(); int numSpanTrees = buf.getInt1_2_4Bytes(); for (int i = 0; i < numSpanTrees; i++) { SpanTree tree = new SpanTree(); StringFieldValue treeName = new StringFieldValue(); treeName.deserialize(this); tree.setName(treeName.getString()); value.setSpanTree(tree); readSpanTree(tree, false); } buf.position(startPos + size); } finally { stringPositions = null; } } } @Override public void read(FieldBase field, TensorFieldValue value) { int encodedTensorLength = buf.getInt1_4Bytes(); if (encodedTensorLength > 0) { byte[] encodedTensor = getBytes(null, encodedTensorLength); value.assign(TypedBinaryFormat.decode(Optional.of(value.getDataType().getTensorType()), GrowableByteBuffer.wrap(encodedTensor))); } else { value.clear(); } } @Override public void read(FieldBase field, ReferenceFieldValue value) { final boolean documentIdPresent = (buf.get() != 0); if (documentIdPresent) { value.assign(readDocumentId()); } else { value.clear(); } } public void read(FieldBase fieldDef, Struct s) { s.setVersion(version); int startPos = position(); if (version < 6) { throw new DeserializationException("Illegal document serialization version " + version); } int dataSize; if (version < 7) { long rSize = getInt2_4_8Bytes(null); if (rSize > Integer.MAX_VALUE) { throw new DeserializationException("Raw size of data 
block is too large."); } dataSize = (int)rSize; } else { dataSize = getInt(null); } byte comprCode = getByte(null); CompressionType compression = CompressionType.valueOf(comprCode); int uncompressedSize = 0; if (compression != CompressionType.NONE && compression != CompressionType.INCOMPRESSIBLE) { long pSize = getInt2_4_8Bytes(null); if (pSize > Integer.MAX_VALUE) { throw new DeserializationException("Uncompressed size of data block is too large."); } uncompressedSize = (int) pSize; } int numberOfFields = getInt1_4Bytes(null); List<Tuple2<Integer, Long>> fieldIdsAndLengths = new ArrayList<>(numberOfFields); for (int i=0; i<numberOfFields; ++i) { fieldIdsAndLengths.add(new Tuple2<>(getInt1_4Bytes(null), getInt2_4_8Bytes(null))); } GrowableByteBuffer bigBuf = buf; if (version < 7) { int headerSize = position() - startPos; dataSize -= headerSize; } byte[] destination = compressor.decompress(compression, getBuf().array(), position(), uncompressedSize, Optional.of(dataSize)); position(position() + dataSize); buf = GrowableByteBuffer.wrap(destination); s.clear(); StructDataType type = s.getDataType(); for (int i=0; i<numberOfFields; ++i) { Field structField = type.getField(fieldIdsAndLengths.get(i).first, version); if (structField == null) { position(position() + fieldIdsAndLengths.get(i).second.intValue()); } else { int posBefore = position(); FieldValue value = structField.getDataType().createFieldValue(); value.deserialize(structField, this); s.setFieldValue(structField, value); position(posBefore + fieldIdsAndLengths.get(i).second.intValue()); } } buf = bigBuf; } public void read(FieldBase field, StructuredFieldValue value) { throw new IllegalArgumentException("read not implemented yet."); } public <T extends FieldValue> void read(FieldBase field, WeightedSet<T> ws) { WeightedSetDataType type = ws.getDataType(); getInt(null); int numElements = getInt(null); if (numElements < 0) { throw new DeserializationException("Bad number of weighted set elements, " + 
numElements); } ws.clearAndReserve(numElements * 2); for (int i = 0; i < numElements; i++) { int size = getInt(null); FieldValue value = type.getNestedType().createFieldValue(); value.deserialize(null, this); IntegerFieldValue weight = new IntegerFieldValue(getInt(null)); ws.putUnChecked((T) value, weight); } } public void read(FieldBase field, AnnotationReference value) { int seqId = buf.getInt1_2_4Bytes(); try { Annotation a = annotations.get(seqId); value.setReferenceNoCompatibilityCheck(a); } catch (IndexOutOfBoundsException iiobe) { throw new SerializationException("Could not serialize AnnotationReference value, reference not found.", iiobe); } } private Utf8String deserializeAttributeString() throws DeserializationException { int length = getByte(null); return new Utf8String(parseNullTerminatedString(length)); } private Utf8Array parseNullTerminatedString() { return parseNullTerminatedString(getBuf().getByteBuffer()); } private Utf8Array parseNullTerminatedString(int lengthExcludingNull) { return parseNullTerminatedString(getBuf().getByteBuffer(), lengthExcludingNull); } static Utf8Array parseNullTerminatedString(ByteBuffer buf, int lengthExcludingNull) throws DeserializationException { Utf8Array utf8 = new Utf8Array(buf, lengthExcludingNull); buf.get(); return utf8; } static Utf8Array parseNullTerminatedString(ByteBuffer buf) throws DeserializationException { int end = getFirstNullByte(buf); if (end == -1) { throw new DeserializationException("Could not locate terminating 0-byte for string"); } return parseNullTerminatedString(buf, end - buf.position()); } private static int getFirstNullByte(ByteBuffer buf) { int end = -1; int start = buf.position(); while (true) { try { byte dataByte = buf.get(); if (dataByte == (byte) 0) { end = buf.position() - 1; break; } } catch (Exception e) { break; } } buf.position(start); return end; } public void read(FieldPathUpdate update) { String fieldPath = getString(null); String whereClause = getString(null); 
update.setFieldPath(fieldPath); try { update.setWhereClause(whereClause); } catch (ParseException e) { throw new DeserializationException(e); } } public void read(AssignFieldPathUpdate update) { byte flags = getByte(null); update.setRemoveIfZero((flags & AssignFieldPathUpdate.REMOVE_IF_ZERO) != 0); update.setCreateMissingPath((flags & AssignFieldPathUpdate.CREATE_MISSING_PATH) != 0); if ((flags & AssignFieldPathUpdate.ARITHMETIC_EXPRESSION) != 0) { update.setExpression(getString(null)); } else { DataType dt = update.getFieldPath().getResultingDataType(); FieldValue fv = dt.createFieldValue(); fv.deserialize(this); update.setNewValue(fv); } } public void read(RemoveFieldPathUpdate update) { } public void read(AddFieldPathUpdate update) { DataType dt = update.getFieldPath().getResultingDataType(); FieldValue fv = dt.createFieldValue(); dt.createFieldValue(); fv.deserialize(this); if (!(fv instanceof Array)) { throw new DeserializationException("Add only applicable to array types"); } update.setNewValues((Array)fv); } public ValueUpdate getValueUpdate(DataType superType, DataType subType) { int vuTypeId = getInt(null); ValueUpdate.ValueUpdateClassID op = ValueUpdate.ValueUpdateClassID.getID(vuTypeId); if (op == null) { throw new IllegalArgumentException("Read type "+vuTypeId+" of bytebuffer, but this is not a legal value update type."); } switch (op) { case ADD: { FieldValue fval = subType.createFieldValue(); fval.deserialize(this); int weight = getInt(null); return new AddValueUpdate(fval, weight); } case ARITHMETIC: int opId = getInt(null); ArithmeticValueUpdate.Operator operator = ArithmeticValueUpdate.Operator.getID(opId); double operand = getDouble(null); return new ArithmeticValueUpdate(operator, operand); case ASSIGN: { byte contents = getByte(null); FieldValue fval = null; if (contents == (byte) 1) { fval = superType.createFieldValue(); fval.deserialize(this); } return new AssignValueUpdate(fval); } case CLEAR: return new ClearValueUpdate(); case MAP: if 
(superType instanceof ArrayDataType) { CollectionDataType type = (CollectionDataType) superType; IntegerFieldValue index = new IntegerFieldValue(); index.deserialize(this); ValueUpdate update = getValueUpdate(type.getNestedType(), null); return new MapValueUpdate(index, update); } else if (superType instanceof WeightedSetDataType) { CollectionDataType type = (CollectionDataType) superType; FieldValue fval = type.getNestedType().createFieldValue(); fval.deserialize(this); ValueUpdate update = getValueUpdate(DataType.INT, null); return new MapValueUpdate(fval, update); } else { throw new DeserializationException("MapValueUpdate only works for arrays and weighted sets"); } case REMOVE: FieldValue fval = ((CollectionDataType) superType).getNestedType().createFieldValue(); fval.deserialize(this); return new RemoveValueUpdate(fval); default: throw new DeserializationException( "Could not deserialize ValueUpdate, unknown valueUpdateClassID type " + vuTypeId); } } public void read(FieldUpdate fieldUpdate) { int fieldId = getInt(null); Field field = fieldUpdate.getDocumentType().getField(fieldId, fieldUpdate.getSerializationVersion()); if (field == null) { throw new DeserializationException( "Cannot deserialize FieldUpdate, field fieldId " + fieldId + " not found in " + fieldUpdate.getDocumentType()); } fieldUpdate.setField(field); int size = getInt(null); for (int i = 0; i < size; i++) { if (field.getDataType() instanceof CollectionDataType) { CollectionDataType collType = (CollectionDataType) field.getDataType(); fieldUpdate.addValueUpdate(getValueUpdate(collType, collType.getNestedType())); } else { fieldUpdate.addValueUpdate(getValueUpdate(field.getDataType(), null)); } } } public DocumentId readDocumentId() { Utf8String uri = new Utf8String(parseNullTerminatedString(getBuf().getByteBuffer())); return DocumentId.createFromSerialized(uri.toString()); } public DocumentType readDocumentType() { Utf8Array docTypeName = parseNullTerminatedString(); int ignored = 
getShort(null); DocumentType docType = manager.getDocumentType(new DataTypeName(docTypeName)); if (docType == null) { throw new DeserializationException("No known document type with name " + new Utf8String(docTypeName).toString()); } return docType; } private SpanNode readSpanNode() { byte type = buf.get(); buf.position(buf.position() - 1); SpanNode retval; if ((type & Span.ID) == Span.ID) { retval = new Span(); if (spanNodes != null) { spanNodes.add(retval); } read((Span) retval); } else if ((type & SpanList.ID) == SpanList.ID) { retval = new SpanList(); if (spanNodes != null) { spanNodes.add(retval); } read((SpanList) retval); } else if ((type & AlternateSpanList.ID) == AlternateSpanList.ID) { retval = new AlternateSpanList(); if (spanNodes != null) { spanNodes.add(retval); } read((AlternateSpanList) retval); } else { throw new DeserializationException("Cannot read SpanNode of type " + type); } return retval; } private void readSpanTree(SpanTree tree, boolean readName) { if (spanNodes != null || annotations != null) { throw new SerializationException("Deserialization of nested SpanTrees is not supported."); } spanNodes = new ArrayList<SpanNode>(); annotations = new ArrayList<Annotation>(); try { if (readName) { StringFieldValue treeName = new StringFieldValue(); treeName.deserialize(this); tree.setName(treeName.getString()); } SpanNode root = readSpanNode(); tree.setRoot(root); int numAnnotations = buf.getInt1_2_4Bytes(); for (int i = 0; i < numAnnotations; i++) { Annotation a = new Annotation(); annotations.add(a); } for (int i = 0; i < numAnnotations; i++) { read(annotations.get(i)); } for (Annotation a : annotations) { tree.annotate(a); } for (SpanNode node: spanNodes) { if (node instanceof Span) { correctIndexes((Span) node); } } } finally { spanNodes = null; annotations = null; } } public void read(SpanTree tree) { readSpanTree(tree, true); } public void read(Annotation annotation) { int annotationTypeId = buf.getInt(); AnnotationType type = 
manager.getAnnotationTypeRegistry().getType(annotationTypeId); if (type == null) { throw new DeserializationException("Cannot deserialize annotation of type " + annotationTypeId + " (unknown type)"); } annotation.setType(type); byte features = buf.get(); int length = buf.getInt1_2_4Bytes(); if ((features & (byte) 1) == (byte) 1) { int spanNodeId = buf.getInt1_2_4Bytes(); try { SpanNode node = spanNodes.get(spanNodeId); annotation.setSpanNode(node); } catch (IndexOutOfBoundsException ioobe) { throw new DeserializationException("Could not deserialize annotation, associated span node not found ", ioobe); } } if ((features & (byte) 2) == (byte) 2) { int dataTypeId = buf.getInt(); if (dataTypeId != type.getDataType().getId()) { buf.position(buf.position() + length - 4); } else { FieldValue value = type.getDataType().createFieldValue(); value.deserialize(this); annotation.setFieldValue(value); } } } public void read(Span span) { byte type = buf.get(); if ((type & Span.ID) != Span.ID) { throw new DeserializationException("Cannot deserialize Span with type " + type); } span.setFrom(buf.getInt1_2_4Bytes()); span.setLength(buf.getInt1_2_4Bytes()); } private void correctIndexes(Span span) { if (stringPositions == null) { throw new DeserializationException("Cannot deserialize Span, no access to parent StringFieldValue."); } int fromIndex = stringPositions[span.getFrom()]; int toIndex = stringPositions[span.getTo()]; int length = toIndex - fromIndex; span.setFrom(fromIndex); span.setLength(length); } public void read(SpanList spanList) { byte type = buf.get(); if ((type & SpanList.ID) != SpanList.ID) { throw new DeserializationException("Cannot deserialize SpanList with type " + type); } List<SpanNode> nodes = readSpanList(spanList); for (SpanNode node : nodes) { spanList.add(node); } } public void read(AlternateSpanList altSpanList) { byte type = buf.get(); if ((type & AlternateSpanList.ID) != AlternateSpanList.ID) { throw new DeserializationException("Cannot deserialize 
AlternateSpanList with type " + type); } int numSubTrees = buf.getInt1_2_4Bytes(); for (int i = 0; i < numSubTrees; i++) { double prob = buf.getDouble(); List<SpanNode> list = readSpanList(altSpanList); if (i == 0) { for (SpanNode node : list) { altSpanList.add(node); } altSpanList.setProbability(0, prob); } else { altSpanList.addChildren(i, list, prob); } } } private List<SpanNode> readSpanList(SpanNodeParent parent) { int size = buf.getInt1_2_4Bytes(); List<SpanNode> spanList = new ArrayList<SpanNode>(); for (int i = 0; i < size; i++) { spanList.add(readSpanNode()); } return spanList; } }
class VespaDocumentDeserializer42 extends VespaDocumentSerializer42 implements DocumentDeserializer { private final Compressor compressor = new Compressor(); private DocumentTypeManager manager; GrowableByteBuffer body; private short version; private List<SpanNode> spanNodes; private List<Annotation> annotations; private int[] stringPositions; VespaDocumentDeserializer42(DocumentTypeManager manager, GrowableByteBuffer header, GrowableByteBuffer body, short version) { super(header); this.manager = manager; this.body = body; this.version = version; } VespaDocumentDeserializer42(DocumentTypeManager manager, GrowableByteBuffer buf) { this(manager, buf, null, Document.SERIALIZED_VERSION); } VespaDocumentDeserializer42(DocumentTypeManager manager, GrowableByteBuffer buf, GrowableByteBuffer body) { this(manager, buf, body, Document.SERIALIZED_VERSION); } final public DocumentTypeManager getDocumentTypeManager() { return manager; } public void read(Document document) { read(null, document); } public void read(FieldBase field, Document doc) { version = getShort(null); if (version < 6 || version > Document.SERIALIZED_VERSION) { throw new DeserializationException("Unknown version " + version + ", expected " + Document.SERIALIZED_VERSION + "."); } int dataLength = 0; int dataPos = 0; if (version < 7) { getInt2_4_8Bytes(null); } else { dataLength = getInt(null); dataPos = position(); } doc.setId(readDocumentId()); Byte content = getByte(null); doc.setDataType(readDocumentType()); if ((content & 0x2) != 0) { doc.getHeader().deserialize(new Field("header"),this); } if ((content & 0x4) != 0) { doc.getBody().deserialize(new Field("body"),this); } else if (body != null) { GrowableByteBuffer header = getBuf(); setBuf(body); body = null; doc.getBody().deserialize(new Field("body"), this); body = getBuf(); setBuf(header); } if (version < 8) { int crcVal = getInt(null); } if (version > 6) { if (dataLength != (position() - dataPos)) { throw new DeserializationException("Length 
mismatch"); } } } public void read(FieldBase field, FieldValue value) { throw new IllegalArgumentException("read not implemented yet."); } public <T extends FieldValue> void read(FieldBase field, Array<T> array) { int numElements = getNumCollectionElems(); ArrayList<T> list = new ArrayList<T>(numElements); ArrayDataType type = array.getDataType(); for (int i = 0; i < numElements; i++) { if (version < 7) { getInt(null); } FieldValue fv = type.getNestedType().createFieldValue(); fv.deserialize(null, this); list.add((T) fv); } array.clear(); array.addAll(list); } public <K extends FieldValue, V extends FieldValue> void read(FieldBase field, MapFieldValue<K, V> map) { int numElements = getNumCollectionElems(); Map<K,V> hash = new HashMap<>(); MapDataType type = map.getDataType(); for (int i = 0; i < numElements; i++) { if (version < 7) { getInt(null); } K key = (K) type.getKeyType().createFieldValue(); V val = (V) type.getValueType().createFieldValue(); key.deserialize(null, this); val.deserialize(null, this); hash.put(key, val); } map.clear(); map.putAll(hash); } private int getNumCollectionElems() { int numElements; if (version < 7) { getInt(null); numElements = getInt(null); } else { numElements = getInt1_2_4Bytes(null); } if (numElements < 0) { throw new DeserializationException("Bad number of array/map elements, " + numElements); } return numElements; } public <T extends FieldValue> void read(FieldBase field, CollectionFieldValue<T> value) { throw new IllegalArgumentException("read not implemented yet."); } public void read(FieldBase field, ByteFieldValue value) { value.assign(getByte(null)); } public void read(FieldBase field, DoubleFieldValue value) { value.assign(getDouble(null)); } public void read(FieldBase field, FloatFieldValue value) { value.assign(getFloat(null)); } public void read(FieldBase field, IntegerFieldValue value) { value.assign(getInt(null)); } public void read(FieldBase field, LongFieldValue value) { value.assign(getLong(null)); } public void 
read(FieldBase field, Raw value) { int rawsize = getInt(null); byte[] rawBytes = getBytes(null, rawsize); value.assign(rawBytes); } @Override public void read(FieldBase field, PredicateFieldValue value) { int len = getInt(null); byte[] buf = getBytes(null, len); value.assign(BinaryFormat.decode(buf)); } public void read(FieldBase field, StringFieldValue value) { byte coding = getByte(null); int length = getInt1_4Bytes(null); byte[] stringArray = new byte[length - 1]; buf.get(stringArray); buf.get(); value.setUnChecked(Utf8.toString(stringArray)); if ((coding & 64) == 64) { try { stringPositions = calculateStringPositions(stringArray); int size = buf.getInt(); int startPos = buf.position(); int numSpanTrees = buf.getInt1_2_4Bytes(); for (int i = 0; i < numSpanTrees; i++) { SpanTree tree = new SpanTree(); StringFieldValue treeName = new StringFieldValue(); treeName.deserialize(this); tree.setName(treeName.getString()); value.setSpanTree(tree); readSpanTree(tree, false); } buf.position(startPos + size); } finally { stringPositions = null; } } } @Override public void read(FieldBase field, TensorFieldValue value) { int encodedTensorLength = buf.getInt1_4Bytes(); if (encodedTensorLength > 0) { byte[] encodedTensor = getBytes(null, encodedTensorLength); value.assign(TypedBinaryFormat.decode(Optional.of(value.getDataType().getTensorType()), GrowableByteBuffer.wrap(encodedTensor))); } else { value.clear(); } } @Override public void read(FieldBase field, ReferenceFieldValue value) { final boolean documentIdPresent = (buf.get() != 0); if (documentIdPresent) { value.assign(readDocumentId()); } else { value.clear(); } } public void read(FieldBase fieldDef, Struct s) { s.setVersion(version); int startPos = position(); if (version < 6) { throw new DeserializationException("Illegal document serialization version " + version); } int dataSize; if (version < 7) { long rSize = getInt2_4_8Bytes(null); if (rSize > Integer.MAX_VALUE) { throw new DeserializationException("Raw size of data 
block is too large."); } dataSize = (int)rSize; } else { dataSize = getInt(null); } byte comprCode = getByte(null); CompressionType compression = CompressionType.valueOf(comprCode); int uncompressedSize = 0; if (compression != CompressionType.NONE && compression != CompressionType.INCOMPRESSIBLE) { long pSize = getInt2_4_8Bytes(null); if (pSize > Integer.MAX_VALUE) { throw new DeserializationException("Uncompressed size of data block is too large."); } uncompressedSize = (int) pSize; } int numberOfFields = getInt1_4Bytes(null); List<Tuple2<Integer, Long>> fieldIdsAndLengths = new ArrayList<>(numberOfFields); for (int i=0; i<numberOfFields; ++i) { fieldIdsAndLengths.add(new Tuple2<>(getInt1_4Bytes(null), getInt2_4_8Bytes(null))); } GrowableByteBuffer bigBuf = buf; if (version < 7) { int headerSize = position() - startPos; dataSize -= headerSize; } byte[] destination = compressor.decompress(compression, getBuf().array(), position(), uncompressedSize, Optional.of(dataSize)); position(position() + dataSize); buf = GrowableByteBuffer.wrap(destination); s.clear(); StructDataType type = s.getDataType(); for (int i=0; i<numberOfFields; ++i) { Field structField = type.getField(fieldIdsAndLengths.get(i).first, version); if (structField == null) { position(position() + fieldIdsAndLengths.get(i).second.intValue()); } else { int posBefore = position(); FieldValue value = structField.getDataType().createFieldValue(); value.deserialize(structField, this); s.setFieldValue(structField, value); position(posBefore + fieldIdsAndLengths.get(i).second.intValue()); } } buf = bigBuf; } public void read(FieldBase field, StructuredFieldValue value) { throw new IllegalArgumentException("read not implemented yet."); } public <T extends FieldValue> void read(FieldBase field, WeightedSet<T> ws) { WeightedSetDataType type = ws.getDataType(); getInt(null); int numElements = getInt(null); if (numElements < 0) { throw new DeserializationException("Bad number of weighted set elements, " + 
numElements); } ws.clearAndReserve(numElements * 2); for (int i = 0; i < numElements; i++) { int size = getInt(null); FieldValue value = type.getNestedType().createFieldValue(); value.deserialize(null, this); IntegerFieldValue weight = new IntegerFieldValue(getInt(null)); ws.putUnChecked((T) value, weight); } } public void read(FieldBase field, AnnotationReference value) { int seqId = buf.getInt1_2_4Bytes(); try { Annotation a = annotations.get(seqId); value.setReferenceNoCompatibilityCheck(a); } catch (IndexOutOfBoundsException iiobe) { throw new SerializationException("Could not serialize AnnotationReference value, reference not found.", iiobe); } } private Utf8String deserializeAttributeString() throws DeserializationException { int length = getByte(null); return new Utf8String(parseNullTerminatedString(length)); } private Utf8Array parseNullTerminatedString() { return parseNullTerminatedString(getBuf().getByteBuffer()); } private Utf8Array parseNullTerminatedString(int lengthExcludingNull) { return parseNullTerminatedString(getBuf().getByteBuffer(), lengthExcludingNull); } static Utf8Array parseNullTerminatedString(ByteBuffer buf, int lengthExcludingNull) throws DeserializationException { Utf8Array utf8 = new Utf8Array(buf, lengthExcludingNull); buf.get(); return utf8; } static Utf8Array parseNullTerminatedString(ByteBuffer buf) throws DeserializationException { int end = getFirstNullByte(buf); if (end == -1) { throw new DeserializationException("Could not locate terminating 0-byte for string"); } return parseNullTerminatedString(buf, end - buf.position()); } private static int getFirstNullByte(ByteBuffer buf) { int end = -1; int start = buf.position(); while (true) { try { byte dataByte = buf.get(); if (dataByte == (byte) 0) { end = buf.position() - 1; break; } } catch (Exception e) { break; } } buf.position(start); return end; } public void read(FieldPathUpdate update) { String fieldPath = getString(null); String whereClause = getString(null); 
update.setFieldPath(fieldPath); try { update.setWhereClause(whereClause); } catch (ParseException e) { throw new DeserializationException(e); } } public void read(AssignFieldPathUpdate update) { byte flags = getByte(null); update.setRemoveIfZero((flags & AssignFieldPathUpdate.REMOVE_IF_ZERO) != 0); update.setCreateMissingPath((flags & AssignFieldPathUpdate.CREATE_MISSING_PATH) != 0); if ((flags & AssignFieldPathUpdate.ARITHMETIC_EXPRESSION) != 0) { update.setExpression(getString(null)); } else { DataType dt = update.getFieldPath().getResultingDataType(); FieldValue fv = dt.createFieldValue(); fv.deserialize(this); update.setNewValue(fv); } } public void read(RemoveFieldPathUpdate update) { } public void read(AddFieldPathUpdate update) { DataType dt = update.getFieldPath().getResultingDataType(); FieldValue fv = dt.createFieldValue(); dt.createFieldValue(); fv.deserialize(this); if (!(fv instanceof Array)) { throw new DeserializationException("Add only applicable to array types"); } update.setNewValues((Array)fv); } public ValueUpdate getValueUpdate(DataType superType, DataType subType) { int vuTypeId = getInt(null); ValueUpdate.ValueUpdateClassID op = ValueUpdate.ValueUpdateClassID.getID(vuTypeId); if (op == null) { throw new IllegalArgumentException("Read type "+vuTypeId+" of bytebuffer, but this is not a legal value update type."); } switch (op) { case ADD: { FieldValue fval = subType.createFieldValue(); fval.deserialize(this); int weight = getInt(null); return new AddValueUpdate(fval, weight); } case ARITHMETIC: int opId = getInt(null); ArithmeticValueUpdate.Operator operator = ArithmeticValueUpdate.Operator.getID(opId); double operand = getDouble(null); return new ArithmeticValueUpdate(operator, operand); case ASSIGN: { byte contents = getByte(null); FieldValue fval = null; if (contents == (byte) 1) { fval = superType.createFieldValue(); fval.deserialize(this); } return new AssignValueUpdate(fval); } case CLEAR: return new ClearValueUpdate(); case MAP: if 
(superType instanceof ArrayDataType) { CollectionDataType type = (CollectionDataType) superType; IntegerFieldValue index = new IntegerFieldValue(); index.deserialize(this); ValueUpdate update = getValueUpdate(type.getNestedType(), null); return new MapValueUpdate(index, update); } else if (superType instanceof WeightedSetDataType) { CollectionDataType type = (CollectionDataType) superType; FieldValue fval = type.getNestedType().createFieldValue(); fval.deserialize(this); ValueUpdate update = getValueUpdate(DataType.INT, null); return new MapValueUpdate(fval, update); } else { throw new DeserializationException("MapValueUpdate only works for arrays and weighted sets"); } case REMOVE: FieldValue fval = ((CollectionDataType) superType).getNestedType().createFieldValue(); fval.deserialize(this); return new RemoveValueUpdate(fval); default: throw new DeserializationException( "Could not deserialize ValueUpdate, unknown valueUpdateClassID type " + vuTypeId); } } public void read(FieldUpdate fieldUpdate) { int fieldId = getInt(null); Field field = fieldUpdate.getDocumentType().getField(fieldId, fieldUpdate.getSerializationVersion()); if (field == null) { throw new DeserializationException( "Cannot deserialize FieldUpdate, field fieldId " + fieldId + " not found in " + fieldUpdate.getDocumentType()); } fieldUpdate.setField(field); int size = getInt(null); for (int i = 0; i < size; i++) { if (field.getDataType() instanceof CollectionDataType) { CollectionDataType collType = (CollectionDataType) field.getDataType(); fieldUpdate.addValueUpdate(getValueUpdate(collType, collType.getNestedType())); } else { fieldUpdate.addValueUpdate(getValueUpdate(field.getDataType(), null)); } } } public DocumentId readDocumentId() { Utf8String uri = new Utf8String(parseNullTerminatedString(getBuf().getByteBuffer())); return DocumentId.createFromSerialized(uri.toString()); } public DocumentType readDocumentType() { Utf8Array docTypeName = parseNullTerminatedString(); int ignored = 
getShort(null); DocumentType docType = manager.getDocumentType(new DataTypeName(docTypeName)); if (docType == null) { throw new DeserializationException("No known document type with name " + new Utf8String(docTypeName).toString()); } return docType; } private SpanNode readSpanNode() { byte type = buf.get(); buf.position(buf.position() - 1); SpanNode retval; if ((type & Span.ID) == Span.ID) { retval = new Span(); if (spanNodes != null) { spanNodes.add(retval); } read((Span) retval); } else if ((type & SpanList.ID) == SpanList.ID) { retval = new SpanList(); if (spanNodes != null) { spanNodes.add(retval); } read((SpanList) retval); } else if ((type & AlternateSpanList.ID) == AlternateSpanList.ID) { retval = new AlternateSpanList(); if (spanNodes != null) { spanNodes.add(retval); } read((AlternateSpanList) retval); } else { throw new DeserializationException("Cannot read SpanNode of type " + type); } return retval; } private void readSpanTree(SpanTree tree, boolean readName) { if (spanNodes != null || annotations != null) { throw new SerializationException("Deserialization of nested SpanTrees is not supported."); } spanNodes = new ArrayList<SpanNode>(); annotations = new ArrayList<Annotation>(); try { if (readName) { StringFieldValue treeName = new StringFieldValue(); treeName.deserialize(this); tree.setName(treeName.getString()); } SpanNode root = readSpanNode(); tree.setRoot(root); int numAnnotations = buf.getInt1_2_4Bytes(); for (int i = 0; i < numAnnotations; i++) { Annotation a = new Annotation(); annotations.add(a); } for (int i = 0; i < numAnnotations; i++) { read(annotations.get(i)); } for (Annotation a : annotations) { tree.annotate(a); } for (SpanNode node: spanNodes) { if (node instanceof Span) { correctIndexes((Span) node); } } } finally { spanNodes = null; annotations = null; } } public void read(SpanTree tree) { readSpanTree(tree, true); } public void read(Annotation annotation) { int annotationTypeId = buf.getInt(); AnnotationType type = 
manager.getAnnotationTypeRegistry().getType(annotationTypeId); if (type == null) { throw new DeserializationException("Cannot deserialize annotation of type " + annotationTypeId + " (unknown type)"); } annotation.setType(type); byte features = buf.get(); int length = buf.getInt1_2_4Bytes(); if ((features & (byte) 1) == (byte) 1) { int spanNodeId = buf.getInt1_2_4Bytes(); try { SpanNode node = spanNodes.get(spanNodeId); annotation.setSpanNode(node); } catch (IndexOutOfBoundsException ioobe) { throw new DeserializationException("Could not deserialize annotation, associated span node not found ", ioobe); } } if ((features & (byte) 2) == (byte) 2) { int dataTypeId = buf.getInt(); if (dataTypeId != type.getDataType().getId()) { buf.position(buf.position() + length - 4); } else { FieldValue value = type.getDataType().createFieldValue(); value.deserialize(this); annotation.setFieldValue(value); } } } public void read(Span span) { byte type = buf.get(); if ((type & Span.ID) != Span.ID) { throw new DeserializationException("Cannot deserialize Span with type " + type); } span.setFrom(buf.getInt1_2_4Bytes()); span.setLength(buf.getInt1_2_4Bytes()); } private void correctIndexes(Span span) { if (stringPositions == null) { throw new DeserializationException("Cannot deserialize Span, no access to parent StringFieldValue."); } int fromIndex = stringPositions[span.getFrom()]; int toIndex = stringPositions[span.getTo()]; int length = toIndex - fromIndex; span.setFrom(fromIndex); span.setLength(length); } public void read(SpanList spanList) { byte type = buf.get(); if ((type & SpanList.ID) != SpanList.ID) { throw new DeserializationException("Cannot deserialize SpanList with type " + type); } List<SpanNode> nodes = readSpanList(spanList); for (SpanNode node : nodes) { spanList.add(node); } } public void read(AlternateSpanList altSpanList) { byte type = buf.get(); if ((type & AlternateSpanList.ID) != AlternateSpanList.ID) { throw new DeserializationException("Cannot deserialize 
AlternateSpanList with type " + type); } int numSubTrees = buf.getInt1_2_4Bytes(); for (int i = 0; i < numSubTrees; i++) { double prob = buf.getDouble(); List<SpanNode> list = readSpanList(altSpanList); if (i == 0) { for (SpanNode node : list) { altSpanList.add(node); } altSpanList.setProbability(0, prob); } else { altSpanList.addChildren(i, list, prob); } } } private List<SpanNode> readSpanList(SpanNodeParent parent) { int size = buf.getInt1_2_4Bytes(); List<SpanNode> spanList = new ArrayList<SpanNode>(); for (int i = 0; i < size; i++) { spanList.add(readSpanNode()); } return spanList; } }
Remove (now presumably outdated) TODO?
public void read(DocumentUpdate update) { update.setId(new DocumentId(this)); update.setDocumentType(readDocumentType()); int size = getInt(null); for (int i = 0; i < size; i++) { update.addFieldUpdate(new FieldUpdate(this, update.getDocumentType(), 8)); } int sizeAndFlags = getInt(null); update.setCreateIfNonExistent(DocumentUpdateFlags.extractFlags(sizeAndFlags).getCreateIfNonExistent()); size = DocumentUpdateFlags.extractValue(sizeAndFlags); for (int i = 0; i < size; i++) { int type = getByte(null); update.addFieldPathUpdate(FieldPathUpdate.create(FieldPathUpdate.Type.valueOf(type), update.getDocumentType(), this)); } }
update.addFieldUpdate(new FieldUpdate(this, update.getDocumentType(), 8));
public void read(DocumentUpdate update) { update.setId(new DocumentId(this)); update.setDocumentType(readDocumentType()); int size = getInt(null); for (int i = 0; i < size; i++) { update.addFieldUpdate(new FieldUpdate(this, update.getDocumentType(), 8)); } int sizeAndFlags = getInt(null); update.setCreateIfNonExistent(DocumentUpdateFlags.extractFlags(sizeAndFlags).getCreateIfNonExistent()); size = DocumentUpdateFlags.extractValue(sizeAndFlags); for (int i = 0; i < size; i++) { int type = getByte(null); update.addFieldPathUpdate(FieldPathUpdate.create(FieldPathUpdate.Type.valueOf(type), update.getDocumentType(), this)); } }
class VespaDocumentDeserializerHead extends VespaDocumentDeserializer42 { public VespaDocumentDeserializerHead(DocumentTypeManager manager, GrowableByteBuffer buffer) { super(manager, buffer); } @Override }
class VespaDocumentDeserializerHead extends VespaDocumentDeserializer42 { public VespaDocumentDeserializerHead(DocumentTypeManager manager, GrowableByteBuffer buffer) { super(manager, buffer); } @Override }
Correct
public FieldUpdate removeFieldUpdate(Field field) { for (Iterator<FieldUpdate> it = fieldUpdates.iterator(); it.hasNext();) { FieldUpdate fieldUpdate = it.next(); if (fieldUpdate.getField().equals(field)) { it.remove(); return fieldUpdate; } } return null; }
return null; }
public FieldUpdate removeFieldUpdate(Field field) { for (Iterator<FieldUpdate> it = fieldUpdates.iterator(); it.hasNext();) { FieldUpdate fieldUpdate = it.next(); if (fieldUpdate.getField().equals(field)) { it.remove(); return fieldUpdate; } } return null; }
class DocumentUpdate extends DocumentOperation implements Iterable<FieldPathUpdate> { public static final int CLASSID = 0x1000 + 6; private DocumentId docId; private final List<FieldUpdate> fieldUpdates; private final List<FieldPathUpdate> fieldPathUpdates; private DocumentType documentType; private Boolean createIfNonExistent; /** * Creates a DocumentUpdate. * * @param docId the ID of the update * @param docType the document type that this update is valid for */ public DocumentUpdate(DocumentType docType, DocumentId docId) { this.docId = docId; this.documentType = docType; this.fieldUpdates = new ArrayList<>(); this.fieldPathUpdates = new ArrayList<>(); } /** * Creates a new document update using a reader */ public DocumentUpdate(DocumentUpdateReader reader) { docId = null; documentType = null; fieldUpdates = new ArrayList<>(); fieldPathUpdates = new ArrayList<>(); reader.read(this); } /** * Creates a DocumentUpdate. * * @param docId the ID of the update * @param docType the document type that this update is valid for */ public DocumentUpdate(DocumentType docType, String docId) { this(docType, new DocumentId(docId)); } public DocumentId getId() { return docId; } /** * Sets the document id of the document to update. * Use only while deserializing - changing the document id after creation has undefined behaviour. */ public void setId(DocumentId id) { docId = id; } private void verifyType(Document doc) { if (!documentType.equals(doc.getDataType())) { throw new IllegalArgumentException( "Document " + doc.getId() + " with type " + doc.getDataType() + " must have same type as update, which is type " + documentType); } } /** * Applies this document update. 
* * @param doc the document to apply the update to * @return a reference to itself * @throws IllegalArgumentException if the document does not have the same document type as this update */ public DocumentUpdate applyTo(Document doc) { verifyType(doc); for (FieldUpdate fieldUpdate : fieldUpdates) { fieldUpdate.applyTo(doc); } for (FieldPathUpdate fieldPathUpdate : fieldPathUpdates) { fieldPathUpdate.applyTo(doc); } return this; } /** * Prune away any field update that will not modify any field in the document. * @param doc document to check against * @return a reference to itself * @throws IllegalArgumentException if the document does not have the same document type as this update */ public DocumentUpdate prune(Document doc) { verifyType(doc); for (Iterator<FieldUpdate> iter = fieldUpdates.iterator(); iter.hasNext();) { FieldUpdate update = iter.next(); if (!update.isEmpty()) { ValueUpdate last = update.getValueUpdate(update.size() - 1); if (last instanceof AssignValueUpdate) { FieldValue currentValue = doc.getFieldValue(update.getField()); if ((currentValue != null) && currentValue.equals(last.getValue())) { iter.remove(); } } else if (last instanceof ClearValueUpdate) { FieldValue currentValue = doc.getFieldValue(update.getField()); if (currentValue == null) { iter.remove(); } else { FieldValue copy = currentValue.clone(); copy.clear(); if (currentValue.equals(copy)) { iter.remove(); } } } } } return this; } /** * Get an unmodifiable list of all field updates that this document update specifies. * * @return a list of all FieldUpdates in this DocumentUpdate * @deprecated Use fieldUpdates() instead. */ @Deprecated public List<FieldUpdate> getFieldUpdates() { return Collections.unmodifiableList(fieldUpdates); } /** * Get an unmodifiable collection of all field updates that this document update specifies. 
* * @return a collection of all FieldUpdates in this DocumentUpdate */ public Collection<FieldUpdate> fieldUpdates() { return Collections.unmodifiableCollection(fieldUpdates); } /** * Get an unmodifiable list of all field path updates this document update specifies. * * @return Returns a list of all field path updates in this document update. * @deprecated Use fieldPathUpdates() instead. */ @Deprecated public List<FieldPathUpdate> getFieldPathUpdates() { return Collections.unmodifiableList(fieldPathUpdates); } /** * Get an unmodifiable collection of all field path updates that this document update specifies. * * @return a collection of all FieldPathUpdates in this DocumentUpdate */ public Collection<FieldPathUpdate> fieldPathUpdates() { return Collections.unmodifiableCollection(fieldPathUpdates); } /** Returns the type of the document this updates * * @return The documentype of the document */ public DocumentType getDocumentType() { return documentType; } /** * Sets the document type. Use only while deserializing - changing the document type after creation * has undefined behaviour. */ public void setDocumentType(DocumentType type) { documentType = type; } /** * Get the field update at the specified index in the list of field updates. * * @param index the index of the FieldUpdate to return * @return the FieldUpdate at the specified index * @throws IndexOutOfBoundsException if index is out of range * @deprecated use getFieldUpdate(Field field) instead. */ @Deprecated public FieldUpdate getFieldUpdate(int index) { return fieldUpdates.get(index); } /** * Replaces the field update at the specified index in the list of field updates. 
* * @param index index of the FieldUpdate to replace * @param upd the FieldUpdate to be stored at the specified position * @return the FieldUpdate previously at the specified position * @throws IndexOutOfBoundsException if index is out of range * @deprecated Use removeFieldUpdate/addFieldUpdate instead */ @Deprecated public FieldUpdate setFieldUpdate(int index, FieldUpdate upd) { FieldUpdate old = fieldUpdates.get(index); fieldUpdates.set(index, upd); return old; } /** * Returns the update for a field * * @param field the field to return the update of * @return the update for the field, or null if that field has no update in this */ public FieldUpdate getFieldUpdate(Field field) { return getFieldUpdateById(field.getId()); } /** Removes all field updates from the list for field updates. */ public void clearFieldUpdates() { fieldUpdates.clear(); } /** * Returns the update for a field name * * @param fieldName the field name to return the update of * @return the update for the field, or null if that field has no update in this */ public FieldUpdate getFieldUpdate(String fieldName) { Field field = documentType.getField(fieldName); return field != null ? getFieldUpdate(field) : null; } private FieldUpdate getFieldUpdateById(Integer fieldId) { for (FieldUpdate fieldUpdate : fieldUpdates) { if (fieldUpdate.getField().getId() == fieldId) { return fieldUpdate; } } return null; } /** * Assigns the field updates of this document update. * This document update receives ownership of the list - it can not be subsequently used * by the caller. Also note that there no assumptions can be made on the order of items * after this call. They might have been joined if for the same field or reordered. 
* * @param fieldUpdates the new list of updates of this * @throws NullPointerException if the argument passed is null */ public void setFieldUpdates(Collection<FieldUpdate> fieldUpdates) { if (fieldUpdates == null) { throw new NullPointerException("The field updates of a document update can not be null"); } clearFieldUpdates(); addFieldUpdates(fieldUpdates); } public void addFieldUpdates(Collection<FieldUpdate> fieldUpdates) { for (FieldUpdate fieldUpdate : fieldUpdates) { addFieldUpdate(fieldUpdate); } } /** * Get the number of field updates in this document update. * * @return the size of the List of FieldUpdates */ public int size() { return fieldUpdates.size(); } /** * Adds the given {@link FieldUpdate} to this DocumentUpdate. If this DocumentUpdate already contains a FieldUpdate * for the named field, the content of the given FieldUpdate is added to the existing one. * * @param update The FieldUpdate to add to this DocumentUpdate. * @return This, to allow chaining. * @throws IllegalArgumentException If the {@link DocumentType} of this DocumentUpdate does not have a corresponding * field. */ public DocumentUpdate addFieldUpdate(FieldUpdate update) { int fieldId = update.getField().getId(); if (documentType.getField(fieldId) == null) { throw new IllegalArgumentException("Document type '" + documentType.getName() + "' does not have field '" + update.getField().getName() + "'."); } FieldUpdate prevUpdate = getFieldUpdateById(fieldId); if (prevUpdate != update) { if (prevUpdate != null) { prevUpdate.addAll(update); } else { fieldUpdates.add(update); } } return this; } /** * Adds a field path update to perform on the document. * * @return a reference to itself. */ public DocumentUpdate addFieldPathUpdate(FieldPathUpdate fieldPathUpdate) { fieldPathUpdates.add(fieldPathUpdate); return this; } /** * Adds all the field- and field path updates of the given document update to this. 
If the given update refers to a * different document or document type than this, this method throws an exception. * * @param update The update whose content to add to this. * @throws IllegalArgumentException If the {@link DocumentId} or {@link DocumentType} of the given DocumentUpdate * does not match the content of this. */ public void addAll(DocumentUpdate update) { if (update == null) { return; } if (!docId.equals(update.docId)) { throw new IllegalArgumentException("Expected " + docId + ", got " + update.docId + "."); } if (!documentType.equals(update.documentType)) { throw new IllegalArgumentException("Expected " + documentType + ", got " + update.documentType + "."); } addFieldUpdates(update.fieldUpdates()); for (FieldPathUpdate pathUpd : update.fieldPathUpdates) { addFieldPathUpdate(pathUpd); } } /** * Removes the field update at the specified position in the list of field updates. * * @param index the index of the FieldUpdate to remove * @return the FieldUpdate previously at the specified position * @throws IndexOutOfBoundsException if index is out of range * @deprecated use removeFieldUpdate(Field field) instead. */ @Deprecated public FieldUpdate removeFieldUpdate(int index) { FieldUpdate prev = getFieldUpdate(index); fieldUpdates.remove(index); return removeFieldUpdate(prev.getField()); } public FieldUpdate removeFieldUpdate(String fieldName) { Field field = documentType.getField(fieldName); return field != null ? removeFieldUpdate(field) : null; } /** * Returns the document type of this document update. 
* * @return the document type of this document update */ public DocumentType getType() { return documentType; } public final void serialize(GrowableByteBuffer buf) { serialize(DocumentSerializerFactory.create42(buf)); } public void serialize(DocumentUpdateWriter data) { data.write(this); } @Override public boolean equals(Object o) { if (this == o) return true; if (!(o instanceof DocumentUpdate)) return false; DocumentUpdate that = (DocumentUpdate) o; if (docId != null ? !docId.equals(that.docId) : that.docId != null) return false; if (documentType != null ? !documentType.equals(that.documentType) : that.documentType != null) return false; if (fieldPathUpdates != null ? !fieldPathUpdates.equals(that.fieldPathUpdates) : that.fieldPathUpdates != null) return false; if (fieldUpdates != null ? !fieldUpdates.equals(that.fieldUpdates) : that.fieldUpdates != null) return false; if (this.getCreateIfNonExistent() != ((DocumentUpdate) o).getCreateIfNonExistent()) return false; return true; } @Override public int hashCode() { int result = docId != null ? docId.hashCode() : 0; result = 31 * result + (fieldUpdates != null ? fieldUpdates.hashCode() : 0); result = 31 * result + (fieldPathUpdates != null ? fieldPathUpdates.hashCode() : 0); result = 31 * result + (documentType != null ? 
documentType.hashCode() : 0); return result; } @Override public String toString() { StringBuilder string = new StringBuilder(); string.append("update of document '"); string.append(docId); string.append("': "); string.append("create-if-non-existent="); string.append(createIfNonExistent); string.append(": "); string.append("["); for (FieldUpdate fieldUpdate : fieldUpdates) { string.append(fieldUpdate).append(" "); } string.append("]"); if (fieldPathUpdates.size() > 0) { string.append(" [ "); for (FieldPathUpdate up : fieldPathUpdates) { string.append(up.toString()).append(" "); } string.append(" ]"); } return string.toString(); } @Override public Iterator<FieldPathUpdate> iterator() { return fieldPathUpdates.iterator(); } /** * Returns whether or not this field update contains any field- or field path updates. * * @return True if this update is empty. */ public boolean isEmpty() { return fieldUpdates.isEmpty() && fieldPathUpdates.isEmpty(); } /** * Sets whether this update should create the document it updates if that document does not exist. * In this case an empty document is created before the update is applied. * * @since 5.17 * @param value Whether the document it updates should be created. */ public void setCreateIfNonExistent(boolean value) { createIfNonExistent = value; } /** * Gets whether this update should create the document it updates if that document does not exist. * * @since 5.17 * @return Whether the document it updates should be created. */ public boolean getCreateIfNonExistent() { return createIfNonExistent != null && createIfNonExistent; } public Optional<Boolean> getOptionalCreateIfNonExistent() { return Optional.ofNullable(createIfNonExistent); } }
class DocumentUpdate extends DocumentOperation implements Iterable<FieldPathUpdate> { public static final int CLASSID = 0x1000 + 6; private DocumentId docId; private final List<FieldUpdate> fieldUpdates; private final List<FieldPathUpdate> fieldPathUpdates; private DocumentType documentType; private Boolean createIfNonExistent; /** * Creates a DocumentUpdate. * * @param docId the ID of the update * @param docType the document type that this update is valid for */ public DocumentUpdate(DocumentType docType, DocumentId docId) { this.docId = docId; this.documentType = docType; this.fieldUpdates = new ArrayList<>(); this.fieldPathUpdates = new ArrayList<>(); } /** * Creates a new document update using a reader */ public DocumentUpdate(DocumentUpdateReader reader) { docId = null; documentType = null; fieldUpdates = new ArrayList<>(); fieldPathUpdates = new ArrayList<>(); reader.read(this); } /** * Creates a DocumentUpdate. * * @param docId the ID of the update * @param docType the document type that this update is valid for */ public DocumentUpdate(DocumentType docType, String docId) { this(docType, new DocumentId(docId)); } public DocumentId getId() { return docId; } /** * Sets the document id of the document to update. * Use only while deserializing - changing the document id after creation has undefined behaviour. */ public void setId(DocumentId id) { docId = id; } private void verifyType(Document doc) { if (!documentType.equals(doc.getDataType())) { throw new IllegalArgumentException( "Document " + doc.getId() + " with type " + doc.getDataType() + " must have same type as update, which is type " + documentType); } } /** * Applies this document update. 
* * @param doc the document to apply the update to * @return a reference to itself * @throws IllegalArgumentException if the document does not have the same document type as this update */ public DocumentUpdate applyTo(Document doc) { verifyType(doc); for (FieldUpdate fieldUpdate : fieldUpdates) { fieldUpdate.applyTo(doc); } for (FieldPathUpdate fieldPathUpdate : fieldPathUpdates) { fieldPathUpdate.applyTo(doc); } return this; } /** * Prune away any field update that will not modify any field in the document. * @param doc document to check against * @return a reference to itself * @throws IllegalArgumentException if the document does not have the same document type as this update */ public DocumentUpdate prune(Document doc) { verifyType(doc); for (Iterator<FieldUpdate> iter = fieldUpdates.iterator(); iter.hasNext();) { FieldUpdate update = iter.next(); if (!update.isEmpty()) { ValueUpdate last = update.getValueUpdate(update.size() - 1); if (last instanceof AssignValueUpdate) { FieldValue currentValue = doc.getFieldValue(update.getField()); if ((currentValue != null) && currentValue.equals(last.getValue())) { iter.remove(); } } else if (last instanceof ClearValueUpdate) { FieldValue currentValue = doc.getFieldValue(update.getField()); if (currentValue == null) { iter.remove(); } else { FieldValue copy = currentValue.clone(); copy.clear(); if (currentValue.equals(copy)) { iter.remove(); } } } } } return this; } /** * Get an unmodifiable list of all field updates that this document update specifies. * * @return a list of all FieldUpdates in this DocumentUpdate * @deprecated Use fieldUpdates() instead. */ @Deprecated public List<FieldUpdate> getFieldUpdates() { return Collections.unmodifiableList(fieldUpdates); } /** * Get an unmodifiable collection of all field updates that this document update specifies. 
* * @return a collection of all FieldUpdates in this DocumentUpdate */ public Collection<FieldUpdate> fieldUpdates() { return Collections.unmodifiableCollection(fieldUpdates); } /** * Get an unmodifiable list of all field path updates this document update specifies. * * @return Returns a list of all field path updates in this document update. * @deprecated Use fieldPathUpdates() instead. */ @Deprecated public List<FieldPathUpdate> getFieldPathUpdates() { return Collections.unmodifiableList(fieldPathUpdates); } /** * Get an unmodifiable collection of all field path updates that this document update specifies. * * @return a collection of all FieldPathUpdates in this DocumentUpdate */ public Collection<FieldPathUpdate> fieldPathUpdates() { return Collections.unmodifiableCollection(fieldPathUpdates); } /** Returns the type of the document this updates * * @return The documentype of the document */ public DocumentType getDocumentType() { return documentType; } /** * Sets the document type. Use only while deserializing - changing the document type after creation * has undefined behaviour. */ public void setDocumentType(DocumentType type) { documentType = type; } /** * Get the field update at the specified index in the list of field updates. * * @param index the index of the FieldUpdate to return * @return the FieldUpdate at the specified index * @throws IndexOutOfBoundsException if index is out of range * @deprecated use getFieldUpdate(Field field) instead. */ @Deprecated public FieldUpdate getFieldUpdate(int index) { return fieldUpdates.get(index); } /** * Replaces the field update at the specified index in the list of field updates. 
* * @param index index of the FieldUpdate to replace * @param upd the FieldUpdate to be stored at the specified position * @return the FieldUpdate previously at the specified position * @throws IndexOutOfBoundsException if index is out of range * @deprecated Use removeFieldUpdate/addFieldUpdate instead */ @Deprecated public FieldUpdate setFieldUpdate(int index, FieldUpdate upd) { FieldUpdate old = fieldUpdates.get(index); fieldUpdates.set(index, upd); return old; } /** * Returns the update for a field * * @param field the field to return the update of * @return the update for the field, or null if that field has no update in this */ public FieldUpdate getFieldUpdate(Field field) { return getFieldUpdateById(field.getId()); } /** Removes all field updates from the list for field updates. */ public void clearFieldUpdates() { fieldUpdates.clear(); } /** * Returns the update for a field name * * @param fieldName the field name to return the update of * @return the update for the field, or null if that field has no update in this */ public FieldUpdate getFieldUpdate(String fieldName) { Field field = documentType.getField(fieldName); return field != null ? getFieldUpdate(field) : null; } private FieldUpdate getFieldUpdateById(Integer fieldId) { for (FieldUpdate fieldUpdate : fieldUpdates) { if (fieldUpdate.getField().getId() == fieldId) { return fieldUpdate; } } return null; } /** * Assigns the field updates of this document update. * This document update receives ownership of the list - it can not be subsequently used * by the caller. Also note that there no assumptions can be made on the order of items * after this call. They might have been joined if for the same field or reordered. 
* * @param fieldUpdates the new list of updates of this * @throws NullPointerException if the argument passed is null */ public void setFieldUpdates(Collection<FieldUpdate> fieldUpdates) { if (fieldUpdates == null) { throw new NullPointerException("The field updates of a document update can not be null"); } clearFieldUpdates(); addFieldUpdates(fieldUpdates); } public void addFieldUpdates(Collection<FieldUpdate> fieldUpdates) { for (FieldUpdate fieldUpdate : fieldUpdates) { addFieldUpdate(fieldUpdate); } } /** * Get the number of field updates in this document update. * * @return the size of the List of FieldUpdates */ public int size() { return fieldUpdates.size(); } /** * Adds the given {@link FieldUpdate} to this DocumentUpdate. If this DocumentUpdate already contains a FieldUpdate * for the named field, the content of the given FieldUpdate is added to the existing one. * * @param update The FieldUpdate to add to this DocumentUpdate. * @return This, to allow chaining. * @throws IllegalArgumentException If the {@link DocumentType} of this DocumentUpdate does not have a corresponding * field. */ public DocumentUpdate addFieldUpdate(FieldUpdate update) { int fieldId = update.getField().getId(); if (documentType.getField(fieldId) == null) { throw new IllegalArgumentException("Document type '" + documentType.getName() + "' does not have field '" + update.getField().getName() + "'."); } FieldUpdate prevUpdate = getFieldUpdateById(fieldId); if (prevUpdate != update) { if (prevUpdate != null) { prevUpdate.addAll(update); } else { fieldUpdates.add(update); } } return this; } /** * Adds a field path update to perform on the document. * * @return a reference to itself. */ public DocumentUpdate addFieldPathUpdate(FieldPathUpdate fieldPathUpdate) { fieldPathUpdates.add(fieldPathUpdate); return this; } /** * Adds all the field- and field path updates of the given document update to this. 
If the given update refers to a * different document or document type than this, this method throws an exception. * * @param update The update whose content to add to this. * @throws IllegalArgumentException If the {@link DocumentId} or {@link DocumentType} of the given DocumentUpdate * does not match the content of this. */ public void addAll(DocumentUpdate update) { if (update == null) { return; } if (!docId.equals(update.docId)) { throw new IllegalArgumentException("Expected " + docId + ", got " + update.docId + "."); } if (!documentType.equals(update.documentType)) { throw new IllegalArgumentException("Expected " + documentType + ", got " + update.documentType + "."); } addFieldUpdates(update.fieldUpdates()); for (FieldPathUpdate pathUpd : update.fieldPathUpdates) { addFieldPathUpdate(pathUpd); } } /** * Removes the field update at the specified position in the list of field updates. * * @param index the index of the FieldUpdate to remove * @return the FieldUpdate previously at the specified position * @throws IndexOutOfBoundsException if index is out of range * @deprecated use removeFieldUpdate(Field field) instead. */ @Deprecated public FieldUpdate removeFieldUpdate(int index) { FieldUpdate prev = getFieldUpdate(index); fieldUpdates.remove(index); return removeFieldUpdate(prev.getField()); } public FieldUpdate removeFieldUpdate(String fieldName) { Field field = documentType.getField(fieldName); return field != null ? removeFieldUpdate(field) : null; } /** * Returns the document type of this document update. 
* * @return the document type of this document update */ public DocumentType getType() { return documentType; } public final void serialize(GrowableByteBuffer buf) { serialize(DocumentSerializerFactory.create42(buf)); } public void serialize(DocumentUpdateWriter data) { data.write(this); } @Override public boolean equals(Object o) { if (this == o) return true; if (!(o instanceof DocumentUpdate)) return false; DocumentUpdate that = (DocumentUpdate) o; if (docId != null ? !docId.equals(that.docId) : that.docId != null) return false; if (documentType != null ? !documentType.equals(that.documentType) : that.documentType != null) return false; if (fieldPathUpdates != null ? !fieldPathUpdates.equals(that.fieldPathUpdates) : that.fieldPathUpdates != null) return false; if (fieldUpdates != null ? !fieldUpdates.equals(that.fieldUpdates) : that.fieldUpdates != null) return false; if (this.getCreateIfNonExistent() != ((DocumentUpdate) o).getCreateIfNonExistent()) return false; return true; } @Override public int hashCode() { int result = docId != null ? docId.hashCode() : 0; result = 31 * result + (fieldUpdates != null ? fieldUpdates.hashCode() : 0); result = 31 * result + (fieldPathUpdates != null ? fieldPathUpdates.hashCode() : 0); result = 31 * result + (documentType != null ? 
documentType.hashCode() : 0); return result; } @Override public String toString() { StringBuilder string = new StringBuilder(); string.append("update of document '"); string.append(docId); string.append("': "); string.append("create-if-non-existent="); string.append(createIfNonExistent); string.append(": "); string.append("["); for (FieldUpdate fieldUpdate : fieldUpdates) { string.append(fieldUpdate).append(" "); } string.append("]"); if (fieldPathUpdates.size() > 0) { string.append(" [ "); for (FieldPathUpdate up : fieldPathUpdates) { string.append(up.toString()).append(" "); } string.append(" ]"); } return string.toString(); } @Override public Iterator<FieldPathUpdate> iterator() { return fieldPathUpdates.iterator(); } /** * Returns whether or not this field update contains any field- or field path updates. * * @return True if this update is empty. */ public boolean isEmpty() { return fieldUpdates.isEmpty() && fieldPathUpdates.isEmpty(); } /** * Sets whether this update should create the document it updates if that document does not exist. * In this case an empty document is created before the update is applied. * * @since 5.17 * @param value Whether the document it updates should be created. */ public void setCreateIfNonExistent(boolean value) { createIfNonExistent = value; } /** * Gets whether this update should create the document it updates if that document does not exist. * * @since 5.17 * @return Whether the document it updates should be created. */ public boolean getCreateIfNonExistent() { return createIfNonExistent != null && createIfNonExistent; } public Optional<Boolean> getOptionalCreateIfNonExistent() { return Optional.ofNullable(createIfNonExistent); } }
Ooops, correct. Missing test...
public FieldUpdate removeFieldUpdate(int index) { FieldUpdate prev = getFieldUpdate(index); fieldUpdates.remove(index); return removeFieldUpdate(prev.getField()); }
return removeFieldUpdate(prev.getField());
public FieldUpdate removeFieldUpdate(int index) { FieldUpdate prev = getFieldUpdate(index); fieldUpdates.remove(index); return removeFieldUpdate(prev.getField()); }
class DocumentUpdate extends DocumentOperation implements Iterable<FieldPathUpdate> { public static final int CLASSID = 0x1000 + 6; private DocumentId docId; private final List<FieldUpdate> fieldUpdates; private final List<FieldPathUpdate> fieldPathUpdates; private DocumentType documentType; private Boolean createIfNonExistent; /** * Creates a DocumentUpdate. * * @param docId the ID of the update * @param docType the document type that this update is valid for */ public DocumentUpdate(DocumentType docType, DocumentId docId) { this.docId = docId; this.documentType = docType; this.fieldUpdates = new ArrayList<>(); this.fieldPathUpdates = new ArrayList<>(); } /** * Creates a new document update using a reader */ public DocumentUpdate(DocumentUpdateReader reader) { docId = null; documentType = null; fieldUpdates = new ArrayList<>(); fieldPathUpdates = new ArrayList<>(); reader.read(this); } /** * Creates a DocumentUpdate. * * @param docId the ID of the update * @param docType the document type that this update is valid for */ public DocumentUpdate(DocumentType docType, String docId) { this(docType, new DocumentId(docId)); } public DocumentId getId() { return docId; } /** * Sets the document id of the document to update. * Use only while deserializing - changing the document id after creation has undefined behaviour. */ public void setId(DocumentId id) { docId = id; } private void verifyType(Document doc) { if (!documentType.equals(doc.getDataType())) { throw new IllegalArgumentException( "Document " + doc.getId() + " with type " + doc.getDataType() + " must have same type as update, which is type " + documentType); } } /** * Applies this document update. 
* * @param doc the document to apply the update to * @return a reference to itself * @throws IllegalArgumentException if the document does not have the same document type as this update */ public DocumentUpdate applyTo(Document doc) { verifyType(doc); for (FieldUpdate fieldUpdate : fieldUpdates) { fieldUpdate.applyTo(doc); } for (FieldPathUpdate fieldPathUpdate : fieldPathUpdates) { fieldPathUpdate.applyTo(doc); } return this; } /** * Prune away any field update that will not modify any field in the document. * @param doc document to check against * @return a reference to itself * @throws IllegalArgumentException if the document does not have the same document type as this update */ public DocumentUpdate prune(Document doc) { verifyType(doc); for (Iterator<FieldUpdate> iter = fieldUpdates.iterator(); iter.hasNext();) { FieldUpdate update = iter.next(); if (!update.isEmpty()) { ValueUpdate last = update.getValueUpdate(update.size() - 1); if (last instanceof AssignValueUpdate) { FieldValue currentValue = doc.getFieldValue(update.getField()); if ((currentValue != null) && currentValue.equals(last.getValue())) { iter.remove(); } } else if (last instanceof ClearValueUpdate) { FieldValue currentValue = doc.getFieldValue(update.getField()); if (currentValue == null) { iter.remove(); } else { FieldValue copy = currentValue.clone(); copy.clear(); if (currentValue.equals(copy)) { iter.remove(); } } } } } return this; } /** * Get an unmodifiable list of all field updates that this document update specifies. * * @return a list of all FieldUpdates in this DocumentUpdate * @deprecated Use fieldUpdates() instead. */ @Deprecated public List<FieldUpdate> getFieldUpdates() { return Collections.unmodifiableList(fieldUpdates); } /** * Get an unmodifiable collection of all field updates that this document update specifies. 
* * @return a collection of all FieldUpdates in this DocumentUpdate */ public Collection<FieldUpdate> fieldUpdates() { return Collections.unmodifiableCollection(fieldUpdates); } /** * Get an unmodifiable list of all field path updates this document update specifies. * * @return Returns a list of all field path updates in this document update. * @deprecated Use fieldPathUpdates() instead. */ @Deprecated public List<FieldPathUpdate> getFieldPathUpdates() { return Collections.unmodifiableList(fieldPathUpdates); } /** * Get an unmodifiable collection of all field path updates that this document update specifies. * * @return a collection of all FieldPathUpdates in this DocumentUpdate */ public Collection<FieldPathUpdate> fieldPathUpdates() { return Collections.unmodifiableCollection(fieldPathUpdates); } /** Returns the type of the document this updates * * @return The documentype of the document */ public DocumentType getDocumentType() { return documentType; } /** * Sets the document type. Use only while deserializing - changing the document type after creation * has undefined behaviour. */ public void setDocumentType(DocumentType type) { documentType = type; } /** * Get the field update at the specified index in the list of field updates. * * @param index the index of the FieldUpdate to return * @return the FieldUpdate at the specified index * @throws IndexOutOfBoundsException if index is out of range * @deprecated use getFieldUpdate(Field field) instead. */ @Deprecated public FieldUpdate getFieldUpdate(int index) { return fieldUpdates.get(index); } /** * Replaces the field update at the specified index in the list of field updates. 
* * @param index index of the FieldUpdate to replace * @param upd the FieldUpdate to be stored at the specified position * @return the FieldUpdate previously at the specified position * @throws IndexOutOfBoundsException if index is out of range * @deprecated Use removeFieldUpdate/addFieldUpdate instead */ @Deprecated public FieldUpdate setFieldUpdate(int index, FieldUpdate upd) { FieldUpdate old = fieldUpdates.get(index); fieldUpdates.set(index, upd); return old; } /** * Returns the update for a field * * @param field the field to return the update of * @return the update for the field, or null if that field has no update in this */ public FieldUpdate getFieldUpdate(Field field) { return getFieldUpdateById(field.getId()); } /** Removes all field updates from the list for field updates. */ public void clearFieldUpdates() { fieldUpdates.clear(); } /** * Returns the update for a field name * * @param fieldName the field name to return the update of * @return the update for the field, or null if that field has no update in this */ public FieldUpdate getFieldUpdate(String fieldName) { Field field = documentType.getField(fieldName); return field != null ? getFieldUpdate(field) : null; } private FieldUpdate getFieldUpdateById(Integer fieldId) { for (FieldUpdate fieldUpdate : fieldUpdates) { if (fieldUpdate.getField().getId() == fieldId) { return fieldUpdate; } } return null; } /** * Assigns the field updates of this document update. * This document update receives ownership of the list - it can not be subsequently used * by the caller. Also note that there no assumptions can be made on the order of items * after this call. They might have been joined if for the same field or reordered. 
* * @param fieldUpdates the new list of updates of this * @throws NullPointerException if the argument passed is null */ public void setFieldUpdates(Collection<FieldUpdate> fieldUpdates) { if (fieldUpdates == null) { throw new NullPointerException("The field updates of a document update can not be null"); } clearFieldUpdates(); addFieldUpdates(fieldUpdates); } public void addFieldUpdates(Collection<FieldUpdate> fieldUpdates) { for (FieldUpdate fieldUpdate : fieldUpdates) { addFieldUpdate(fieldUpdate); } } /** * Get the number of field updates in this document update. * * @return the size of the List of FieldUpdates */ public int size() { return fieldUpdates.size(); } /** * Adds the given {@link FieldUpdate} to this DocumentUpdate. If this DocumentUpdate already contains a FieldUpdate * for the named field, the content of the given FieldUpdate is added to the existing one. * * @param update The FieldUpdate to add to this DocumentUpdate. * @return This, to allow chaining. * @throws IllegalArgumentException If the {@link DocumentType} of this DocumentUpdate does not have a corresponding * field. */ public DocumentUpdate addFieldUpdate(FieldUpdate update) { int fieldId = update.getField().getId(); if (documentType.getField(fieldId) == null) { throw new IllegalArgumentException("Document type '" + documentType.getName() + "' does not have field '" + update.getField().getName() + "'."); } FieldUpdate prevUpdate = getFieldUpdateById(fieldId); if (prevUpdate != update) { if (prevUpdate != null) { prevUpdate.addAll(update); } else { fieldUpdates.add(update); } } return this; } /** * Adds a field path update to perform on the document. * * @return a reference to itself. */ public DocumentUpdate addFieldPathUpdate(FieldPathUpdate fieldPathUpdate) { fieldPathUpdates.add(fieldPathUpdate); return this; } /** * Adds all the field- and field path updates of the given document update to this. 
If the given update refers to a * different document or document type than this, this method throws an exception. * * @param update The update whose content to add to this. * @throws IllegalArgumentException If the {@link DocumentId} or {@link DocumentType} of the given DocumentUpdate * does not match the content of this. */ public void addAll(DocumentUpdate update) { if (update == null) { return; } if (!docId.equals(update.docId)) { throw new IllegalArgumentException("Expected " + docId + ", got " + update.docId + "."); } if (!documentType.equals(update.documentType)) { throw new IllegalArgumentException("Expected " + documentType + ", got " + update.documentType + "."); } addFieldUpdates(update.fieldUpdates()); for (FieldPathUpdate pathUpd : update.fieldPathUpdates) { addFieldPathUpdate(pathUpd); } } /** * Removes the field update at the specified position in the list of field updates. * * @param index the index of the FieldUpdate to remove * @return the FieldUpdate previously at the specified position * @throws IndexOutOfBoundsException if index is out of range * @deprecated use removeFieldUpdate(Field field) instead. */ @Deprecated public FieldUpdate removeFieldUpdate(Field field) { for (Iterator<FieldUpdate> it = fieldUpdates.iterator(); it.hasNext();) { FieldUpdate fieldUpdate = it.next(); if (fieldUpdate.getField().equals(field)) { it.remove(); return fieldUpdate; } } return null; } public FieldUpdate removeFieldUpdate(String fieldName) { Field field = documentType.getField(fieldName); return field != null ? removeFieldUpdate(field) : null; } /** * Returns the document type of this document update. 
* * @return the document type of this document update */ public DocumentType getType() { return documentType; } public final void serialize(GrowableByteBuffer buf) { serialize(DocumentSerializerFactory.create42(buf)); } public void serialize(DocumentUpdateWriter data) { data.write(this); } @Override public boolean equals(Object o) { if (this == o) return true; if (!(o instanceof DocumentUpdate)) return false; DocumentUpdate that = (DocumentUpdate) o; if (docId != null ? !docId.equals(that.docId) : that.docId != null) return false; if (documentType != null ? !documentType.equals(that.documentType) : that.documentType != null) return false; if (fieldPathUpdates != null ? !fieldPathUpdates.equals(that.fieldPathUpdates) : that.fieldPathUpdates != null) return false; if (fieldUpdates != null ? !fieldUpdates.equals(that.fieldUpdates) : that.fieldUpdates != null) return false; if (this.getCreateIfNonExistent() != ((DocumentUpdate) o).getCreateIfNonExistent()) return false; return true; } @Override public int hashCode() { int result = docId != null ? docId.hashCode() : 0; result = 31 * result + (fieldUpdates != null ? fieldUpdates.hashCode() : 0); result = 31 * result + (fieldPathUpdates != null ? fieldPathUpdates.hashCode() : 0); result = 31 * result + (documentType != null ? 
documentType.hashCode() : 0); return result; } @Override public String toString() { StringBuilder string = new StringBuilder(); string.append("update of document '"); string.append(docId); string.append("': "); string.append("create-if-non-existent="); string.append(createIfNonExistent); string.append(": "); string.append("["); for (FieldUpdate fieldUpdate : fieldUpdates) { string.append(fieldUpdate).append(" "); } string.append("]"); if (fieldPathUpdates.size() > 0) { string.append(" [ "); for (FieldPathUpdate up : fieldPathUpdates) { string.append(up.toString()).append(" "); } string.append(" ]"); } return string.toString(); } @Override public Iterator<FieldPathUpdate> iterator() { return fieldPathUpdates.iterator(); } /** * Returns whether or not this field update contains any field- or field path updates. * * @return True if this update is empty. */ public boolean isEmpty() { return fieldUpdates.isEmpty() && fieldPathUpdates.isEmpty(); } /** * Sets whether this update should create the document it updates if that document does not exist. * In this case an empty document is created before the update is applied. * * @since 5.17 * @param value Whether the document it updates should be created. */ public void setCreateIfNonExistent(boolean value) { createIfNonExistent = value; } /** * Gets whether this update should create the document it updates if that document does not exist. * * @since 5.17 * @return Whether the document it updates should be created. */ public boolean getCreateIfNonExistent() { return createIfNonExistent != null && createIfNonExistent; } public Optional<Boolean> getOptionalCreateIfNonExistent() { return Optional.ofNullable(createIfNonExistent); } }
class DocumentUpdate extends DocumentOperation implements Iterable<FieldPathUpdate> { public static final int CLASSID = 0x1000 + 6; private DocumentId docId; private final List<FieldUpdate> fieldUpdates; private final List<FieldPathUpdate> fieldPathUpdates; private DocumentType documentType; private Boolean createIfNonExistent; /** * Creates a DocumentUpdate. * * @param docId the ID of the update * @param docType the document type that this update is valid for */ public DocumentUpdate(DocumentType docType, DocumentId docId) { this.docId = docId; this.documentType = docType; this.fieldUpdates = new ArrayList<>(); this.fieldPathUpdates = new ArrayList<>(); } /** * Creates a new document update using a reader */ public DocumentUpdate(DocumentUpdateReader reader) { docId = null; documentType = null; fieldUpdates = new ArrayList<>(); fieldPathUpdates = new ArrayList<>(); reader.read(this); } /** * Creates a DocumentUpdate. * * @param docId the ID of the update * @param docType the document type that this update is valid for */ public DocumentUpdate(DocumentType docType, String docId) { this(docType, new DocumentId(docId)); } public DocumentId getId() { return docId; } /** * Sets the document id of the document to update. * Use only while deserializing - changing the document id after creation has undefined behaviour. */ public void setId(DocumentId id) { docId = id; } private void verifyType(Document doc) { if (!documentType.equals(doc.getDataType())) { throw new IllegalArgumentException( "Document " + doc.getId() + " with type " + doc.getDataType() + " must have same type as update, which is type " + documentType); } } /** * Applies this document update. 
* * @param doc the document to apply the update to * @return a reference to itself * @throws IllegalArgumentException if the document does not have the same document type as this update */ public DocumentUpdate applyTo(Document doc) { verifyType(doc); for (FieldUpdate fieldUpdate : fieldUpdates) { fieldUpdate.applyTo(doc); } for (FieldPathUpdate fieldPathUpdate : fieldPathUpdates) { fieldPathUpdate.applyTo(doc); } return this; } /** * Prune away any field update that will not modify any field in the document. * @param doc document to check against * @return a reference to itself * @throws IllegalArgumentException if the document does not have the same document type as this update */ public DocumentUpdate prune(Document doc) { verifyType(doc); for (Iterator<FieldUpdate> iter = fieldUpdates.iterator(); iter.hasNext();) { FieldUpdate update = iter.next(); if (!update.isEmpty()) { ValueUpdate last = update.getValueUpdate(update.size() - 1); if (last instanceof AssignValueUpdate) { FieldValue currentValue = doc.getFieldValue(update.getField()); if ((currentValue != null) && currentValue.equals(last.getValue())) { iter.remove(); } } else if (last instanceof ClearValueUpdate) { FieldValue currentValue = doc.getFieldValue(update.getField()); if (currentValue == null) { iter.remove(); } else { FieldValue copy = currentValue.clone(); copy.clear(); if (currentValue.equals(copy)) { iter.remove(); } } } } } return this; } /** * Get an unmodifiable list of all field updates that this document update specifies. * * @return a list of all FieldUpdates in this DocumentUpdate * @deprecated Use fieldUpdates() instead. */ @Deprecated public List<FieldUpdate> getFieldUpdates() { return Collections.unmodifiableList(fieldUpdates); } /** * Get an unmodifiable collection of all field updates that this document update specifies. 
* * @return a collection of all FieldUpdates in this DocumentUpdate */ public Collection<FieldUpdate> fieldUpdates() { return Collections.unmodifiableCollection(fieldUpdates); } /** * Get an unmodifiable list of all field path updates this document update specifies. * * @return Returns a list of all field path updates in this document update. * @deprecated Use fieldPathUpdates() instead. */ @Deprecated public List<FieldPathUpdate> getFieldPathUpdates() { return Collections.unmodifiableList(fieldPathUpdates); } /** * Get an unmodifiable collection of all field path updates that this document update specifies. * * @return a collection of all FieldPathUpdates in this DocumentUpdate */ public Collection<FieldPathUpdate> fieldPathUpdates() { return Collections.unmodifiableCollection(fieldPathUpdates); } /** Returns the type of the document this updates * * @return The documentype of the document */ public DocumentType getDocumentType() { return documentType; } /** * Sets the document type. Use only while deserializing - changing the document type after creation * has undefined behaviour. */ public void setDocumentType(DocumentType type) { documentType = type; } /** * Get the field update at the specified index in the list of field updates. * * @param index the index of the FieldUpdate to return * @return the FieldUpdate at the specified index * @throws IndexOutOfBoundsException if index is out of range * @deprecated use getFieldUpdate(Field field) instead. */ @Deprecated public FieldUpdate getFieldUpdate(int index) { return fieldUpdates.get(index); } /** * Replaces the field update at the specified index in the list of field updates. 
* * @param index index of the FieldUpdate to replace * @param upd the FieldUpdate to be stored at the specified position * @return the FieldUpdate previously at the specified position * @throws IndexOutOfBoundsException if index is out of range * @deprecated Use removeFieldUpdate/addFieldUpdate instead */ @Deprecated public FieldUpdate setFieldUpdate(int index, FieldUpdate upd) { FieldUpdate old = fieldUpdates.get(index); fieldUpdates.set(index, upd); return old; } /** * Returns the update for a field * * @param field the field to return the update of * @return the update for the field, or null if that field has no update in this */ public FieldUpdate getFieldUpdate(Field field) { return getFieldUpdateById(field.getId()); } /** Removes all field updates from the list for field updates. */ public void clearFieldUpdates() { fieldUpdates.clear(); } /** * Returns the update for a field name * * @param fieldName the field name to return the update of * @return the update for the field, or null if that field has no update in this */ public FieldUpdate getFieldUpdate(String fieldName) { Field field = documentType.getField(fieldName); return field != null ? getFieldUpdate(field) : null; } private FieldUpdate getFieldUpdateById(Integer fieldId) { for (FieldUpdate fieldUpdate : fieldUpdates) { if (fieldUpdate.getField().getId() == fieldId) { return fieldUpdate; } } return null; } /** * Assigns the field updates of this document update. * This document update receives ownership of the list - it can not be subsequently used * by the caller. Also note that there no assumptions can be made on the order of items * after this call. They might have been joined if for the same field or reordered. 
* * @param fieldUpdates the new list of updates of this * @throws NullPointerException if the argument passed is null */ public void setFieldUpdates(Collection<FieldUpdate> fieldUpdates) { if (fieldUpdates == null) { throw new NullPointerException("The field updates of a document update can not be null"); } clearFieldUpdates(); addFieldUpdates(fieldUpdates); } public void addFieldUpdates(Collection<FieldUpdate> fieldUpdates) { for (FieldUpdate fieldUpdate : fieldUpdates) { addFieldUpdate(fieldUpdate); } } /** * Get the number of field updates in this document update. * * @return the size of the List of FieldUpdates */ public int size() { return fieldUpdates.size(); } /** * Adds the given {@link FieldUpdate} to this DocumentUpdate. If this DocumentUpdate already contains a FieldUpdate * for the named field, the content of the given FieldUpdate is added to the existing one. * * @param update The FieldUpdate to add to this DocumentUpdate. * @return This, to allow chaining. * @throws IllegalArgumentException If the {@link DocumentType} of this DocumentUpdate does not have a corresponding * field. */ public DocumentUpdate addFieldUpdate(FieldUpdate update) { int fieldId = update.getField().getId(); if (documentType.getField(fieldId) == null) { throw new IllegalArgumentException("Document type '" + documentType.getName() + "' does not have field '" + update.getField().getName() + "'."); } FieldUpdate prevUpdate = getFieldUpdateById(fieldId); if (prevUpdate != update) { if (prevUpdate != null) { prevUpdate.addAll(update); } else { fieldUpdates.add(update); } } return this; } /** * Adds a field path update to perform on the document. * * @return a reference to itself. */ public DocumentUpdate addFieldPathUpdate(FieldPathUpdate fieldPathUpdate) { fieldPathUpdates.add(fieldPathUpdate); return this; } /** * Adds all the field- and field path updates of the given document update to this. 
If the given update refers to a * different document or document type than this, this method throws an exception. * * @param update The update whose content to add to this. * @throws IllegalArgumentException If the {@link DocumentId} or {@link DocumentType} of the given DocumentUpdate * does not match the content of this. */ public void addAll(DocumentUpdate update) { if (update == null) { return; } if (!docId.equals(update.docId)) { throw new IllegalArgumentException("Expected " + docId + ", got " + update.docId + "."); } if (!documentType.equals(update.documentType)) { throw new IllegalArgumentException("Expected " + documentType + ", got " + update.documentType + "."); } addFieldUpdates(update.fieldUpdates()); for (FieldPathUpdate pathUpd : update.fieldPathUpdates) { addFieldPathUpdate(pathUpd); } } /** * Removes the field update at the specified position in the list of field updates. * * @param index the index of the FieldUpdate to remove * @return the FieldUpdate previously at the specified position * @throws IndexOutOfBoundsException if index is out of range * @deprecated use removeFieldUpdate(Field field) instead. */ @Deprecated public FieldUpdate removeFieldUpdate(Field field) { for (Iterator<FieldUpdate> it = fieldUpdates.iterator(); it.hasNext();) { FieldUpdate fieldUpdate = it.next(); if (fieldUpdate.getField().equals(field)) { it.remove(); return fieldUpdate; } } return null; } public FieldUpdate removeFieldUpdate(String fieldName) { Field field = documentType.getField(fieldName); return field != null ? removeFieldUpdate(field) : null; } /** * Returns the document type of this document update. 
* * @return the document type of this document update */ public DocumentType getType() { return documentType; } public final void serialize(GrowableByteBuffer buf) { serialize(DocumentSerializerFactory.create42(buf)); } public void serialize(DocumentUpdateWriter data) { data.write(this); } @Override public boolean equals(Object o) { if (this == o) return true; if (!(o instanceof DocumentUpdate)) return false; DocumentUpdate that = (DocumentUpdate) o; if (docId != null ? !docId.equals(that.docId) : that.docId != null) return false; if (documentType != null ? !documentType.equals(that.documentType) : that.documentType != null) return false; if (fieldPathUpdates != null ? !fieldPathUpdates.equals(that.fieldPathUpdates) : that.fieldPathUpdates != null) return false; if (fieldUpdates != null ? !fieldUpdates.equals(that.fieldUpdates) : that.fieldUpdates != null) return false; if (this.getCreateIfNonExistent() != ((DocumentUpdate) o).getCreateIfNonExistent()) return false; return true; } @Override public int hashCode() { int result = docId != null ? docId.hashCode() : 0; result = 31 * result + (fieldUpdates != null ? fieldUpdates.hashCode() : 0); result = 31 * result + (fieldPathUpdates != null ? fieldPathUpdates.hashCode() : 0); result = 31 * result + (documentType != null ? 
documentType.hashCode() : 0); return result; } @Override public String toString() { StringBuilder string = new StringBuilder(); string.append("update of document '"); string.append(docId); string.append("': "); string.append("create-if-non-existent="); string.append(createIfNonExistent); string.append(": "); string.append("["); for (FieldUpdate fieldUpdate : fieldUpdates) { string.append(fieldUpdate).append(" "); } string.append("]"); if (fieldPathUpdates.size() > 0) { string.append(" [ "); for (FieldPathUpdate up : fieldPathUpdates) { string.append(up.toString()).append(" "); } string.append(" ]"); } return string.toString(); } @Override public Iterator<FieldPathUpdate> iterator() { return fieldPathUpdates.iterator(); } /** * Returns whether or not this field update contains any field- or field path updates. * * @return True if this update is empty. */ public boolean isEmpty() { return fieldUpdates.isEmpty() && fieldPathUpdates.isEmpty(); } /** * Sets whether this update should create the document it updates if that document does not exist. * In this case an empty document is created before the update is applied. * * @since 5.17 * @param value Whether the document it updates should be created. */ public void setCreateIfNonExistent(boolean value) { createIfNonExistent = value; } /** * Gets whether this update should create the document it updates if that document does not exist. * * @since 5.17 * @return Whether the document it updates should be created. */ public boolean getCreateIfNonExistent() { return createIfNonExistent != null && createIfNonExistent; } public Optional<Boolean> getOptionalCreateIfNonExistent() { return Optional.ofNullable(createIfNonExistent); } }
Correct
public String toString() { StringBuilder string = new StringBuilder(); string.append("update of document '"); string.append(docId); string.append("': "); string.append("create-if-non-existent="); string.append(createIfNonExistent); string.append(": "); string.append("["); for (FieldUpdate fieldUpdate : fieldUpdates) { string.append(fieldUpdate).append(" "); } string.append("]"); if (fieldPathUpdates.size() > 0) { string.append(" [ "); for (FieldPathUpdate up : fieldPathUpdates) { string.append(up.toString()).append(" "); } string.append(" ]"); } return string.toString(); }
string.append(createIfNonExistent);
public String toString() { StringBuilder string = new StringBuilder(); string.append("update of document '"); string.append(docId); string.append("': "); string.append("create-if-non-existent="); string.append(createIfNonExistent); string.append(": "); string.append("["); for (FieldUpdate fieldUpdate : fieldUpdates) { string.append(fieldUpdate).append(" "); } string.append("]"); if (fieldPathUpdates.size() > 0) { string.append(" [ "); for (FieldPathUpdate up : fieldPathUpdates) { string.append(up.toString()).append(" "); } string.append(" ]"); } return string.toString(); }
class DocumentUpdate extends DocumentOperation implements Iterable<FieldPathUpdate> { public static final int CLASSID = 0x1000 + 6; private DocumentId docId; private final List<FieldUpdate> fieldUpdates; private final List<FieldPathUpdate> fieldPathUpdates; private DocumentType documentType; private Boolean createIfNonExistent; /** * Creates a DocumentUpdate. * * @param docId the ID of the update * @param docType the document type that this update is valid for */ public DocumentUpdate(DocumentType docType, DocumentId docId) { this.docId = docId; this.documentType = docType; this.fieldUpdates = new ArrayList<>(); this.fieldPathUpdates = new ArrayList<>(); } /** * Creates a new document update using a reader */ public DocumentUpdate(DocumentUpdateReader reader) { docId = null; documentType = null; fieldUpdates = new ArrayList<>(); fieldPathUpdates = new ArrayList<>(); reader.read(this); } /** * Creates a DocumentUpdate. * * @param docId the ID of the update * @param docType the document type that this update is valid for */ public DocumentUpdate(DocumentType docType, String docId) { this(docType, new DocumentId(docId)); } public DocumentId getId() { return docId; } /** * Sets the document id of the document to update. * Use only while deserializing - changing the document id after creation has undefined behaviour. */ public void setId(DocumentId id) { docId = id; } private void verifyType(Document doc) { if (!documentType.equals(doc.getDataType())) { throw new IllegalArgumentException( "Document " + doc.getId() + " with type " + doc.getDataType() + " must have same type as update, which is type " + documentType); } } /** * Applies this document update. 
* * @param doc the document to apply the update to * @return a reference to itself * @throws IllegalArgumentException if the document does not have the same document type as this update */ public DocumentUpdate applyTo(Document doc) { verifyType(doc); for (FieldUpdate fieldUpdate : fieldUpdates) { fieldUpdate.applyTo(doc); } for (FieldPathUpdate fieldPathUpdate : fieldPathUpdates) { fieldPathUpdate.applyTo(doc); } return this; } /** * Prune away any field update that will not modify any field in the document. * @param doc document to check against * @return a reference to itself * @throws IllegalArgumentException if the document does not have the same document type as this update */ public DocumentUpdate prune(Document doc) { verifyType(doc); for (Iterator<FieldUpdate> iter = fieldUpdates.iterator(); iter.hasNext();) { FieldUpdate update = iter.next(); if (!update.isEmpty()) { ValueUpdate last = update.getValueUpdate(update.size() - 1); if (last instanceof AssignValueUpdate) { FieldValue currentValue = doc.getFieldValue(update.getField()); if ((currentValue != null) && currentValue.equals(last.getValue())) { iter.remove(); } } else if (last instanceof ClearValueUpdate) { FieldValue currentValue = doc.getFieldValue(update.getField()); if (currentValue == null) { iter.remove(); } else { FieldValue copy = currentValue.clone(); copy.clear(); if (currentValue.equals(copy)) { iter.remove(); } } } } } return this; } /** * Get an unmodifiable list of all field updates that this document update specifies. * * @return a list of all FieldUpdates in this DocumentUpdate * @deprecated Use fieldUpdates() instead. */ @Deprecated public List<FieldUpdate> getFieldUpdates() { return Collections.unmodifiableList(fieldUpdates); } /** * Get an unmodifiable collection of all field updates that this document update specifies. 
* * @return a collection of all FieldUpdates in this DocumentUpdate */ public Collection<FieldUpdate> fieldUpdates() { return Collections.unmodifiableCollection(fieldUpdates); } /** * Get an unmodifiable list of all field path updates this document update specifies. * * @return Returns a list of all field path updates in this document update. * @deprecated Use fieldPathUpdates() instead. */ @Deprecated public List<FieldPathUpdate> getFieldPathUpdates() { return Collections.unmodifiableList(fieldPathUpdates); } /** * Get an unmodifiable collection of all field path updates that this document update specifies. * * @return a collection of all FieldPathUpdates in this DocumentUpdate */ public Collection<FieldPathUpdate> fieldPathUpdates() { return Collections.unmodifiableCollection(fieldPathUpdates); } /** Returns the type of the document this updates * * @return The documentype of the document */ public DocumentType getDocumentType() { return documentType; } /** * Sets the document type. Use only while deserializing - changing the document type after creation * has undefined behaviour. */ public void setDocumentType(DocumentType type) { documentType = type; } /** * Get the field update at the specified index in the list of field updates. * * @param index the index of the FieldUpdate to return * @return the FieldUpdate at the specified index * @throws IndexOutOfBoundsException if index is out of range * @deprecated use getFieldUpdate(Field field) instead. */ @Deprecated public FieldUpdate getFieldUpdate(int index) { return fieldUpdates.get(index); } /** * Replaces the field update at the specified index in the list of field updates. 
* * @param index index of the FieldUpdate to replace * @param upd the FieldUpdate to be stored at the specified position * @return the FieldUpdate previously at the specified position * @throws IndexOutOfBoundsException if index is out of range * @deprecated Use removeFieldUpdate/addFieldUpdate instead */ @Deprecated public FieldUpdate setFieldUpdate(int index, FieldUpdate upd) { FieldUpdate old = fieldUpdates.get(index); fieldUpdates.set(index, upd); return old; } /** * Returns the update for a field * * @param field the field to return the update of * @return the update for the field, or null if that field has no update in this */ public FieldUpdate getFieldUpdate(Field field) { return getFieldUpdateById(field.getId()); } /** Removes all field updates from the list for field updates. */ public void clearFieldUpdates() { fieldUpdates.clear(); } /** * Returns the update for a field name * * @param fieldName the field name to return the update of * @return the update for the field, or null if that field has no update in this */ public FieldUpdate getFieldUpdate(String fieldName) { Field field = documentType.getField(fieldName); return field != null ? getFieldUpdate(field) : null; } private FieldUpdate getFieldUpdateById(Integer fieldId) { for (FieldUpdate fieldUpdate : fieldUpdates) { if (fieldUpdate.getField().getId() == fieldId) { return fieldUpdate; } } return null; } /** * Assigns the field updates of this document update. * This document update receives ownership of the list - it can not be subsequently used * by the caller. Also note that there no assumptions can be made on the order of items * after this call. They might have been joined if for the same field or reordered. 
* * @param fieldUpdates the new list of updates of this * @throws NullPointerException if the argument passed is null */ public void setFieldUpdates(Collection<FieldUpdate> fieldUpdates) { if (fieldUpdates == null) { throw new NullPointerException("The field updates of a document update can not be null"); } clearFieldUpdates(); addFieldUpdates(fieldUpdates); } public void addFieldUpdates(Collection<FieldUpdate> fieldUpdates) { for (FieldUpdate fieldUpdate : fieldUpdates) { addFieldUpdate(fieldUpdate); } } /** * Get the number of field updates in this document update. * * @return the size of the List of FieldUpdates */ public int size() { return fieldUpdates.size(); } /** * Adds the given {@link FieldUpdate} to this DocumentUpdate. If this DocumentUpdate already contains a FieldUpdate * for the named field, the content of the given FieldUpdate is added to the existing one. * * @param update The FieldUpdate to add to this DocumentUpdate. * @return This, to allow chaining. * @throws IllegalArgumentException If the {@link DocumentType} of this DocumentUpdate does not have a corresponding * field. */ public DocumentUpdate addFieldUpdate(FieldUpdate update) { int fieldId = update.getField().getId(); if (documentType.getField(fieldId) == null) { throw new IllegalArgumentException("Document type '" + documentType.getName() + "' does not have field '" + update.getField().getName() + "'."); } FieldUpdate prevUpdate = getFieldUpdateById(fieldId); if (prevUpdate != update) { if (prevUpdate != null) { prevUpdate.addAll(update); } else { fieldUpdates.add(update); } } return this; } /** * Adds a field path update to perform on the document. * * @return a reference to itself. */ public DocumentUpdate addFieldPathUpdate(FieldPathUpdate fieldPathUpdate) { fieldPathUpdates.add(fieldPathUpdate); return this; } /** * Adds all the field- and field path updates of the given document update to this. 
If the given update refers to a * different document or document type than this, this method throws an exception. * * @param update The update whose content to add to this. * @throws IllegalArgumentException If the {@link DocumentId} or {@link DocumentType} of the given DocumentUpdate * does not match the content of this. */ public void addAll(DocumentUpdate update) { if (update == null) { return; } if (!docId.equals(update.docId)) { throw new IllegalArgumentException("Expected " + docId + ", got " + update.docId + "."); } if (!documentType.equals(update.documentType)) { throw new IllegalArgumentException("Expected " + documentType + ", got " + update.documentType + "."); } addFieldUpdates(update.fieldUpdates()); for (FieldPathUpdate pathUpd : update.fieldPathUpdates) { addFieldPathUpdate(pathUpd); } } /** * Removes the field update at the specified position in the list of field updates. * * @param index the index of the FieldUpdate to remove * @return the FieldUpdate previously at the specified position * @throws IndexOutOfBoundsException if index is out of range * @deprecated use removeFieldUpdate(Field field) instead. */ @Deprecated public FieldUpdate removeFieldUpdate(int index) { FieldUpdate prev = getFieldUpdate(index); fieldUpdates.remove(index); return removeFieldUpdate(prev.getField()); } public FieldUpdate removeFieldUpdate(Field field) { for (Iterator<FieldUpdate> it = fieldUpdates.iterator(); it.hasNext();) { FieldUpdate fieldUpdate = it.next(); if (fieldUpdate.getField().equals(field)) { it.remove(); return fieldUpdate; } } return null; } public FieldUpdate removeFieldUpdate(String fieldName) { Field field = documentType.getField(fieldName); return field != null ? removeFieldUpdate(field) : null; } /** * Returns the document type of this document update. 
* * @return the document type of this document update */ public DocumentType getType() { return documentType; } public final void serialize(GrowableByteBuffer buf) { serialize(DocumentSerializerFactory.create42(buf)); } public void serialize(DocumentUpdateWriter data) { data.write(this); } @Override public boolean equals(Object o) { if (this == o) return true; if (!(o instanceof DocumentUpdate)) return false; DocumentUpdate that = (DocumentUpdate) o; if (docId != null ? !docId.equals(that.docId) : that.docId != null) return false; if (documentType != null ? !documentType.equals(that.documentType) : that.documentType != null) return false; if (fieldPathUpdates != null ? !fieldPathUpdates.equals(that.fieldPathUpdates) : that.fieldPathUpdates != null) return false; if (fieldUpdates != null ? !fieldUpdates.equals(that.fieldUpdates) : that.fieldUpdates != null) return false; if (this.getCreateIfNonExistent() != ((DocumentUpdate) o).getCreateIfNonExistent()) return false; return true; } @Override public int hashCode() { int result = docId != null ? docId.hashCode() : 0; result = 31 * result + (fieldUpdates != null ? fieldUpdates.hashCode() : 0); result = 31 * result + (fieldPathUpdates != null ? fieldPathUpdates.hashCode() : 0); result = 31 * result + (documentType != null ? documentType.hashCode() : 0); return result; } @Override @Override public Iterator<FieldPathUpdate> iterator() { return fieldPathUpdates.iterator(); } /** * Returns whether or not this field update contains any field- or field path updates. * * @return True if this update is empty. */ public boolean isEmpty() { return fieldUpdates.isEmpty() && fieldPathUpdates.isEmpty(); } /** * Sets whether this update should create the document it updates if that document does not exist. * In this case an empty document is created before the update is applied. * * @since 5.17 * @param value Whether the document it updates should be created. 
*/ public void setCreateIfNonExistent(boolean value) { createIfNonExistent = value; } /** * Gets whether this update should create the document it updates if that document does not exist. * * @since 5.17 * @return Whether the document it updates should be created. */ public boolean getCreateIfNonExistent() { return createIfNonExistent != null && createIfNonExistent; } public Optional<Boolean> getOptionalCreateIfNonExistent() { return Optional.ofNullable(createIfNonExistent); } }
class DocumentUpdate extends DocumentOperation implements Iterable<FieldPathUpdate> { public static final int CLASSID = 0x1000 + 6; private DocumentId docId; private final List<FieldUpdate> fieldUpdates; private final List<FieldPathUpdate> fieldPathUpdates; private DocumentType documentType; private Boolean createIfNonExistent; /** * Creates a DocumentUpdate. * * @param docId the ID of the update * @param docType the document type that this update is valid for */ public DocumentUpdate(DocumentType docType, DocumentId docId) { this.docId = docId; this.documentType = docType; this.fieldUpdates = new ArrayList<>(); this.fieldPathUpdates = new ArrayList<>(); } /** * Creates a new document update using a reader */ public DocumentUpdate(DocumentUpdateReader reader) { docId = null; documentType = null; fieldUpdates = new ArrayList<>(); fieldPathUpdates = new ArrayList<>(); reader.read(this); } /** * Creates a DocumentUpdate. * * @param docId the ID of the update * @param docType the document type that this update is valid for */ public DocumentUpdate(DocumentType docType, String docId) { this(docType, new DocumentId(docId)); } public DocumentId getId() { return docId; } /** * Sets the document id of the document to update. * Use only while deserializing - changing the document id after creation has undefined behaviour. */ public void setId(DocumentId id) { docId = id; } private void verifyType(Document doc) { if (!documentType.equals(doc.getDataType())) { throw new IllegalArgumentException( "Document " + doc.getId() + " with type " + doc.getDataType() + " must have same type as update, which is type " + documentType); } } /** * Applies this document update. 
* * @param doc the document to apply the update to * @return a reference to itself * @throws IllegalArgumentException if the document does not have the same document type as this update */ public DocumentUpdate applyTo(Document doc) { verifyType(doc); for (FieldUpdate fieldUpdate : fieldUpdates) { fieldUpdate.applyTo(doc); } for (FieldPathUpdate fieldPathUpdate : fieldPathUpdates) { fieldPathUpdate.applyTo(doc); } return this; } /** * Prune away any field update that will not modify any field in the document. * @param doc document to check against * @return a reference to itself * @throws IllegalArgumentException if the document does not have the same document type as this update */ public DocumentUpdate prune(Document doc) { verifyType(doc); for (Iterator<FieldUpdate> iter = fieldUpdates.iterator(); iter.hasNext();) { FieldUpdate update = iter.next(); if (!update.isEmpty()) { ValueUpdate last = update.getValueUpdate(update.size() - 1); if (last instanceof AssignValueUpdate) { FieldValue currentValue = doc.getFieldValue(update.getField()); if ((currentValue != null) && currentValue.equals(last.getValue())) { iter.remove(); } } else if (last instanceof ClearValueUpdate) { FieldValue currentValue = doc.getFieldValue(update.getField()); if (currentValue == null) { iter.remove(); } else { FieldValue copy = currentValue.clone(); copy.clear(); if (currentValue.equals(copy)) { iter.remove(); } } } } } return this; } /** * Get an unmodifiable list of all field updates that this document update specifies. * * @return a list of all FieldUpdates in this DocumentUpdate * @deprecated Use fieldUpdates() instead. */ @Deprecated public List<FieldUpdate> getFieldUpdates() { return Collections.unmodifiableList(fieldUpdates); } /** * Get an unmodifiable collection of all field updates that this document update specifies. 
* * @return a collection of all FieldUpdates in this DocumentUpdate */ public Collection<FieldUpdate> fieldUpdates() { return Collections.unmodifiableCollection(fieldUpdates); } /** * Get an unmodifiable list of all field path updates this document update specifies. * * @return Returns a list of all field path updates in this document update. * @deprecated Use fieldPathUpdates() instead. */ @Deprecated public List<FieldPathUpdate> getFieldPathUpdates() { return Collections.unmodifiableList(fieldPathUpdates); } /** * Get an unmodifiable collection of all field path updates that this document update specifies. * * @return a collection of all FieldPathUpdates in this DocumentUpdate */ public Collection<FieldPathUpdate> fieldPathUpdates() { return Collections.unmodifiableCollection(fieldPathUpdates); } /** Returns the type of the document this updates * * @return The documentype of the document */ public DocumentType getDocumentType() { return documentType; } /** * Sets the document type. Use only while deserializing - changing the document type after creation * has undefined behaviour. */ public void setDocumentType(DocumentType type) { documentType = type; } /** * Get the field update at the specified index in the list of field updates. * * @param index the index of the FieldUpdate to return * @return the FieldUpdate at the specified index * @throws IndexOutOfBoundsException if index is out of range * @deprecated use getFieldUpdate(Field field) instead. */ @Deprecated public FieldUpdate getFieldUpdate(int index) { return fieldUpdates.get(index); } /** * Replaces the field update at the specified index in the list of field updates. 
* * @param index index of the FieldUpdate to replace * @param upd the FieldUpdate to be stored at the specified position * @return the FieldUpdate previously at the specified position * @throws IndexOutOfBoundsException if index is out of range * @deprecated Use removeFieldUpdate/addFieldUpdate instead */ @Deprecated public FieldUpdate setFieldUpdate(int index, FieldUpdate upd) { FieldUpdate old = fieldUpdates.get(index); fieldUpdates.set(index, upd); return old; } /** * Returns the update for a field * * @param field the field to return the update of * @return the update for the field, or null if that field has no update in this */ public FieldUpdate getFieldUpdate(Field field) { return getFieldUpdateById(field.getId()); } /** Removes all field updates from the list for field updates. */ public void clearFieldUpdates() { fieldUpdates.clear(); } /** * Returns the update for a field name * * @param fieldName the field name to return the update of * @return the update for the field, or null if that field has no update in this */ public FieldUpdate getFieldUpdate(String fieldName) { Field field = documentType.getField(fieldName); return field != null ? getFieldUpdate(field) : null; } private FieldUpdate getFieldUpdateById(Integer fieldId) { for (FieldUpdate fieldUpdate : fieldUpdates) { if (fieldUpdate.getField().getId() == fieldId) { return fieldUpdate; } } return null; } /** * Assigns the field updates of this document update. * This document update receives ownership of the list - it can not be subsequently used * by the caller. Also note that there no assumptions can be made on the order of items * after this call. They might have been joined if for the same field or reordered. 
* * @param fieldUpdates the new list of updates of this * @throws NullPointerException if the argument passed is null */ public void setFieldUpdates(Collection<FieldUpdate> fieldUpdates) { if (fieldUpdates == null) { throw new NullPointerException("The field updates of a document update can not be null"); } clearFieldUpdates(); addFieldUpdates(fieldUpdates); } public void addFieldUpdates(Collection<FieldUpdate> fieldUpdates) { for (FieldUpdate fieldUpdate : fieldUpdates) { addFieldUpdate(fieldUpdate); } } /** * Get the number of field updates in this document update. * * @return the size of the List of FieldUpdates */ public int size() { return fieldUpdates.size(); } /** * Adds the given {@link FieldUpdate} to this DocumentUpdate. If this DocumentUpdate already contains a FieldUpdate * for the named field, the content of the given FieldUpdate is added to the existing one. * * @param update The FieldUpdate to add to this DocumentUpdate. * @return This, to allow chaining. * @throws IllegalArgumentException If the {@link DocumentType} of this DocumentUpdate does not have a corresponding * field. */ public DocumentUpdate addFieldUpdate(FieldUpdate update) { int fieldId = update.getField().getId(); if (documentType.getField(fieldId) == null) { throw new IllegalArgumentException("Document type '" + documentType.getName() + "' does not have field '" + update.getField().getName() + "'."); } FieldUpdate prevUpdate = getFieldUpdateById(fieldId); if (prevUpdate != update) { if (prevUpdate != null) { prevUpdate.addAll(update); } else { fieldUpdates.add(update); } } return this; } /** * Adds a field path update to perform on the document. * * @return a reference to itself. */ public DocumentUpdate addFieldPathUpdate(FieldPathUpdate fieldPathUpdate) { fieldPathUpdates.add(fieldPathUpdate); return this; } /** * Adds all the field- and field path updates of the given document update to this. 
If the given update refers to a * different document or document type than this, this method throws an exception. * * @param update The update whose content to add to this. * @throws IllegalArgumentException If the {@link DocumentId} or {@link DocumentType} of the given DocumentUpdate * does not match the content of this. */ public void addAll(DocumentUpdate update) { if (update == null) { return; } if (!docId.equals(update.docId)) { throw new IllegalArgumentException("Expected " + docId + ", got " + update.docId + "."); } if (!documentType.equals(update.documentType)) { throw new IllegalArgumentException("Expected " + documentType + ", got " + update.documentType + "."); } addFieldUpdates(update.fieldUpdates()); for (FieldPathUpdate pathUpd : update.fieldPathUpdates) { addFieldPathUpdate(pathUpd); } } /** * Removes the field update at the specified position in the list of field updates. * * @param index the index of the FieldUpdate to remove * @return the FieldUpdate previously at the specified position * @throws IndexOutOfBoundsException if index is out of range * @deprecated use removeFieldUpdate(Field field) instead. */ @Deprecated public FieldUpdate removeFieldUpdate(int index) { FieldUpdate prev = getFieldUpdate(index); fieldUpdates.remove(index); return removeFieldUpdate(prev.getField()); } public FieldUpdate removeFieldUpdate(Field field) { for (Iterator<FieldUpdate> it = fieldUpdates.iterator(); it.hasNext();) { FieldUpdate fieldUpdate = it.next(); if (fieldUpdate.getField().equals(field)) { it.remove(); return fieldUpdate; } } return null; } public FieldUpdate removeFieldUpdate(String fieldName) { Field field = documentType.getField(fieldName); return field != null ? removeFieldUpdate(field) : null; } /** * Returns the document type of this document update. 
* * @return the document type of this document update */ public DocumentType getType() { return documentType; } public final void serialize(GrowableByteBuffer buf) { serialize(DocumentSerializerFactory.create42(buf)); } public void serialize(DocumentUpdateWriter data) { data.write(this); } @Override public boolean equals(Object o) { if (this == o) return true; if (!(o instanceof DocumentUpdate)) return false; DocumentUpdate that = (DocumentUpdate) o; if (docId != null ? !docId.equals(that.docId) : that.docId != null) return false; if (documentType != null ? !documentType.equals(that.documentType) : that.documentType != null) return false; if (fieldPathUpdates != null ? !fieldPathUpdates.equals(that.fieldPathUpdates) : that.fieldPathUpdates != null) return false; if (fieldUpdates != null ? !fieldUpdates.equals(that.fieldUpdates) : that.fieldUpdates != null) return false; if (this.getCreateIfNonExistent() != ((DocumentUpdate) o).getCreateIfNonExistent()) return false; return true; } @Override public int hashCode() { int result = docId != null ? docId.hashCode() : 0; result = 31 * result + (fieldUpdates != null ? fieldUpdates.hashCode() : 0); result = 31 * result + (fieldPathUpdates != null ? fieldPathUpdates.hashCode() : 0); result = 31 * result + (documentType != null ? documentType.hashCode() : 0); return result; } @Override @Override public Iterator<FieldPathUpdate> iterator() { return fieldPathUpdates.iterator(); } /** * Returns whether or not this field update contains any field- or field path updates. * * @return True if this update is empty. */ public boolean isEmpty() { return fieldUpdates.isEmpty() && fieldPathUpdates.isEmpty(); } /** * Sets whether this update should create the document it updates if that document does not exist. * In this case an empty document is created before the update is applied. * * @since 5.17 * @param value Whether the document it updates should be created. 
*/ public void setCreateIfNonExistent(boolean value) { createIfNonExistent = value; } /** * Gets whether this update should create the document it updates if that document does not exist. * * @since 5.17 * @return Whether the document it updates should be created. */ public boolean getCreateIfNonExistent() { return createIfNonExistent != null && createIfNonExistent; } public Optional<Boolean> getOptionalCreateIfNonExistent() { return Optional.ofNullable(createIfNonExistent); } }
Correct
public void read(DocumentUpdate update) { short serializationVersion = getShort(null); update.setId(new DocumentId(this)); byte contents = getByte(null); if ((contents & 0x1) == 0) { throw new DeserializationException("Cannot deserialize DocumentUpdate without doctype"); } update.setDocumentType(readDocumentType()); int size = getInt(null); for (int i = 0; i < size; i++) { update.addFieldUpdate(new FieldUpdate(this, update.getDocumentType(), serializationVersion)); } }
update.addFieldUpdate(new FieldUpdate(this, update.getDocumentType(), serializationVersion));
public void read(DocumentUpdate update) { short serializationVersion = getShort(null); update.setId(new DocumentId(this)); byte contents = getByte(null); if ((contents & 0x1) == 0) { throw new DeserializationException("Cannot deserialize DocumentUpdate without doctype"); } update.setDocumentType(readDocumentType()); int size = getInt(null); for (int i = 0; i < size; i++) { update.addFieldUpdate(new FieldUpdate(this, update.getDocumentType(), serializationVersion)); } }
class VespaDocumentDeserializer42 extends VespaDocumentSerializer42 implements DocumentDeserializer { private final Compressor compressor = new Compressor(); private DocumentTypeManager manager; GrowableByteBuffer body; private short version; private List<SpanNode> spanNodes; private List<Annotation> annotations; private int[] stringPositions; VespaDocumentDeserializer42(DocumentTypeManager manager, GrowableByteBuffer header, GrowableByteBuffer body, short version) { super(header); this.manager = manager; this.body = body; this.version = version; } VespaDocumentDeserializer42(DocumentTypeManager manager, GrowableByteBuffer buf) { this(manager, buf, null, Document.SERIALIZED_VERSION); } VespaDocumentDeserializer42(DocumentTypeManager manager, GrowableByteBuffer buf, GrowableByteBuffer body) { this(manager, buf, body, Document.SERIALIZED_VERSION); } final public DocumentTypeManager getDocumentTypeManager() { return manager; } public void read(Document document) { read(null, document); } public void read(FieldBase field, Document doc) { version = getShort(null); if (version < 6 || version > Document.SERIALIZED_VERSION) { throw new DeserializationException("Unknown version " + version + ", expected " + Document.SERIALIZED_VERSION + "."); } int dataLength = 0; int dataPos = 0; if (version < 7) { getInt2_4_8Bytes(null); } else { dataLength = getInt(null); dataPos = position(); } doc.setId(readDocumentId()); Byte content = getByte(null); doc.setDataType(readDocumentType()); if ((content & 0x2) != 0) { doc.getHeader().deserialize(new Field("header"),this); } if ((content & 0x4) != 0) { doc.getBody().deserialize(new Field("body"),this); } else if (body != null) { GrowableByteBuffer header = getBuf(); setBuf(body); body = null; doc.getBody().deserialize(new Field("body"), this); body = getBuf(); setBuf(header); } if (version < 8) { int crcVal = getInt(null); } if (version > 6) { if (dataLength != (position() - dataPos)) { throw new DeserializationException("Length 
mismatch"); } } } public void read(FieldBase field, FieldValue value) { throw new IllegalArgumentException("read not implemented yet."); } public <T extends FieldValue> void read(FieldBase field, Array<T> array) { int numElements = getNumCollectionElems(); ArrayList<T> list = new ArrayList<T>(numElements); ArrayDataType type = array.getDataType(); for (int i = 0; i < numElements; i++) { if (version < 7) { getInt(null); } FieldValue fv = type.getNestedType().createFieldValue(); fv.deserialize(null, this); list.add((T) fv); } array.clear(); array.addAll(list); } public <K extends FieldValue, V extends FieldValue> void read(FieldBase field, MapFieldValue<K, V> map) { int numElements = getNumCollectionElems(); Map<K,V> hash = new HashMap<>(); MapDataType type = map.getDataType(); for (int i = 0; i < numElements; i++) { if (version < 7) { getInt(null); } K key = (K) type.getKeyType().createFieldValue(); V val = (V) type.getValueType().createFieldValue(); key.deserialize(null, this); val.deserialize(null, this); hash.put(key, val); } map.clear(); map.putAll(hash); } private int getNumCollectionElems() { int numElements; if (version < 7) { getInt(null); numElements = getInt(null); } else { numElements = getInt1_2_4Bytes(null); } if (numElements < 0) { throw new DeserializationException("Bad number of array/map elements, " + numElements); } return numElements; } public <T extends FieldValue> void read(FieldBase field, CollectionFieldValue<T> value) { throw new IllegalArgumentException("read not implemented yet."); } public void read(FieldBase field, ByteFieldValue value) { value.assign(getByte(null)); } public void read(FieldBase field, DoubleFieldValue value) { value.assign(getDouble(null)); } public void read(FieldBase field, FloatFieldValue value) { value.assign(getFloat(null)); } public void read(FieldBase field, IntegerFieldValue value) { value.assign(getInt(null)); } public void read(FieldBase field, LongFieldValue value) { value.assign(getLong(null)); } public void 
read(FieldBase field, Raw value) { int rawsize = getInt(null); byte[] rawBytes = getBytes(null, rawsize); value.assign(rawBytes); } @Override public void read(FieldBase field, PredicateFieldValue value) { int len = getInt(null); byte[] buf = getBytes(null, len); value.assign(BinaryFormat.decode(buf)); } public void read(FieldBase field, StringFieldValue value) { byte coding = getByte(null); int length = getInt1_4Bytes(null); byte[] stringArray = new byte[length - 1]; buf.get(stringArray); buf.get(); value.setUnChecked(Utf8.toString(stringArray)); if ((coding & 64) == 64) { try { stringPositions = calculateStringPositions(stringArray); int size = buf.getInt(); int startPos = buf.position(); int numSpanTrees = buf.getInt1_2_4Bytes(); for (int i = 0; i < numSpanTrees; i++) { SpanTree tree = new SpanTree(); StringFieldValue treeName = new StringFieldValue(); treeName.deserialize(this); tree.setName(treeName.getString()); value.setSpanTree(tree); readSpanTree(tree, false); } buf.position(startPos + size); } finally { stringPositions = null; } } } @Override public void read(FieldBase field, TensorFieldValue value) { int encodedTensorLength = buf.getInt1_4Bytes(); if (encodedTensorLength > 0) { byte[] encodedTensor = getBytes(null, encodedTensorLength); value.assign(TypedBinaryFormat.decode(Optional.of(value.getDataType().getTensorType()), GrowableByteBuffer.wrap(encodedTensor))); } else { value.clear(); } } @Override public void read(FieldBase field, ReferenceFieldValue value) { final boolean documentIdPresent = (buf.get() != 0); if (documentIdPresent) { value.assign(readDocumentId()); } else { value.clear(); } } public void read(FieldBase fieldDef, Struct s) { s.setVersion(version); int startPos = position(); if (version < 6) { throw new DeserializationException("Illegal document serialization version " + version); } int dataSize; if (version < 7) { long rSize = getInt2_4_8Bytes(null); if (rSize > Integer.MAX_VALUE) { throw new DeserializationException("Raw size of data 
block is too large."); } dataSize = (int)rSize; } else { dataSize = getInt(null); } byte comprCode = getByte(null); CompressionType compression = CompressionType.valueOf(comprCode); int uncompressedSize = 0; if (compression != CompressionType.NONE && compression != CompressionType.INCOMPRESSIBLE) { long pSize = getInt2_4_8Bytes(null); if (pSize > Integer.MAX_VALUE) { throw new DeserializationException("Uncompressed size of data block is too large."); } uncompressedSize = (int) pSize; } int numberOfFields = getInt1_4Bytes(null); List<Tuple2<Integer, Long>> fieldIdsAndLengths = new ArrayList<>(numberOfFields); for (int i=0; i<numberOfFields; ++i) { fieldIdsAndLengths.add(new Tuple2<>(getInt1_4Bytes(null), getInt2_4_8Bytes(null))); } GrowableByteBuffer bigBuf = buf; if (version < 7) { int headerSize = position() - startPos; dataSize -= headerSize; } byte[] destination = compressor.decompress(compression, getBuf().array(), position(), uncompressedSize, Optional.of(dataSize)); position(position() + dataSize); buf = GrowableByteBuffer.wrap(destination); s.clear(); StructDataType type = s.getDataType(); for (int i=0; i<numberOfFields; ++i) { Field structField = type.getField(fieldIdsAndLengths.get(i).first, version); if (structField == null) { position(position() + fieldIdsAndLengths.get(i).second.intValue()); } else { int posBefore = position(); FieldValue value = structField.getDataType().createFieldValue(); value.deserialize(structField, this); s.setFieldValue(structField, value); position(posBefore + fieldIdsAndLengths.get(i).second.intValue()); } } buf = bigBuf; } public void read(FieldBase field, StructuredFieldValue value) { throw new IllegalArgumentException("read not implemented yet."); } public <T extends FieldValue> void read(FieldBase field, WeightedSet<T> ws) { WeightedSetDataType type = ws.getDataType(); getInt(null); int numElements = getInt(null); if (numElements < 0) { throw new DeserializationException("Bad number of weighted set elements, " + 
numElements); } ws.clearAndReserve(numElements * 2); for (int i = 0; i < numElements; i++) { int size = getInt(null); FieldValue value = type.getNestedType().createFieldValue(); value.deserialize(null, this); IntegerFieldValue weight = new IntegerFieldValue(getInt(null)); ws.putUnChecked((T) value, weight); } } public void read(FieldBase field, AnnotationReference value) { int seqId = buf.getInt1_2_4Bytes(); try { Annotation a = annotations.get(seqId); value.setReferenceNoCompatibilityCheck(a); } catch (IndexOutOfBoundsException iiobe) { throw new SerializationException("Could not serialize AnnotationReference value, reference not found.", iiobe); } } private Utf8String deserializeAttributeString() throws DeserializationException { int length = getByte(null); return new Utf8String(parseNullTerminatedString(length)); } private Utf8Array parseNullTerminatedString() { return parseNullTerminatedString(getBuf().getByteBuffer()); } private Utf8Array parseNullTerminatedString(int lengthExcludingNull) { return parseNullTerminatedString(getBuf().getByteBuffer(), lengthExcludingNull); } static Utf8Array parseNullTerminatedString(ByteBuffer buf, int lengthExcludingNull) throws DeserializationException { Utf8Array utf8 = new Utf8Array(buf, lengthExcludingNull); buf.get(); return utf8; } static Utf8Array parseNullTerminatedString(ByteBuffer buf) throws DeserializationException { int end = getFirstNullByte(buf); if (end == -1) { throw new DeserializationException("Could not locate terminating 0-byte for string"); } return parseNullTerminatedString(buf, end - buf.position()); } private static int getFirstNullByte(ByteBuffer buf) { int end = -1; int start = buf.position(); while (true) { try { byte dataByte = buf.get(); if (dataByte == (byte) 0) { end = buf.position() - 1; break; } } catch (Exception e) { break; } } buf.position(start); return end; } public void read(FieldPathUpdate update) { String fieldPath = getString(null); String whereClause = getString(null); 
update.setFieldPath(fieldPath); try { update.setWhereClause(whereClause); } catch (ParseException e) { throw new DeserializationException(e); } } public void read(AssignFieldPathUpdate update) { byte flags = getByte(null); update.setRemoveIfZero((flags & AssignFieldPathUpdate.REMOVE_IF_ZERO) != 0); update.setCreateMissingPath((flags & AssignFieldPathUpdate.CREATE_MISSING_PATH) != 0); if ((flags & AssignFieldPathUpdate.ARITHMETIC_EXPRESSION) != 0) { update.setExpression(getString(null)); } else { DataType dt = update.getFieldPath().getResultingDataType(); FieldValue fv = dt.createFieldValue(); fv.deserialize(this); update.setNewValue(fv); } } public void read(RemoveFieldPathUpdate update) { } public void read(AddFieldPathUpdate update) { DataType dt = update.getFieldPath().getResultingDataType(); FieldValue fv = dt.createFieldValue(); dt.createFieldValue(); fv.deserialize(this); if (!(fv instanceof Array)) { throw new DeserializationException("Add only applicable to array types"); } update.setNewValues((Array)fv); } public ValueUpdate getValueUpdate(DataType superType, DataType subType) { int vuTypeId = getInt(null); ValueUpdate.ValueUpdateClassID op = ValueUpdate.ValueUpdateClassID.getID(vuTypeId); if (op == null) { throw new IllegalArgumentException("Read type "+vuTypeId+" of bytebuffer, but this is not a legal value update type."); } switch (op) { case ADD: { FieldValue fval = subType.createFieldValue(); fval.deserialize(this); int weight = getInt(null); return new AddValueUpdate(fval, weight); } case ARITHMETIC: int opId = getInt(null); ArithmeticValueUpdate.Operator operator = ArithmeticValueUpdate.Operator.getID(opId); double operand = getDouble(null); return new ArithmeticValueUpdate(operator, operand); case ASSIGN: { byte contents = getByte(null); FieldValue fval = null; if (contents == (byte) 1) { fval = superType.createFieldValue(); fval.deserialize(this); } return new AssignValueUpdate(fval); } case CLEAR: return new ClearValueUpdate(); case MAP: if 
(superType instanceof ArrayDataType) { CollectionDataType type = (CollectionDataType) superType; IntegerFieldValue index = new IntegerFieldValue(); index.deserialize(this); ValueUpdate update = getValueUpdate(type.getNestedType(), null); return new MapValueUpdate(index, update); } else if (superType instanceof WeightedSetDataType) { CollectionDataType type = (CollectionDataType) superType; FieldValue fval = type.getNestedType().createFieldValue(); fval.deserialize(this); ValueUpdate update = getValueUpdate(DataType.INT, null); return new MapValueUpdate(fval, update); } else { throw new DeserializationException("MapValueUpdate only works for arrays and weighted sets"); } case REMOVE: FieldValue fval = ((CollectionDataType) superType).getNestedType().createFieldValue(); fval.deserialize(this); return new RemoveValueUpdate(fval); default: throw new DeserializationException( "Could not deserialize ValueUpdate, unknown valueUpdateClassID type " + vuTypeId); } } public void read(FieldUpdate fieldUpdate) { int fieldId = getInt(null); Field field = fieldUpdate.getDocumentType().getField(fieldId, fieldUpdate.getSerializationVersion()); if (field == null) { throw new DeserializationException( "Cannot deserialize FieldUpdate, field fieldId " + fieldId + " not found in " + fieldUpdate.getDocumentType()); } fieldUpdate.setField(field); int size = getInt(null); for (int i = 0; i < size; i++) { if (field.getDataType() instanceof CollectionDataType) { CollectionDataType collType = (CollectionDataType) field.getDataType(); fieldUpdate.addValueUpdate(getValueUpdate(collType, collType.getNestedType())); } else { fieldUpdate.addValueUpdate(getValueUpdate(field.getDataType(), null)); } } } public DocumentId readDocumentId() { Utf8String uri = new Utf8String(parseNullTerminatedString(getBuf().getByteBuffer())); return DocumentId.createFromSerialized(uri.toString()); } public DocumentType readDocumentType() { Utf8Array docTypeName = parseNullTerminatedString(); int ignored = 
getShort(null); DocumentType docType = manager.getDocumentType(new DataTypeName(docTypeName)); if (docType == null) { throw new DeserializationException("No known document type with name " + new Utf8String(docTypeName).toString()); } return docType; } private SpanNode readSpanNode() { byte type = buf.get(); buf.position(buf.position() - 1); SpanNode retval; if ((type & Span.ID) == Span.ID) { retval = new Span(); if (spanNodes != null) { spanNodes.add(retval); } read((Span) retval); } else if ((type & SpanList.ID) == SpanList.ID) { retval = new SpanList(); if (spanNodes != null) { spanNodes.add(retval); } read((SpanList) retval); } else if ((type & AlternateSpanList.ID) == AlternateSpanList.ID) { retval = new AlternateSpanList(); if (spanNodes != null) { spanNodes.add(retval); } read((AlternateSpanList) retval); } else { throw new DeserializationException("Cannot read SpanNode of type " + type); } return retval; } private void readSpanTree(SpanTree tree, boolean readName) { if (spanNodes != null || annotations != null) { throw new SerializationException("Deserialization of nested SpanTrees is not supported."); } spanNodes = new ArrayList<SpanNode>(); annotations = new ArrayList<Annotation>(); try { if (readName) { StringFieldValue treeName = new StringFieldValue(); treeName.deserialize(this); tree.setName(treeName.getString()); } SpanNode root = readSpanNode(); tree.setRoot(root); int numAnnotations = buf.getInt1_2_4Bytes(); for (int i = 0; i < numAnnotations; i++) { Annotation a = new Annotation(); annotations.add(a); } for (int i = 0; i < numAnnotations; i++) { read(annotations.get(i)); } for (Annotation a : annotations) { tree.annotate(a); } for (SpanNode node: spanNodes) { if (node instanceof Span) { correctIndexes((Span) node); } } } finally { spanNodes = null; annotations = null; } } public void read(SpanTree tree) { readSpanTree(tree, true); } public void read(Annotation annotation) { int annotationTypeId = buf.getInt(); AnnotationType type = 
manager.getAnnotationTypeRegistry().getType(annotationTypeId); if (type == null) { throw new DeserializationException("Cannot deserialize annotation of type " + annotationTypeId + " (unknown type)"); } annotation.setType(type); byte features = buf.get(); int length = buf.getInt1_2_4Bytes(); if ((features & (byte) 1) == (byte) 1) { int spanNodeId = buf.getInt1_2_4Bytes(); try { SpanNode node = spanNodes.get(spanNodeId); annotation.setSpanNode(node); } catch (IndexOutOfBoundsException ioobe) { throw new DeserializationException("Could not deserialize annotation, associated span node not found ", ioobe); } } if ((features & (byte) 2) == (byte) 2) { int dataTypeId = buf.getInt(); if (dataTypeId != type.getDataType().getId()) { buf.position(buf.position() + length - 4); } else { FieldValue value = type.getDataType().createFieldValue(); value.deserialize(this); annotation.setFieldValue(value); } } } public void read(Span span) { byte type = buf.get(); if ((type & Span.ID) != Span.ID) { throw new DeserializationException("Cannot deserialize Span with type " + type); } span.setFrom(buf.getInt1_2_4Bytes()); span.setLength(buf.getInt1_2_4Bytes()); } private void correctIndexes(Span span) { if (stringPositions == null) { throw new DeserializationException("Cannot deserialize Span, no access to parent StringFieldValue."); } int fromIndex = stringPositions[span.getFrom()]; int toIndex = stringPositions[span.getTo()]; int length = toIndex - fromIndex; span.setFrom(fromIndex); span.setLength(length); } public void read(SpanList spanList) { byte type = buf.get(); if ((type & SpanList.ID) != SpanList.ID) { throw new DeserializationException("Cannot deserialize SpanList with type " + type); } List<SpanNode> nodes = readSpanList(spanList); for (SpanNode node : nodes) { spanList.add(node); } } public void read(AlternateSpanList altSpanList) { byte type = buf.get(); if ((type & AlternateSpanList.ID) != AlternateSpanList.ID) { throw new DeserializationException("Cannot deserialize 
AlternateSpanList with type " + type); } int numSubTrees = buf.getInt1_2_4Bytes(); for (int i = 0; i < numSubTrees; i++) { double prob = buf.getDouble(); List<SpanNode> list = readSpanList(altSpanList); if (i == 0) { for (SpanNode node : list) { altSpanList.add(node); } altSpanList.setProbability(0, prob); } else { altSpanList.addChildren(i, list, prob); } } } private List<SpanNode> readSpanList(SpanNodeParent parent) { int size = buf.getInt1_2_4Bytes(); List<SpanNode> spanList = new ArrayList<SpanNode>(); for (int i = 0; i < size; i++) { spanList.add(readSpanNode()); } return spanList; } }
class VespaDocumentDeserializer42 extends VespaDocumentSerializer42 implements DocumentDeserializer { private final Compressor compressor = new Compressor(); private DocumentTypeManager manager; GrowableByteBuffer body; private short version; private List<SpanNode> spanNodes; private List<Annotation> annotations; private int[] stringPositions; VespaDocumentDeserializer42(DocumentTypeManager manager, GrowableByteBuffer header, GrowableByteBuffer body, short version) { super(header); this.manager = manager; this.body = body; this.version = version; } VespaDocumentDeserializer42(DocumentTypeManager manager, GrowableByteBuffer buf) { this(manager, buf, null, Document.SERIALIZED_VERSION); } VespaDocumentDeserializer42(DocumentTypeManager manager, GrowableByteBuffer buf, GrowableByteBuffer body) { this(manager, buf, body, Document.SERIALIZED_VERSION); } final public DocumentTypeManager getDocumentTypeManager() { return manager; } public void read(Document document) { read(null, document); } public void read(FieldBase field, Document doc) { version = getShort(null); if (version < 6 || version > Document.SERIALIZED_VERSION) { throw new DeserializationException("Unknown version " + version + ", expected " + Document.SERIALIZED_VERSION + "."); } int dataLength = 0; int dataPos = 0; if (version < 7) { getInt2_4_8Bytes(null); } else { dataLength = getInt(null); dataPos = position(); } doc.setId(readDocumentId()); Byte content = getByte(null); doc.setDataType(readDocumentType()); if ((content & 0x2) != 0) { doc.getHeader().deserialize(new Field("header"),this); } if ((content & 0x4) != 0) { doc.getBody().deserialize(new Field("body"),this); } else if (body != null) { GrowableByteBuffer header = getBuf(); setBuf(body); body = null; doc.getBody().deserialize(new Field("body"), this); body = getBuf(); setBuf(header); } if (version < 8) { int crcVal = getInt(null); } if (version > 6) { if (dataLength != (position() - dataPos)) { throw new DeserializationException("Length 
mismatch"); } } } public void read(FieldBase field, FieldValue value) { throw new IllegalArgumentException("read not implemented yet."); } public <T extends FieldValue> void read(FieldBase field, Array<T> array) { int numElements = getNumCollectionElems(); ArrayList<T> list = new ArrayList<T>(numElements); ArrayDataType type = array.getDataType(); for (int i = 0; i < numElements; i++) { if (version < 7) { getInt(null); } FieldValue fv = type.getNestedType().createFieldValue(); fv.deserialize(null, this); list.add((T) fv); } array.clear(); array.addAll(list); } public <K extends FieldValue, V extends FieldValue> void read(FieldBase field, MapFieldValue<K, V> map) { int numElements = getNumCollectionElems(); Map<K,V> hash = new HashMap<>(); MapDataType type = map.getDataType(); for (int i = 0; i < numElements; i++) { if (version < 7) { getInt(null); } K key = (K) type.getKeyType().createFieldValue(); V val = (V) type.getValueType().createFieldValue(); key.deserialize(null, this); val.deserialize(null, this); hash.put(key, val); } map.clear(); map.putAll(hash); } private int getNumCollectionElems() { int numElements; if (version < 7) { getInt(null); numElements = getInt(null); } else { numElements = getInt1_2_4Bytes(null); } if (numElements < 0) { throw new DeserializationException("Bad number of array/map elements, " + numElements); } return numElements; } public <T extends FieldValue> void read(FieldBase field, CollectionFieldValue<T> value) { throw new IllegalArgumentException("read not implemented yet."); } public void read(FieldBase field, ByteFieldValue value) { value.assign(getByte(null)); } public void read(FieldBase field, DoubleFieldValue value) { value.assign(getDouble(null)); } public void read(FieldBase field, FloatFieldValue value) { value.assign(getFloat(null)); } public void read(FieldBase field, IntegerFieldValue value) { value.assign(getInt(null)); } public void read(FieldBase field, LongFieldValue value) { value.assign(getLong(null)); } public void 
read(FieldBase field, Raw value) { int rawsize = getInt(null); byte[] rawBytes = getBytes(null, rawsize); value.assign(rawBytes); } @Override public void read(FieldBase field, PredicateFieldValue value) { int len = getInt(null); byte[] buf = getBytes(null, len); value.assign(BinaryFormat.decode(buf)); } public void read(FieldBase field, StringFieldValue value) { byte coding = getByte(null); int length = getInt1_4Bytes(null); byte[] stringArray = new byte[length - 1]; buf.get(stringArray); buf.get(); value.setUnChecked(Utf8.toString(stringArray)); if ((coding & 64) == 64) { try { stringPositions = calculateStringPositions(stringArray); int size = buf.getInt(); int startPos = buf.position(); int numSpanTrees = buf.getInt1_2_4Bytes(); for (int i = 0; i < numSpanTrees; i++) { SpanTree tree = new SpanTree(); StringFieldValue treeName = new StringFieldValue(); treeName.deserialize(this); tree.setName(treeName.getString()); value.setSpanTree(tree); readSpanTree(tree, false); } buf.position(startPos + size); } finally { stringPositions = null; } } } @Override public void read(FieldBase field, TensorFieldValue value) { int encodedTensorLength = buf.getInt1_4Bytes(); if (encodedTensorLength > 0) { byte[] encodedTensor = getBytes(null, encodedTensorLength); value.assign(TypedBinaryFormat.decode(Optional.of(value.getDataType().getTensorType()), GrowableByteBuffer.wrap(encodedTensor))); } else { value.clear(); } } @Override public void read(FieldBase field, ReferenceFieldValue value) { final boolean documentIdPresent = (buf.get() != 0); if (documentIdPresent) { value.assign(readDocumentId()); } else { value.clear(); } } public void read(FieldBase fieldDef, Struct s) { s.setVersion(version); int startPos = position(); if (version < 6) { throw new DeserializationException("Illegal document serialization version " + version); } int dataSize; if (version < 7) { long rSize = getInt2_4_8Bytes(null); if (rSize > Integer.MAX_VALUE) { throw new DeserializationException("Raw size of data 
block is too large."); } dataSize = (int)rSize; } else { dataSize = getInt(null); } byte comprCode = getByte(null); CompressionType compression = CompressionType.valueOf(comprCode); int uncompressedSize = 0; if (compression != CompressionType.NONE && compression != CompressionType.INCOMPRESSIBLE) { long pSize = getInt2_4_8Bytes(null); if (pSize > Integer.MAX_VALUE) { throw new DeserializationException("Uncompressed size of data block is too large."); } uncompressedSize = (int) pSize; } int numberOfFields = getInt1_4Bytes(null); List<Tuple2<Integer, Long>> fieldIdsAndLengths = new ArrayList<>(numberOfFields); for (int i=0; i<numberOfFields; ++i) { fieldIdsAndLengths.add(new Tuple2<>(getInt1_4Bytes(null), getInt2_4_8Bytes(null))); } GrowableByteBuffer bigBuf = buf; if (version < 7) { int headerSize = position() - startPos; dataSize -= headerSize; } byte[] destination = compressor.decompress(compression, getBuf().array(), position(), uncompressedSize, Optional.of(dataSize)); position(position() + dataSize); buf = GrowableByteBuffer.wrap(destination); s.clear(); StructDataType type = s.getDataType(); for (int i=0; i<numberOfFields; ++i) { Field structField = type.getField(fieldIdsAndLengths.get(i).first, version); if (structField == null) { position(position() + fieldIdsAndLengths.get(i).second.intValue()); } else { int posBefore = position(); FieldValue value = structField.getDataType().createFieldValue(); value.deserialize(structField, this); s.setFieldValue(structField, value); position(posBefore + fieldIdsAndLengths.get(i).second.intValue()); } } buf = bigBuf; } public void read(FieldBase field, StructuredFieldValue value) { throw new IllegalArgumentException("read not implemented yet."); } public <T extends FieldValue> void read(FieldBase field, WeightedSet<T> ws) { WeightedSetDataType type = ws.getDataType(); getInt(null); int numElements = getInt(null); if (numElements < 0) { throw new DeserializationException("Bad number of weighted set elements, " + 
numElements); } ws.clearAndReserve(numElements * 2); for (int i = 0; i < numElements; i++) { int size = getInt(null); FieldValue value = type.getNestedType().createFieldValue(); value.deserialize(null, this); IntegerFieldValue weight = new IntegerFieldValue(getInt(null)); ws.putUnChecked((T) value, weight); } } public void read(FieldBase field, AnnotationReference value) { int seqId = buf.getInt1_2_4Bytes(); try { Annotation a = annotations.get(seqId); value.setReferenceNoCompatibilityCheck(a); } catch (IndexOutOfBoundsException iiobe) { throw new SerializationException("Could not serialize AnnotationReference value, reference not found.", iiobe); } } private Utf8String deserializeAttributeString() throws DeserializationException { int length = getByte(null); return new Utf8String(parseNullTerminatedString(length)); } private Utf8Array parseNullTerminatedString() { return parseNullTerminatedString(getBuf().getByteBuffer()); } private Utf8Array parseNullTerminatedString(int lengthExcludingNull) { return parseNullTerminatedString(getBuf().getByteBuffer(), lengthExcludingNull); } static Utf8Array parseNullTerminatedString(ByteBuffer buf, int lengthExcludingNull) throws DeserializationException { Utf8Array utf8 = new Utf8Array(buf, lengthExcludingNull); buf.get(); return utf8; } static Utf8Array parseNullTerminatedString(ByteBuffer buf) throws DeserializationException { int end = getFirstNullByte(buf); if (end == -1) { throw new DeserializationException("Could not locate terminating 0-byte for string"); } return parseNullTerminatedString(buf, end - buf.position()); } private static int getFirstNullByte(ByteBuffer buf) { int end = -1; int start = buf.position(); while (true) { try { byte dataByte = buf.get(); if (dataByte == (byte) 0) { end = buf.position() - 1; break; } } catch (Exception e) { break; } } buf.position(start); return end; } public void read(FieldPathUpdate update) { String fieldPath = getString(null); String whereClause = getString(null); 
update.setFieldPath(fieldPath); try { update.setWhereClause(whereClause); } catch (ParseException e) { throw new DeserializationException(e); } } public void read(AssignFieldPathUpdate update) { byte flags = getByte(null); update.setRemoveIfZero((flags & AssignFieldPathUpdate.REMOVE_IF_ZERO) != 0); update.setCreateMissingPath((flags & AssignFieldPathUpdate.CREATE_MISSING_PATH) != 0); if ((flags & AssignFieldPathUpdate.ARITHMETIC_EXPRESSION) != 0) { update.setExpression(getString(null)); } else { DataType dt = update.getFieldPath().getResultingDataType(); FieldValue fv = dt.createFieldValue(); fv.deserialize(this); update.setNewValue(fv); } } public void read(RemoveFieldPathUpdate update) { } public void read(AddFieldPathUpdate update) { DataType dt = update.getFieldPath().getResultingDataType(); FieldValue fv = dt.createFieldValue(); dt.createFieldValue(); fv.deserialize(this); if (!(fv instanceof Array)) { throw new DeserializationException("Add only applicable to array types"); } update.setNewValues((Array)fv); } public ValueUpdate getValueUpdate(DataType superType, DataType subType) { int vuTypeId = getInt(null); ValueUpdate.ValueUpdateClassID op = ValueUpdate.ValueUpdateClassID.getID(vuTypeId); if (op == null) { throw new IllegalArgumentException("Read type "+vuTypeId+" of bytebuffer, but this is not a legal value update type."); } switch (op) { case ADD: { FieldValue fval = subType.createFieldValue(); fval.deserialize(this); int weight = getInt(null); return new AddValueUpdate(fval, weight); } case ARITHMETIC: int opId = getInt(null); ArithmeticValueUpdate.Operator operator = ArithmeticValueUpdate.Operator.getID(opId); double operand = getDouble(null); return new ArithmeticValueUpdate(operator, operand); case ASSIGN: { byte contents = getByte(null); FieldValue fval = null; if (contents == (byte) 1) { fval = superType.createFieldValue(); fval.deserialize(this); } return new AssignValueUpdate(fval); } case CLEAR: return new ClearValueUpdate(); case MAP: if 
(superType instanceof ArrayDataType) { CollectionDataType type = (CollectionDataType) superType; IntegerFieldValue index = new IntegerFieldValue(); index.deserialize(this); ValueUpdate update = getValueUpdate(type.getNestedType(), null); return new MapValueUpdate(index, update); } else if (superType instanceof WeightedSetDataType) { CollectionDataType type = (CollectionDataType) superType; FieldValue fval = type.getNestedType().createFieldValue(); fval.deserialize(this); ValueUpdate update = getValueUpdate(DataType.INT, null); return new MapValueUpdate(fval, update); } else { throw new DeserializationException("MapValueUpdate only works for arrays and weighted sets"); } case REMOVE: FieldValue fval = ((CollectionDataType) superType).getNestedType().createFieldValue(); fval.deserialize(this); return new RemoveValueUpdate(fval); default: throw new DeserializationException( "Could not deserialize ValueUpdate, unknown valueUpdateClassID type " + vuTypeId); } } public void read(FieldUpdate fieldUpdate) { int fieldId = getInt(null); Field field = fieldUpdate.getDocumentType().getField(fieldId, fieldUpdate.getSerializationVersion()); if (field == null) { throw new DeserializationException( "Cannot deserialize FieldUpdate, field fieldId " + fieldId + " not found in " + fieldUpdate.getDocumentType()); } fieldUpdate.setField(field); int size = getInt(null); for (int i = 0; i < size; i++) { if (field.getDataType() instanceof CollectionDataType) { CollectionDataType collType = (CollectionDataType) field.getDataType(); fieldUpdate.addValueUpdate(getValueUpdate(collType, collType.getNestedType())); } else { fieldUpdate.addValueUpdate(getValueUpdate(field.getDataType(), null)); } } } public DocumentId readDocumentId() { Utf8String uri = new Utf8String(parseNullTerminatedString(getBuf().getByteBuffer())); return DocumentId.createFromSerialized(uri.toString()); } public DocumentType readDocumentType() { Utf8Array docTypeName = parseNullTerminatedString(); int ignored = 
getShort(null); DocumentType docType = manager.getDocumentType(new DataTypeName(docTypeName)); if (docType == null) { throw new DeserializationException("No known document type with name " + new Utf8String(docTypeName).toString()); } return docType; } private SpanNode readSpanNode() { byte type = buf.get(); buf.position(buf.position() - 1); SpanNode retval; if ((type & Span.ID) == Span.ID) { retval = new Span(); if (spanNodes != null) { spanNodes.add(retval); } read((Span) retval); } else if ((type & SpanList.ID) == SpanList.ID) { retval = new SpanList(); if (spanNodes != null) { spanNodes.add(retval); } read((SpanList) retval); } else if ((type & AlternateSpanList.ID) == AlternateSpanList.ID) { retval = new AlternateSpanList(); if (spanNodes != null) { spanNodes.add(retval); } read((AlternateSpanList) retval); } else { throw new DeserializationException("Cannot read SpanNode of type " + type); } return retval; } private void readSpanTree(SpanTree tree, boolean readName) { if (spanNodes != null || annotations != null) { throw new SerializationException("Deserialization of nested SpanTrees is not supported."); } spanNodes = new ArrayList<SpanNode>(); annotations = new ArrayList<Annotation>(); try { if (readName) { StringFieldValue treeName = new StringFieldValue(); treeName.deserialize(this); tree.setName(treeName.getString()); } SpanNode root = readSpanNode(); tree.setRoot(root); int numAnnotations = buf.getInt1_2_4Bytes(); for (int i = 0; i < numAnnotations; i++) { Annotation a = new Annotation(); annotations.add(a); } for (int i = 0; i < numAnnotations; i++) { read(annotations.get(i)); } for (Annotation a : annotations) { tree.annotate(a); } for (SpanNode node: spanNodes) { if (node instanceof Span) { correctIndexes((Span) node); } } } finally { spanNodes = null; annotations = null; } } public void read(SpanTree tree) { readSpanTree(tree, true); } public void read(Annotation annotation) { int annotationTypeId = buf.getInt(); AnnotationType type = 
manager.getAnnotationTypeRegistry().getType(annotationTypeId); if (type == null) { throw new DeserializationException("Cannot deserialize annotation of type " + annotationTypeId + " (unknown type)"); } annotation.setType(type); byte features = buf.get(); int length = buf.getInt1_2_4Bytes(); if ((features & (byte) 1) == (byte) 1) { int spanNodeId = buf.getInt1_2_4Bytes(); try { SpanNode node = spanNodes.get(spanNodeId); annotation.setSpanNode(node); } catch (IndexOutOfBoundsException ioobe) { throw new DeserializationException("Could not deserialize annotation, associated span node not found ", ioobe); } } if ((features & (byte) 2) == (byte) 2) { int dataTypeId = buf.getInt(); if (dataTypeId != type.getDataType().getId()) { buf.position(buf.position() + length - 4); } else { FieldValue value = type.getDataType().createFieldValue(); value.deserialize(this); annotation.setFieldValue(value); } } } public void read(Span span) { byte type = buf.get(); if ((type & Span.ID) != Span.ID) { throw new DeserializationException("Cannot deserialize Span with type " + type); } span.setFrom(buf.getInt1_2_4Bytes()); span.setLength(buf.getInt1_2_4Bytes()); } private void correctIndexes(Span span) { if (stringPositions == null) { throw new DeserializationException("Cannot deserialize Span, no access to parent StringFieldValue."); } int fromIndex = stringPositions[span.getFrom()]; int toIndex = stringPositions[span.getTo()]; int length = toIndex - fromIndex; span.setFrom(fromIndex); span.setLength(length); } public void read(SpanList spanList) { byte type = buf.get(); if ((type & SpanList.ID) != SpanList.ID) { throw new DeserializationException("Cannot deserialize SpanList with type " + type); } List<SpanNode> nodes = readSpanList(spanList); for (SpanNode node : nodes) { spanList.add(node); } } public void read(AlternateSpanList altSpanList) { byte type = buf.get(); if ((type & AlternateSpanList.ID) != AlternateSpanList.ID) { throw new DeserializationException("Cannot deserialize 
AlternateSpanList with type " + type); } int numSubTrees = buf.getInt1_2_4Bytes(); for (int i = 0; i < numSubTrees; i++) { double prob = buf.getDouble(); List<SpanNode> list = readSpanList(altSpanList); if (i == 0) { for (SpanNode node : list) { altSpanList.add(node); } altSpanList.setProbability(0, prob); } else { altSpanList.addChildren(i, list, prob); } } } private List<SpanNode> readSpanList(SpanNodeParent parent) { int size = buf.getInt1_2_4Bytes(); List<SpanNode> spanList = new ArrayList<SpanNode>(); for (int i = 0; i < size; i++) { spanList.add(readSpanNode()); } return spanList; } }
Correct
public void read(DocumentUpdate update) { update.setId(new DocumentId(this)); update.setDocumentType(readDocumentType()); int size = getInt(null); for (int i = 0; i < size; i++) { update.addFieldUpdate(new FieldUpdate(this, update.getDocumentType(), 8)); } int sizeAndFlags = getInt(null); update.setCreateIfNonExistent(DocumentUpdateFlags.extractFlags(sizeAndFlags).getCreateIfNonExistent()); size = DocumentUpdateFlags.extractValue(sizeAndFlags); for (int i = 0; i < size; i++) { int type = getByte(null); update.addFieldPathUpdate(FieldPathUpdate.create(FieldPathUpdate.Type.valueOf(type), update.getDocumentType(), this)); } }
update.addFieldUpdate(new FieldUpdate(this, update.getDocumentType(), 8));
public void read(DocumentUpdate update) { update.setId(new DocumentId(this)); update.setDocumentType(readDocumentType()); int size = getInt(null); for (int i = 0; i < size; i++) { update.addFieldUpdate(new FieldUpdate(this, update.getDocumentType(), 8)); } int sizeAndFlags = getInt(null); update.setCreateIfNonExistent(DocumentUpdateFlags.extractFlags(sizeAndFlags).getCreateIfNonExistent()); size = DocumentUpdateFlags.extractValue(sizeAndFlags); for (int i = 0; i < size; i++) { int type = getByte(null); update.addFieldPathUpdate(FieldPathUpdate.create(FieldPathUpdate.Type.valueOf(type), update.getDocumentType(), this)); } }
class VespaDocumentDeserializerHead extends VespaDocumentDeserializer42 { public VespaDocumentDeserializerHead(DocumentTypeManager manager, GrowableByteBuffer buffer) { super(manager, buffer); } @Override }
class VespaDocumentDeserializerHead extends VespaDocumentDeserializer42 { public VespaDocumentDeserializerHead(DocumentTypeManager manager, GrowableByteBuffer buffer) { super(manager, buffer); } @Override }
This shouldn't be here...
public void staggering() { System.err.println(3 * 2 / 3); }
System.err.println(3 * 2 / 3);
public void staggering() { List<HostName> cluster = Arrays.asList(HostName.from("cfg1"), HostName.from("cfg2"), HostName.from("cfg3")); Instant now = Instant.ofEpochMilli(1001); Duration interval = Duration.ofMillis(300); assertEquals(299, Maintainer.staggeredDelay(cluster, HostName.from("cfg1"), now, interval)); assertEquals(399, Maintainer.staggeredDelay(cluster, HostName.from("cfg2"), now, interval)); assertEquals(199, Maintainer.staggeredDelay(cluster, HostName.from("cfg3"), now, interval)); now = Instant.ofEpochMilli(1101); assertEquals(199, Maintainer.staggeredDelay(cluster, HostName.from("cfg1"), now, interval)); assertEquals(299, Maintainer.staggeredDelay(cluster, HostName.from("cfg2"), now, interval)); assertEquals(399, Maintainer.staggeredDelay(cluster, HostName.from("cfg3"), now, interval)); assertEquals(300, Maintainer.staggeredDelay(cluster, HostName.from("cfg0"), now, interval)); }
class MaintainerTest { private ControllerTester tester; @Before public void before() { tester = new ControllerTester(); } @Test public void only_runs_in_permitted_systems() { AtomicInteger executions = new AtomicInteger(); maintainerIn(SystemName.cd, executions).run(); maintainerIn(SystemName.main, executions).run(); assertEquals(1, executions.get()); } @Test private Maintainer maintainerIn(SystemName system, AtomicInteger executions) { return new Maintainer(tester.controller(), Duration.ofDays(1), new JobControl(tester.controller().curator()), "MockMaintainer", EnumSet.of(system)) { @Override protected void maintain() { executions.incrementAndGet(); } }; } }
class MaintainerTest { private ControllerTester tester; @Before public void before() { tester = new ControllerTester(); } @Test public void only_runs_in_permitted_systems() { AtomicInteger executions = new AtomicInteger(); maintainerIn(SystemName.cd, executions).run(); maintainerIn(SystemName.main, executions).run(); assertEquals(1, executions.get()); } @Test private Maintainer maintainerIn(SystemName system, AtomicInteger executions) { return new Maintainer(tester.controller(), Duration.ofDays(1), new JobControl(tester.controller().curator()), "MockMaintainer", EnumSet.of(system)) { @Override protected void maintain() { executions.incrementAndGet(); } }; } }
Consider using `EnumSet.of(...)` here.
protected void maintain() { List<Node.State> targetStates = Arrays.asList(Node.State.active, Node.State.ready); List<Node> nodesToReboot = nodeRepository().getNodes().stream() .filter(node -> targetStates.contains(node.state())) .filter(node -> node.flavor().getType() != Flavor.Type.DOCKER_CONTAINER) .filter(this::shouldReboot) .collect(Collectors.toList()); if (!nodesToReboot.isEmpty()) nodeRepository().reboot(NodeListFilter.from(nodesToReboot)); }
List<Node.State> targetStates = Arrays.asList(Node.State.active, Node.State.ready);
protected void maintain() { EnumSet<Node.State> targetStates = EnumSet.of(Node.State.active, Node.State.ready); List<Node> nodesToReboot = nodeRepository().getNodes().stream() .filter(node -> targetStates.contains(node.state())) .filter(node -> node.flavor().getType() != Flavor.Type.DOCKER_CONTAINER) .filter(this::shouldReboot) .collect(Collectors.toList()); if (!nodesToReboot.isEmpty()) nodeRepository().reboot(NodeListFilter.from(nodesToReboot)); }
class NodeRebooter extends Maintainer { private final Duration rebootInterval; private final Clock clock; private final Random random; public NodeRebooter(NodeRepository nodeRepository, Clock clock, Duration rebootInterval, JobControl jobControl) { super(nodeRepository, min(Duration.ofMinutes(25), rebootInterval), jobControl); this.rebootInterval = rebootInterval; this.clock = clock; this.random = new Random(clock.millis()); } @Override private boolean shouldReboot(Node node) { if (node.history().hasEventAfter(History.Event.Type.rebooted, clock.instant().minus(rebootInterval))) return false; else return random.nextDouble() < (double) interval().getSeconds() / (double)rebootInterval.getSeconds(); } }
class NodeRebooter extends Maintainer { private final Duration rebootInterval; private final Clock clock; private final Random random; public NodeRebooter(NodeRepository nodeRepository, Clock clock, Duration rebootInterval, JobControl jobControl) { super(nodeRepository, min(Duration.ofMinutes(25), rebootInterval), jobControl); this.rebootInterval = rebootInterval; this.clock = clock; this.random = new Random(clock.millis()); } @Override private boolean shouldReboot(Node node) { if (node.history().hasEventAfter(History.Event.Type.rebooted, clock.instant().minus(rebootInterval))) return false; else return random.nextDouble() < (double) interval().getSeconds() / (double)rebootInterval.getSeconds(); } }
Should this be `(...).append('\'');` to avoid double spacing (a sign of the unrefined gentleman)?
public String toString() { StringBuilder b = new StringBuilder(); b.append("Detail "); b.append("resultType=").append(resultType); if (exception != null) { b.append(" exception='").append(Exceptions.toMessageString(exception)).append("' "); } if (traceMessage != null && ! traceMessage.isEmpty()) { b.append(" trace='").append(traceMessage).append("' "); } b.append(" endpoint=").append(endpoint); b.append(" resultTimeLocally=").append(timeStampMillis).append("\n"); return b.toString(); }
b.append(" exception='").append(Exceptions.toMessageString(exception)).append("' ");
public String toString() { StringBuilder b = new StringBuilder(); b.append("Detail "); b.append("resultType=").append(resultType); if (exception != null) { b.append(" exception='").append(Exceptions.toMessageString(exception)).append("'"); } if (traceMessage != null && ! traceMessage.isEmpty()) { b.append(" trace='").append(traceMessage).append("'"); } b.append(" endpoint=").append(endpoint); b.append(" resultTimeLocally=").append(timeStampMillis).append("\n"); return b.toString(); }
class Detail { private final ResultType resultType; private final Endpoint endpoint; private final Exception exception; private final String traceMessage; private final long timeStampMillis = System.currentTimeMillis(); public Detail(Endpoint endpoint, ResultType resultType, String traceMessage, Exception e) { this.endpoint = endpoint; this.resultType = resultType; this.exception = e; this.traceMessage = traceMessage; } public Detail(Endpoint endpoint) { this.endpoint = endpoint; this.resultType = ResultType.OPERATION_EXECUTED; this.exception = null; this.traceMessage = null; } /** * Returns the endpoint from which the result was received. * * @return the endpoint from which the result was received. */ public Endpoint getEndpoint() { return endpoint; } /** * Check if operation was successful. * * @return true if the operation was successful. */ public boolean isSuccess() { return resultType == ResultType.OPERATION_EXECUTED; } /** * @deprecated use getResultType. * Returns true if an error is transient, false if it is permanent. Irrelevant * if {@link * * @return true if an error is transient (or there is no error), false otherwise. */ @Deprecated public boolean isTransient() { return resultType == ResultType.TRANSITIVE_ERROR || resultType == ResultType.OPERATION_EXECUTED; } /** * Returns the result of the operation. */ public ResultType getResultType() { return resultType; } /** * Returns any exception related to this Detail, if unsuccessful. Might be null. * * @return any exception related to this Detail, if unsuccessful. Might be null. */ public Exception getException() { return exception; } /** * Returns trace message if any from gateway. * @return null or trace message. */ public String getTraceMessage() { return traceMessage; } @Override }
class Detail { private final ResultType resultType; private final Endpoint endpoint; private final Exception exception; private final String traceMessage; private final long timeStampMillis = System.currentTimeMillis(); public Detail(Endpoint endpoint, ResultType resultType, String traceMessage, Exception e) { this.endpoint = endpoint; this.resultType = resultType; this.exception = e; this.traceMessage = traceMessage; } public Detail(Endpoint endpoint) { this.endpoint = endpoint; this.resultType = ResultType.OPERATION_EXECUTED; this.exception = null; this.traceMessage = null; } /** * Returns the endpoint from which the result was received. * * @return the endpoint from which the result was received. */ public Endpoint getEndpoint() { return endpoint; } /** * Check if operation was successful. * * @return true if the operation was successful. */ public boolean isSuccess() { return resultType == ResultType.OPERATION_EXECUTED; } /** * @deprecated use getResultType. * Returns true if an error is transient, false if it is permanent. Irrelevant * if {@link * * @return true if an error is transient (or there is no error), false otherwise. */ @Deprecated public boolean isTransient() { return resultType == ResultType.TRANSITIVE_ERROR || resultType == ResultType.OPERATION_EXECUTED; } /** * Returns the result of the operation. */ public ResultType getResultType() { return resultType; } /** * Returns any exception related to this Detail, if unsuccessful. Might be null. * * @return any exception related to this Detail, if unsuccessful. Might be null. */ public Exception getException() { return exception; } /** * Returns trace message if any from gateway. * @return null or trace message. */ public String getTraceMessage() { return traceMessage; } @Override }
Also here?
public String toString() { StringBuilder b = new StringBuilder(); b.append("Detail "); b.append("resultType=").append(resultType); if (exception != null) { b.append(" exception='").append(Exceptions.toMessageString(exception)).append("' "); } if (traceMessage != null && ! traceMessage.isEmpty()) { b.append(" trace='").append(traceMessage).append("' "); } b.append(" endpoint=").append(endpoint); b.append(" resultTimeLocally=").append(timeStampMillis).append("\n"); return b.toString(); }
b.append(" trace='").append(traceMessage).append("' ");
public String toString() { StringBuilder b = new StringBuilder(); b.append("Detail "); b.append("resultType=").append(resultType); if (exception != null) { b.append(" exception='").append(Exceptions.toMessageString(exception)).append("'"); } if (traceMessage != null && ! traceMessage.isEmpty()) { b.append(" trace='").append(traceMessage).append("'"); } b.append(" endpoint=").append(endpoint); b.append(" resultTimeLocally=").append(timeStampMillis).append("\n"); return b.toString(); }
class Detail { private final ResultType resultType; private final Endpoint endpoint; private final Exception exception; private final String traceMessage; private final long timeStampMillis = System.currentTimeMillis(); public Detail(Endpoint endpoint, ResultType resultType, String traceMessage, Exception e) { this.endpoint = endpoint; this.resultType = resultType; this.exception = e; this.traceMessage = traceMessage; } public Detail(Endpoint endpoint) { this.endpoint = endpoint; this.resultType = ResultType.OPERATION_EXECUTED; this.exception = null; this.traceMessage = null; } /** * Returns the endpoint from which the result was received. * * @return the endpoint from which the result was received. */ public Endpoint getEndpoint() { return endpoint; } /** * Check if operation was successful. * * @return true if the operation was successful. */ public boolean isSuccess() { return resultType == ResultType.OPERATION_EXECUTED; } /** * @deprecated use getResultType. * Returns true if an error is transient, false if it is permanent. Irrelevant * if {@link * * @return true if an error is transient (or there is no error), false otherwise. */ @Deprecated public boolean isTransient() { return resultType == ResultType.TRANSITIVE_ERROR || resultType == ResultType.OPERATION_EXECUTED; } /** * Returns the result of the operation. */ public ResultType getResultType() { return resultType; } /** * Returns any exception related to this Detail, if unsuccessful. Might be null. * * @return any exception related to this Detail, if unsuccessful. Might be null. */ public Exception getException() { return exception; } /** * Returns trace message if any from gateway. * @return null or trace message. */ public String getTraceMessage() { return traceMessage; } @Override }
class Detail { private final ResultType resultType; private final Endpoint endpoint; private final Exception exception; private final String traceMessage; private final long timeStampMillis = System.currentTimeMillis(); public Detail(Endpoint endpoint, ResultType resultType, String traceMessage, Exception e) { this.endpoint = endpoint; this.resultType = resultType; this.exception = e; this.traceMessage = traceMessage; } public Detail(Endpoint endpoint) { this.endpoint = endpoint; this.resultType = ResultType.OPERATION_EXECUTED; this.exception = null; this.traceMessage = null; } /** * Returns the endpoint from which the result was received. * * @return the endpoint from which the result was received. */ public Endpoint getEndpoint() { return endpoint; } /** * Check if operation was successful. * * @return true if the operation was successful. */ public boolean isSuccess() { return resultType == ResultType.OPERATION_EXECUTED; } /** * @deprecated use getResultType. * Returns true if an error is transient, false if it is permanent. Irrelevant * if {@link * * @return true if an error is transient (or there is no error), false otherwise. */ @Deprecated public boolean isTransient() { return resultType == ResultType.TRANSITIVE_ERROR || resultType == ResultType.OPERATION_EXECUTED; } /** * Returns the result of the operation. */ public ResultType getResultType() { return resultType; } /** * Returns any exception related to this Detail, if unsuccessful. Might be null. * * @return any exception related to this Detail, if unsuccessful. Might be null. */ public Exception getException() { return exception; } /** * Returns trace message if any from gateway. * @return null or trace message. */ public String getTraceMessage() { return traceMessage; } @Override }
Sigh, yes. Fixing
public String toString() { StringBuilder b = new StringBuilder(); b.append("Detail "); b.append("resultType=").append(resultType); if (exception != null) { b.append(" exception='").append(Exceptions.toMessageString(exception)).append("' "); } if (traceMessage != null && ! traceMessage.isEmpty()) { b.append(" trace='").append(traceMessage).append("' "); } b.append(" endpoint=").append(endpoint); b.append(" resultTimeLocally=").append(timeStampMillis).append("\n"); return b.toString(); }
b.append(" exception='").append(Exceptions.toMessageString(exception)).append("' ");
public String toString() { StringBuilder b = new StringBuilder(); b.append("Detail "); b.append("resultType=").append(resultType); if (exception != null) { b.append(" exception='").append(Exceptions.toMessageString(exception)).append("'"); } if (traceMessage != null && ! traceMessage.isEmpty()) { b.append(" trace='").append(traceMessage).append("'"); } b.append(" endpoint=").append(endpoint); b.append(" resultTimeLocally=").append(timeStampMillis).append("\n"); return b.toString(); }
class Detail { private final ResultType resultType; private final Endpoint endpoint; private final Exception exception; private final String traceMessage; private final long timeStampMillis = System.currentTimeMillis(); public Detail(Endpoint endpoint, ResultType resultType, String traceMessage, Exception e) { this.endpoint = endpoint; this.resultType = resultType; this.exception = e; this.traceMessage = traceMessage; } public Detail(Endpoint endpoint) { this.endpoint = endpoint; this.resultType = ResultType.OPERATION_EXECUTED; this.exception = null; this.traceMessage = null; } /** * Returns the endpoint from which the result was received. * * @return the endpoint from which the result was received. */ public Endpoint getEndpoint() { return endpoint; } /** * Check if operation was successful. * * @return true if the operation was successful. */ public boolean isSuccess() { return resultType == ResultType.OPERATION_EXECUTED; } /** * @deprecated use getResultType. * Returns true if an error is transient, false if it is permanent. Irrelevant * if {@link * * @return true if an error is transient (or there is no error), false otherwise. */ @Deprecated public boolean isTransient() { return resultType == ResultType.TRANSITIVE_ERROR || resultType == ResultType.OPERATION_EXECUTED; } /** * Returns the result of the operation. */ public ResultType getResultType() { return resultType; } /** * Returns any exception related to this Detail, if unsuccessful. Might be null. * * @return any exception related to this Detail, if unsuccessful. Might be null. */ public Exception getException() { return exception; } /** * Returns trace message if any from gateway. * @return null or trace message. */ public String getTraceMessage() { return traceMessage; } @Override }
class Detail { private final ResultType resultType; private final Endpoint endpoint; private final Exception exception; private final String traceMessage; private final long timeStampMillis = System.currentTimeMillis(); public Detail(Endpoint endpoint, ResultType resultType, String traceMessage, Exception e) { this.endpoint = endpoint; this.resultType = resultType; this.exception = e; this.traceMessage = traceMessage; } public Detail(Endpoint endpoint) { this.endpoint = endpoint; this.resultType = ResultType.OPERATION_EXECUTED; this.exception = null; this.traceMessage = null; } /** * Returns the endpoint from which the result was received. * * @return the endpoint from which the result was received. */ public Endpoint getEndpoint() { return endpoint; } /** * Check if operation was successful. * * @return true if the operation was successful. */ public boolean isSuccess() { return resultType == ResultType.OPERATION_EXECUTED; } /** * @deprecated use getResultType. * Returns true if an error is transient, false if it is permanent. Irrelevant * if {@link * * @return true if an error is transient (or there is no error), false otherwise. */ @Deprecated public boolean isTransient() { return resultType == ResultType.TRANSITIVE_ERROR || resultType == ResultType.OPERATION_EXECUTED; } /** * Returns the result of the operation. */ public ResultType getResultType() { return resultType; } /** * Returns any exception related to this Detail, if unsuccessful. Might be null. * * @return any exception related to this Detail, if unsuccessful. Might be null. */ public Exception getException() { return exception; } /** * Returns trace message if any from gateway. * @return null or trace message. */ public String getTraceMessage() { return traceMessage; } @Override }
Return `405 Method Not Allowed` response for every method except GET.
public ContentChannel handleRequest(Request request, ResponseHandler handler) { new ResponseDispatch() { @Override protected Response newResponse() { Response response = new Response(Response.Status.OK); response.headers().add(HttpHeaders.Names.CONTENT_TYPE, "application/json"); return response; } @Override protected Iterable<ByteBuffer> responseContent() { return Collections.singleton(ByteBuffer.wrap(buildMetricOutput())); } }.dispatch(handler); return null; }
Response response = new Response(Response.Status.OK);
public ContentChannel handleRequest(Request request, ResponseHandler handler) { new ResponseDispatch() { @Override protected Response newResponse() { Response response = new Response(Response.Status.OK); response.headers().add(HttpHeaders.Names.CONTENT_TYPE, "application/json"); return response; } @Override protected Iterable<ByteBuffer> responseContent() { return Collections.singleton(ByteBuffer.wrap(buildMetricOutput())); } }.dispatch(handler); return null; }
class MetricsPacketsHandler extends AbstractRequestHandler { static final String APPLICATION_KEY = "application"; static final String TIMESTAMP_KEY = "timestamp"; static final String STATUS_CODE_KEY = "status_code"; static final String STATUS_MSG_KEY = "status_msg"; static final String METRICS_KEY = "metrics"; static final String DIMENSIONS_KEY = "dimensions"; private final StateMonitor monitor; private final Timer timer; private final SnapshotProvider snapshotPreprocessor; private final String applicationName; @Inject public MetricsPacketsHandler(StateMonitor monitor, Timer timer, ComponentRegistry<SnapshotProvider> preprocessors, MetricsPresentationConfig presentation, MetricsPacketsHandlerConfig config) { this.monitor = monitor; this.timer = timer; snapshotPreprocessor = getSnapshotPreprocessor(preprocessors, presentation); applicationName = config.application(); } @Override private byte[] buildMetricOutput() { try { String output = getStatusPacket() + getAllMetricsPackets(); return output.getBytes(StandardCharsets.UTF_8); } catch (JSONException e) { throw new RuntimeException("Bad JSON construction.", e); } } /** * Exactly one status packet is added to the response. 
*/ private String getStatusPacket() throws JSONException { JSONObject packet = new JSONObjectWithLegibleException(); packet.put(APPLICATION_KEY, applicationName); StateMonitor.Status status = monitor.status(); packet.put(STATUS_CODE_KEY, status.ordinal()); packet.put(STATUS_MSG_KEY, status.name()); return jsonToString(packet); } private String jsonToString(JSONObject jsonObject) throws JSONException { return jsonObject.toString(4); } private String getAllMetricsPackets() throws JSONException { StringBuilder ret = new StringBuilder(); List<JSONObject> metricsPackets = getPacketsForSnapshot(getSnapshot(), applicationName, timer.currentTimeMillis()); for (JSONObject packet : metricsPackets) { ret.append("\n\n"); ret.append(jsonToString(packet)); } return ret.toString(); } private MetricSnapshot getSnapshot() { if (snapshotPreprocessor == null) { return monitor.snapshot(); } else { return snapshotPreprocessor.latestSnapshot(); } } private List<JSONObject> getPacketsForSnapshot(MetricSnapshot metricSnapshot, String application, long timestamp) throws JSONException { if (metricSnapshot == null) return Collections.emptyList(); List<JSONObject> packets = new ArrayList<>(); for (Map.Entry<MetricDimensions, MetricSet> snapshotEntry : metricSnapshot) { MetricDimensions metricDimensions = snapshotEntry.getKey(); MetricSet metricSet = snapshotEntry.getValue(); JSONObjectWithLegibleException packet = new JSONObjectWithLegibleException(); addMetaData(timestamp, application, packet); addDimensions(metricDimensions, packet); addMetrics(metricSet, packet); packets.add(packet); } return packets; } private void addMetaData(long timestamp, String application, JSONObjectWithLegibleException packet) { packet.put(APPLICATION_KEY, application); packet.put(TIMESTAMP_KEY, timestamp); } private void addDimensions(MetricDimensions metricDimensions, JSONObjectWithLegibleException packet) throws JSONException { Iterator<Map.Entry<String, String>> dimensionsIterator = metricDimensions.iterator(); 
if (dimensionsIterator.hasNext()) { JSONObject jsonDim = new JSONObjectWithLegibleException(); packet.put(DIMENSIONS_KEY, jsonDim); for (Map.Entry<String, String> dimensionEntry : metricDimensions) { jsonDim.put(dimensionEntry.getKey(), dimensionEntry.getValue()); } } } private void addMetrics(MetricSet metricSet, JSONObjectWithLegibleException packet) throws JSONException { JSONObjectWithLegibleException metrics = new JSONObjectWithLegibleException(); packet.put(METRICS_KEY, metrics); for (Map.Entry<String, MetricValue> metric : metricSet) { String name = metric.getKey(); MetricValue value = metric.getValue(); if (value instanceof CountMetric) { metrics.put(name + ".count", ((CountMetric) value).getCount()); } else if (value instanceof GaugeMetric) { GaugeMetric gauge = (GaugeMetric) value; metrics.put(name + ".average", gauge.getAverage()) .put(name + ".last", gauge.getLast()) .put(name + ".max", gauge.getMax()); if (gauge.getPercentiles().isPresent()) { for (Tuple2<String, Double> prefixAndValue : gauge.getPercentiles().get()) { metrics.put(name + "." + prefixAndValue.first + "percentile", prefixAndValue.second.doubleValue()); } } } else { throw new UnsupportedOperationException("Unknown metric class: " + value.getClass().getName()); } } } }
class MetricsPacketsHandler extends AbstractRequestHandler { static final String APPLICATION_KEY = "application"; static final String TIMESTAMP_KEY = "timestamp"; static final String STATUS_CODE_KEY = "status_code"; static final String STATUS_MSG_KEY = "status_msg"; static final String METRICS_KEY = "metrics"; static final String DIMENSIONS_KEY = "dimensions"; static final String PACKET_SEPARATOR = "\n\n"; private final StateMonitor monitor; private final Timer timer; private final SnapshotProvider snapshotPreprocessor; private final String applicationName; @Inject public MetricsPacketsHandler(StateMonitor monitor, Timer timer, ComponentRegistry<SnapshotProvider> preprocessors, MetricsPresentationConfig presentation, MetricsPacketsHandlerConfig config) { this.monitor = monitor; this.timer = timer; snapshotPreprocessor = getSnapshotPreprocessor(preprocessors, presentation); applicationName = config.application(); } @Override private byte[] buildMetricOutput() { try { String output = getStatusPacket() + getAllMetricsPackets(); return output.getBytes(StandardCharsets.UTF_8); } catch (JSONException e) { throw new RuntimeException("Bad JSON construction.", e); } } /** * Exactly one status packet is added to the response. 
*/ private String getStatusPacket() throws JSONException { JSONObject packet = new JSONObjectWithLegibleException(); packet.put(APPLICATION_KEY, applicationName); StateMonitor.Status status = monitor.status(); packet.put(STATUS_CODE_KEY, status.ordinal()); packet.put(STATUS_MSG_KEY, status.name()); return jsonToString(packet); } private String jsonToString(JSONObject jsonObject) throws JSONException { return jsonObject.toString(4); } private String getAllMetricsPackets() throws JSONException { StringBuilder ret = new StringBuilder(); List<JSONObject> metricsPackets = getPacketsForSnapshot(getSnapshot(), applicationName, timer.currentTimeMillis()); for (JSONObject packet : metricsPackets) { ret.append(PACKET_SEPARATOR); ret.append(jsonToString(packet)); } return ret.toString(); } private MetricSnapshot getSnapshot() { if (snapshotPreprocessor == null) { return monitor.snapshot(); } else { return snapshotPreprocessor.latestSnapshot(); } } private List<JSONObject> getPacketsForSnapshot(MetricSnapshot metricSnapshot, String application, long timestamp) throws JSONException { if (metricSnapshot == null) return Collections.emptyList(); List<JSONObject> packets = new ArrayList<>(); for (Map.Entry<MetricDimensions, MetricSet> snapshotEntry : metricSnapshot) { MetricDimensions metricDimensions = snapshotEntry.getKey(); MetricSet metricSet = snapshotEntry.getValue(); JSONObjectWithLegibleException packet = new JSONObjectWithLegibleException(); addMetaData(timestamp, application, packet); addDimensions(metricDimensions, packet); addMetrics(metricSet, packet); packets.add(packet); } return packets; } private void addMetaData(long timestamp, String application, JSONObjectWithLegibleException packet) { packet.put(APPLICATION_KEY, application); packet.put(TIMESTAMP_KEY, timestamp); } private void addDimensions(MetricDimensions metricDimensions, JSONObjectWithLegibleException packet) throws JSONException { Iterator<Map.Entry<String, String>> dimensionsIterator = 
metricDimensions.iterator(); if (dimensionsIterator.hasNext()) { JSONObject jsonDim = new JSONObjectWithLegibleException(); packet.put(DIMENSIONS_KEY, jsonDim); for (Map.Entry<String, String> dimensionEntry : metricDimensions) { jsonDim.put(dimensionEntry.getKey(), dimensionEntry.getValue()); } } } private void addMetrics(MetricSet metricSet, JSONObjectWithLegibleException packet) throws JSONException { JSONObjectWithLegibleException metrics = new JSONObjectWithLegibleException(); packet.put(METRICS_KEY, metrics); for (Map.Entry<String, MetricValue> metric : metricSet) { String name = metric.getKey(); MetricValue value = metric.getValue(); if (value instanceof CountMetric) { metrics.put(name + ".count", ((CountMetric) value).getCount()); } else if (value instanceof GaugeMetric) { GaugeMetric gauge = (GaugeMetric) value; metrics.put(name + ".average", gauge.getAverage()) .put(name + ".last", gauge.getLast()) .put(name + ".max", gauge.getMax()); if (gauge.getPercentiles().isPresent()) { for (Tuple2<String, Double> prefixAndValue : gauge.getPercentiles().get()) { metrics.put(name + "." + prefixAndValue.first + "percentile", prefixAndValue.second.doubleValue()); } } } else { throw new UnsupportedOperationException("Unknown metric class: " + value.getClass().getName()); } } } }
Having multiple top-level json objects concatenated will require more complex parse logic on the client side. If possible, consider wrapping all top-level objects into a single json object.
private byte[] buildMetricOutput() { try { String output = getStatusPacket() + getAllMetricsPackets(); return output.getBytes(StandardCharsets.UTF_8); } catch (JSONException e) { throw new RuntimeException("Bad JSON construction.", e); } }
String output = getStatusPacket() + getAllMetricsPackets();
private byte[] buildMetricOutput() { try { String output = getStatusPacket() + getAllMetricsPackets(); return output.getBytes(StandardCharsets.UTF_8); } catch (JSONException e) { throw new RuntimeException("Bad JSON construction.", e); } }
class MetricsPacketsHandler extends AbstractRequestHandler { static final String APPLICATION_KEY = "application"; static final String TIMESTAMP_KEY = "timestamp"; static final String STATUS_CODE_KEY = "status_code"; static final String STATUS_MSG_KEY = "status_msg"; static final String METRICS_KEY = "metrics"; static final String DIMENSIONS_KEY = "dimensions"; private final StateMonitor monitor; private final Timer timer; private final SnapshotProvider snapshotPreprocessor; private final String applicationName; @Inject public MetricsPacketsHandler(StateMonitor monitor, Timer timer, ComponentRegistry<SnapshotProvider> preprocessors, MetricsPresentationConfig presentation, MetricsPacketsHandlerConfig config) { this.monitor = monitor; this.timer = timer; snapshotPreprocessor = getSnapshotPreprocessor(preprocessors, presentation); applicationName = config.application(); } @Override public ContentChannel handleRequest(Request request, ResponseHandler handler) { new ResponseDispatch() { @Override protected Response newResponse() { Response response = new Response(Response.Status.OK); response.headers().add(HttpHeaders.Names.CONTENT_TYPE, "application/json"); return response; } @Override protected Iterable<ByteBuffer> responseContent() { return Collections.singleton(ByteBuffer.wrap(buildMetricOutput())); } }.dispatch(handler); return null; } /** * Exactly one status packet is added to the response. 
*/ private String getStatusPacket() throws JSONException { JSONObject packet = new JSONObjectWithLegibleException(); packet.put(APPLICATION_KEY, applicationName); StateMonitor.Status status = monitor.status(); packet.put(STATUS_CODE_KEY, status.ordinal()); packet.put(STATUS_MSG_KEY, status.name()); return jsonToString(packet); } private String jsonToString(JSONObject jsonObject) throws JSONException { return jsonObject.toString(4); } private String getAllMetricsPackets() throws JSONException { StringBuilder ret = new StringBuilder(); List<JSONObject> metricsPackets = getPacketsForSnapshot(getSnapshot(), applicationName, timer.currentTimeMillis()); for (JSONObject packet : metricsPackets) { ret.append("\n\n"); ret.append(jsonToString(packet)); } return ret.toString(); } private MetricSnapshot getSnapshot() { if (snapshotPreprocessor == null) { return monitor.snapshot(); } else { return snapshotPreprocessor.latestSnapshot(); } } private List<JSONObject> getPacketsForSnapshot(MetricSnapshot metricSnapshot, String application, long timestamp) throws JSONException { if (metricSnapshot == null) return Collections.emptyList(); List<JSONObject> packets = new ArrayList<>(); for (Map.Entry<MetricDimensions, MetricSet> snapshotEntry : metricSnapshot) { MetricDimensions metricDimensions = snapshotEntry.getKey(); MetricSet metricSet = snapshotEntry.getValue(); JSONObjectWithLegibleException packet = new JSONObjectWithLegibleException(); addMetaData(timestamp, application, packet); addDimensions(metricDimensions, packet); addMetrics(metricSet, packet); packets.add(packet); } return packets; } private void addMetaData(long timestamp, String application, JSONObjectWithLegibleException packet) { packet.put(APPLICATION_KEY, application); packet.put(TIMESTAMP_KEY, timestamp); } private void addDimensions(MetricDimensions metricDimensions, JSONObjectWithLegibleException packet) throws JSONException { Iterator<Map.Entry<String, String>> dimensionsIterator = metricDimensions.iterator(); 
if (dimensionsIterator.hasNext()) { JSONObject jsonDim = new JSONObjectWithLegibleException(); packet.put(DIMENSIONS_KEY, jsonDim); for (Map.Entry<String, String> dimensionEntry : metricDimensions) { jsonDim.put(dimensionEntry.getKey(), dimensionEntry.getValue()); } } } private void addMetrics(MetricSet metricSet, JSONObjectWithLegibleException packet) throws JSONException { JSONObjectWithLegibleException metrics = new JSONObjectWithLegibleException(); packet.put(METRICS_KEY, metrics); for (Map.Entry<String, MetricValue> metric : metricSet) { String name = metric.getKey(); MetricValue value = metric.getValue(); if (value instanceof CountMetric) { metrics.put(name + ".count", ((CountMetric) value).getCount()); } else if (value instanceof GaugeMetric) { GaugeMetric gauge = (GaugeMetric) value; metrics.put(name + ".average", gauge.getAverage()) .put(name + ".last", gauge.getLast()) .put(name + ".max", gauge.getMax()); if (gauge.getPercentiles().isPresent()) { for (Tuple2<String, Double> prefixAndValue : gauge.getPercentiles().get()) { metrics.put(name + "." + prefixAndValue.first + "percentile", prefixAndValue.second.doubleValue()); } } } else { throw new UnsupportedOperationException("Unknown metric class: " + value.getClass().getName()); } } } }
class MetricsPacketsHandler extends AbstractRequestHandler { static final String APPLICATION_KEY = "application"; static final String TIMESTAMP_KEY = "timestamp"; static final String STATUS_CODE_KEY = "status_code"; static final String STATUS_MSG_KEY = "status_msg"; static final String METRICS_KEY = "metrics"; static final String DIMENSIONS_KEY = "dimensions"; static final String PACKET_SEPARATOR = "\n\n"; private final StateMonitor monitor; private final Timer timer; private final SnapshotProvider snapshotPreprocessor; private final String applicationName; @Inject public MetricsPacketsHandler(StateMonitor monitor, Timer timer, ComponentRegistry<SnapshotProvider> preprocessors, MetricsPresentationConfig presentation, MetricsPacketsHandlerConfig config) { this.monitor = monitor; this.timer = timer; snapshotPreprocessor = getSnapshotPreprocessor(preprocessors, presentation); applicationName = config.application(); } @Override public ContentChannel handleRequest(Request request, ResponseHandler handler) { new ResponseDispatch() { @Override protected Response newResponse() { Response response = new Response(Response.Status.OK); response.headers().add(HttpHeaders.Names.CONTENT_TYPE, "application/json"); return response; } @Override protected Iterable<ByteBuffer> responseContent() { return Collections.singleton(ByteBuffer.wrap(buildMetricOutput())); } }.dispatch(handler); return null; } /** * Exactly one status packet is added to the response. 
*/ private String getStatusPacket() throws JSONException { JSONObject packet = new JSONObjectWithLegibleException(); packet.put(APPLICATION_KEY, applicationName); StateMonitor.Status status = monitor.status(); packet.put(STATUS_CODE_KEY, status.ordinal()); packet.put(STATUS_MSG_KEY, status.name()); return jsonToString(packet); } private String jsonToString(JSONObject jsonObject) throws JSONException { return jsonObject.toString(4); } private String getAllMetricsPackets() throws JSONException { StringBuilder ret = new StringBuilder(); List<JSONObject> metricsPackets = getPacketsForSnapshot(getSnapshot(), applicationName, timer.currentTimeMillis()); for (JSONObject packet : metricsPackets) { ret.append(PACKET_SEPARATOR); ret.append(jsonToString(packet)); } return ret.toString(); } private MetricSnapshot getSnapshot() { if (snapshotPreprocessor == null) { return monitor.snapshot(); } else { return snapshotPreprocessor.latestSnapshot(); } } private List<JSONObject> getPacketsForSnapshot(MetricSnapshot metricSnapshot, String application, long timestamp) throws JSONException { if (metricSnapshot == null) return Collections.emptyList(); List<JSONObject> packets = new ArrayList<>(); for (Map.Entry<MetricDimensions, MetricSet> snapshotEntry : metricSnapshot) { MetricDimensions metricDimensions = snapshotEntry.getKey(); MetricSet metricSet = snapshotEntry.getValue(); JSONObjectWithLegibleException packet = new JSONObjectWithLegibleException(); addMetaData(timestamp, application, packet); addDimensions(metricDimensions, packet); addMetrics(metricSet, packet); packets.add(packet); } return packets; } private void addMetaData(long timestamp, String application, JSONObjectWithLegibleException packet) { packet.put(APPLICATION_KEY, application); packet.put(TIMESTAMP_KEY, timestamp); } private void addDimensions(MetricDimensions metricDimensions, JSONObjectWithLegibleException packet) throws JSONException { Iterator<Map.Entry<String, String>> dimensionsIterator = 
metricDimensions.iterator(); if (dimensionsIterator.hasNext()) { JSONObject jsonDim = new JSONObjectWithLegibleException(); packet.put(DIMENSIONS_KEY, jsonDim); for (Map.Entry<String, String> dimensionEntry : metricDimensions) { jsonDim.put(dimensionEntry.getKey(), dimensionEntry.getValue()); } } } private void addMetrics(MetricSet metricSet, JSONObjectWithLegibleException packet) throws JSONException { JSONObjectWithLegibleException metrics = new JSONObjectWithLegibleException(); packet.put(METRICS_KEY, metrics); for (Map.Entry<String, MetricValue> metric : metricSet) { String name = metric.getKey(); MetricValue value = metric.getValue(); if (value instanceof CountMetric) { metrics.put(name + ".count", ((CountMetric) value).getCount()); } else if (value instanceof GaugeMetric) { GaugeMetric gauge = (GaugeMetric) value; metrics.put(name + ".average", gauge.getAverage()) .put(name + ".last", gauge.getLast()) .put(name + ".max", gauge.getMax()); if (gauge.getPercentiles().isPresent()) { for (Tuple2<String, Double> prefixAndValue : gauge.getPercentiles().get()) { metrics.put(name + "." + prefixAndValue.first + "percentile", prefixAndValue.second.doubleValue()); } } } else { throw new UnsupportedOperationException("Unknown metric class: " + value.getClass().getName()); } } } }
Sorry, we can't due to internal requirements.
private byte[] buildMetricOutput() { try { String output = getStatusPacket() + getAllMetricsPackets(); return output.getBytes(StandardCharsets.UTF_8); } catch (JSONException e) { throw new RuntimeException("Bad JSON construction.", e); } }
String output = getStatusPacket() + getAllMetricsPackets();
private byte[] buildMetricOutput() { try { String output = getStatusPacket() + getAllMetricsPackets(); return output.getBytes(StandardCharsets.UTF_8); } catch (JSONException e) { throw new RuntimeException("Bad JSON construction.", e); } }
class MetricsPacketsHandler extends AbstractRequestHandler { static final String APPLICATION_KEY = "application"; static final String TIMESTAMP_KEY = "timestamp"; static final String STATUS_CODE_KEY = "status_code"; static final String STATUS_MSG_KEY = "status_msg"; static final String METRICS_KEY = "metrics"; static final String DIMENSIONS_KEY = "dimensions"; private final StateMonitor monitor; private final Timer timer; private final SnapshotProvider snapshotPreprocessor; private final String applicationName; @Inject public MetricsPacketsHandler(StateMonitor monitor, Timer timer, ComponentRegistry<SnapshotProvider> preprocessors, MetricsPresentationConfig presentation, MetricsPacketsHandlerConfig config) { this.monitor = monitor; this.timer = timer; snapshotPreprocessor = getSnapshotPreprocessor(preprocessors, presentation); applicationName = config.application(); } @Override public ContentChannel handleRequest(Request request, ResponseHandler handler) { new ResponseDispatch() { @Override protected Response newResponse() { Response response = new Response(Response.Status.OK); response.headers().add(HttpHeaders.Names.CONTENT_TYPE, "application/json"); return response; } @Override protected Iterable<ByteBuffer> responseContent() { return Collections.singleton(ByteBuffer.wrap(buildMetricOutput())); } }.dispatch(handler); return null; } /** * Exactly one status packet is added to the response. 
*/ private String getStatusPacket() throws JSONException { JSONObject packet = new JSONObjectWithLegibleException(); packet.put(APPLICATION_KEY, applicationName); StateMonitor.Status status = monitor.status(); packet.put(STATUS_CODE_KEY, status.ordinal()); packet.put(STATUS_MSG_KEY, status.name()); return jsonToString(packet); } private String jsonToString(JSONObject jsonObject) throws JSONException { return jsonObject.toString(4); } private String getAllMetricsPackets() throws JSONException { StringBuilder ret = new StringBuilder(); List<JSONObject> metricsPackets = getPacketsForSnapshot(getSnapshot(), applicationName, timer.currentTimeMillis()); for (JSONObject packet : metricsPackets) { ret.append("\n\n"); ret.append(jsonToString(packet)); } return ret.toString(); } private MetricSnapshot getSnapshot() { if (snapshotPreprocessor == null) { return monitor.snapshot(); } else { return snapshotPreprocessor.latestSnapshot(); } } private List<JSONObject> getPacketsForSnapshot(MetricSnapshot metricSnapshot, String application, long timestamp) throws JSONException { if (metricSnapshot == null) return Collections.emptyList(); List<JSONObject> packets = new ArrayList<>(); for (Map.Entry<MetricDimensions, MetricSet> snapshotEntry : metricSnapshot) { MetricDimensions metricDimensions = snapshotEntry.getKey(); MetricSet metricSet = snapshotEntry.getValue(); JSONObjectWithLegibleException packet = new JSONObjectWithLegibleException(); addMetaData(timestamp, application, packet); addDimensions(metricDimensions, packet); addMetrics(metricSet, packet); packets.add(packet); } return packets; } private void addMetaData(long timestamp, String application, JSONObjectWithLegibleException packet) { packet.put(APPLICATION_KEY, application); packet.put(TIMESTAMP_KEY, timestamp); } private void addDimensions(MetricDimensions metricDimensions, JSONObjectWithLegibleException packet) throws JSONException { Iterator<Map.Entry<String, String>> dimensionsIterator = metricDimensions.iterator(); 
if (dimensionsIterator.hasNext()) { JSONObject jsonDim = new JSONObjectWithLegibleException(); packet.put(DIMENSIONS_KEY, jsonDim); for (Map.Entry<String, String> dimensionEntry : metricDimensions) { jsonDim.put(dimensionEntry.getKey(), dimensionEntry.getValue()); } } } private void addMetrics(MetricSet metricSet, JSONObjectWithLegibleException packet) throws JSONException { JSONObjectWithLegibleException metrics = new JSONObjectWithLegibleException(); packet.put(METRICS_KEY, metrics); for (Map.Entry<String, MetricValue> metric : metricSet) { String name = metric.getKey(); MetricValue value = metric.getValue(); if (value instanceof CountMetric) { metrics.put(name + ".count", ((CountMetric) value).getCount()); } else if (value instanceof GaugeMetric) { GaugeMetric gauge = (GaugeMetric) value; metrics.put(name + ".average", gauge.getAverage()) .put(name + ".last", gauge.getLast()) .put(name + ".max", gauge.getMax()); if (gauge.getPercentiles().isPresent()) { for (Tuple2<String, Double> prefixAndValue : gauge.getPercentiles().get()) { metrics.put(name + "." + prefixAndValue.first + "percentile", prefixAndValue.second.doubleValue()); } } } else { throw new UnsupportedOperationException("Unknown metric class: " + value.getClass().getName()); } } } }
class MetricsPacketsHandler extends AbstractRequestHandler { static final String APPLICATION_KEY = "application"; static final String TIMESTAMP_KEY = "timestamp"; static final String STATUS_CODE_KEY = "status_code"; static final String STATUS_MSG_KEY = "status_msg"; static final String METRICS_KEY = "metrics"; static final String DIMENSIONS_KEY = "dimensions"; static final String PACKET_SEPARATOR = "\n\n"; private final StateMonitor monitor; private final Timer timer; private final SnapshotProvider snapshotPreprocessor; private final String applicationName; @Inject public MetricsPacketsHandler(StateMonitor monitor, Timer timer, ComponentRegistry<SnapshotProvider> preprocessors, MetricsPresentationConfig presentation, MetricsPacketsHandlerConfig config) { this.monitor = monitor; this.timer = timer; snapshotPreprocessor = getSnapshotPreprocessor(preprocessors, presentation); applicationName = config.application(); } @Override public ContentChannel handleRequest(Request request, ResponseHandler handler) { new ResponseDispatch() { @Override protected Response newResponse() { Response response = new Response(Response.Status.OK); response.headers().add(HttpHeaders.Names.CONTENT_TYPE, "application/json"); return response; } @Override protected Iterable<ByteBuffer> responseContent() { return Collections.singleton(ByteBuffer.wrap(buildMetricOutput())); } }.dispatch(handler); return null; } /** * Exactly one status packet is added to the response. 
*/ private String getStatusPacket() throws JSONException { JSONObject packet = new JSONObjectWithLegibleException(); packet.put(APPLICATION_KEY, applicationName); StateMonitor.Status status = monitor.status(); packet.put(STATUS_CODE_KEY, status.ordinal()); packet.put(STATUS_MSG_KEY, status.name()); return jsonToString(packet); } private String jsonToString(JSONObject jsonObject) throws JSONException { return jsonObject.toString(4); } private String getAllMetricsPackets() throws JSONException { StringBuilder ret = new StringBuilder(); List<JSONObject> metricsPackets = getPacketsForSnapshot(getSnapshot(), applicationName, timer.currentTimeMillis()); for (JSONObject packet : metricsPackets) { ret.append(PACKET_SEPARATOR); ret.append(jsonToString(packet)); } return ret.toString(); } private MetricSnapshot getSnapshot() { if (snapshotPreprocessor == null) { return monitor.snapshot(); } else { return snapshotPreprocessor.latestSnapshot(); } } private List<JSONObject> getPacketsForSnapshot(MetricSnapshot metricSnapshot, String application, long timestamp) throws JSONException { if (metricSnapshot == null) return Collections.emptyList(); List<JSONObject> packets = new ArrayList<>(); for (Map.Entry<MetricDimensions, MetricSet> snapshotEntry : metricSnapshot) { MetricDimensions metricDimensions = snapshotEntry.getKey(); MetricSet metricSet = snapshotEntry.getValue(); JSONObjectWithLegibleException packet = new JSONObjectWithLegibleException(); addMetaData(timestamp, application, packet); addDimensions(metricDimensions, packet); addMetrics(metricSet, packet); packets.add(packet); } return packets; } private void addMetaData(long timestamp, String application, JSONObjectWithLegibleException packet) { packet.put(APPLICATION_KEY, application); packet.put(TIMESTAMP_KEY, timestamp); } private void addDimensions(MetricDimensions metricDimensions, JSONObjectWithLegibleException packet) throws JSONException { Iterator<Map.Entry<String, String>> dimensionsIterator = 
metricDimensions.iterator(); if (dimensionsIterator.hasNext()) { JSONObject jsonDim = new JSONObjectWithLegibleException(); packet.put(DIMENSIONS_KEY, jsonDim); for (Map.Entry<String, String> dimensionEntry : metricDimensions) { jsonDim.put(dimensionEntry.getKey(), dimensionEntry.getValue()); } } } private void addMetrics(MetricSet metricSet, JSONObjectWithLegibleException packet) throws JSONException { JSONObjectWithLegibleException metrics = new JSONObjectWithLegibleException(); packet.put(METRICS_KEY, metrics); for (Map.Entry<String, MetricValue> metric : metricSet) { String name = metric.getKey(); MetricValue value = metric.getValue(); if (value instanceof CountMetric) { metrics.put(name + ".count", ((CountMetric) value).getCount()); } else if (value instanceof GaugeMetric) { GaugeMetric gauge = (GaugeMetric) value; metrics.put(name + ".average", gauge.getAverage()) .put(name + ".last", gauge.getLast()) .put(name + ".max", gauge.getMax()); if (gauge.getPercentiles().isPresent()) { for (Tuple2<String, Double> prefixAndValue : gauge.getPercentiles().get()) { metrics.put(name + "." + prefixAndValue.first + "percentile", prefixAndValue.second.doubleValue()); } } } else { throw new UnsupportedOperationException("Unknown metric class: " + value.getClass().getName()); } } } }
You managed to find another ingenious Optional filter sequence that made me scratch my head. Well done! :-) How about ``` Optional<ApplicationVersion> outstandingChange = application.outstandingChange().application(); Change latestChange = outstandingChange.isPresent() ? application.change().with(outstandingChange.get()) : application.change(); ```
private List<Job> computeReadyJobs(ApplicationId id) { List<Job> jobs = new ArrayList<>(); applications().get(id).ifPresent(application -> { Change change = application.change(); Optional<Instant> completedAt = max(application.deploymentJobs().statusOf(systemTest) .<Instant>flatMap(job -> job.lastSuccess().map(JobRun::at)), application.deploymentJobs().statusOf(stagingTest) .<Instant>flatMap(job -> job.lastSuccess().map(JobRun::at))); String reason = "New change available"; List<Job> testJobs = null; DeploymentSteps steps = steps(application.deploymentSpec()); if (change.isPresent()) { for (Step step : steps.production()) { List<JobType> stepJobs = steps.toJobs(step); List<JobType> remainingJobs = stepJobs.stream().filter(job -> !isComplete(change, application, job)).collect(toList()); if (!remainingJobs.isEmpty()) { for (JobType job : remainingJobs) { Versions versions = Versions.from(change, application, deploymentFor(application, job), controller.systemVersion()); if (isTested(application, versions)) { if (completedAt.isPresent() && canTrigger(job, versions, application, stepJobs)) { jobs.add(deploymentJob(application, versions, change, job, reason, completedAt.get())); } if (!alreadyTriggered(application, versions)) { testJobs = emptyList(); } } else if (testJobs == null) { testJobs = testJobs(application, versions, String.format("Testing deployment for %s (%s)", job.jobName(), versions.toString()), completedAt.orElseGet(clock::instant)); } } completedAt = Optional.empty(); } else { if (stepJobs.isEmpty()) { Duration delay = ((DeploymentSpec.Delay) step).duration(); completedAt = completedAt.map(at -> at.plus(delay)).filter(at -> !at.isAfter(clock.instant())); reason += " after a delay of " + delay; } else { completedAt = stepJobs.stream().map(job -> application.deploymentJobs().statusOf(job).get().lastCompleted().get().at()).max(naturalOrder()); reason = "Available change in " + stepJobs.stream().map(JobType::jobName).collect(joining(", ")); } } } } if 
(testJobs == null) { Change latestChange = application.outstandingChange().application() .map(application.change()::with) .orElse(application.change()); testJobs = testJobs(application, Versions.from(latestChange, application, steps.sortedDeployments(application.productionDeployments().values()).stream().findFirst(), controller.systemVersion()), "Testing last changes outside prod", clock.instant()); } jobs.addAll(testJobs); }); return Collections.unmodifiableList(jobs); }
Change latestChange = application.outstandingChange().application()
private List<Job> computeReadyJobs(ApplicationId id) { List<Job> jobs = new ArrayList<>(); applications().get(id).ifPresent(application -> { Change change = application.change(); Optional<Instant> completedAt = max(application.deploymentJobs().statusOf(systemTest) .<Instant>flatMap(job -> job.lastSuccess().map(JobRun::at)), application.deploymentJobs().statusOf(stagingTest) .<Instant>flatMap(job -> job.lastSuccess().map(JobRun::at))); String reason = "New change available"; List<Job> testJobs = null; DeploymentSteps steps = steps(application.deploymentSpec()); if (change.isPresent()) { for (Step step : steps.production()) { List<JobType> stepJobs = steps.toJobs(step); List<JobType> remainingJobs = stepJobs.stream().filter(job -> ! isComplete(change, application, job)).collect(toList()); if (!remainingJobs.isEmpty()) { for (JobType job : remainingJobs) { Versions versions = Versions.from(change, application, deploymentFor(application, job), controller.systemVersion()); if (isTested(application, versions)) { if (completedAt.isPresent() && canTrigger(job, versions, application, stepJobs)) { jobs.add(deploymentJob(application, versions, change, job, reason, completedAt.get())); } if (!alreadyTriggered(application, versions)) { testJobs = emptyList(); } } else if (testJobs == null) { testJobs = testJobs(application, versions, String.format("Testing deployment for %s (%s)", job.jobName(), versions.toString()), completedAt.orElseGet(clock::instant)); } } completedAt = Optional.empty(); } else { if (stepJobs.isEmpty()) { Duration delay = ((DeploymentSpec.Delay) step).duration(); completedAt = completedAt.map(at -> at.plus(delay)).filter(at -> !at.isAfter(clock.instant())); reason += " after a delay of " + delay; } else { completedAt = stepJobs.stream().map(job -> application.deploymentJobs().statusOf(job).get().lastCompleted().get().at()).max(naturalOrder()); reason = "Available change in " + stepJobs.stream().map(JobType::jobName).collect(joining(", ")); } } } } if 
(testJobs == null) { Change latestChange = application.outstandingChange().application().isPresent() ? change.with(application.outstandingChange().application().get()) : change; testJobs = testJobs(application, Versions.from(latestChange, application, steps.sortedDeployments(application.productionDeployments().values()).stream().findFirst(), controller.systemVersion()), "Testing last changes outside prod", clock.instant()); } jobs.addAll(testJobs); }); return Collections.unmodifiableList(jobs); }
/**
 * Maintains the deployment jobs of applications: computes which jobs are ready to run,
 * triggers them through the build service or the internal job controller, and records
 * job completions so the remaining change of each application stays up to date.
 */
class DeploymentTrigger {

    private final static Logger log = Logger.getLogger(DeploymentTrigger.class.getName());

    private final Controller controller;
    private final Clock clock;
    private final BuildService buildService;
    private final JobController jobs;

    /** Creates a trigger; all arguments must be non-null. The job controller is taken from the given controller. */
    public DeploymentTrigger(Controller controller, BuildService buildService, Clock clock) {
        this.controller = Objects.requireNonNull(controller, "controller cannot be null");
        this.clock = Objects.requireNonNull(clock, "clock cannot be null");
        this.buildService = Objects.requireNonNull(buildService, "buildService cannot be null");
        this.jobs = controller.jobController();
    }

    /** Returns the deployment steps of the given deployment spec, resolved in this controller's system. */
    public DeploymentSteps steps(DeploymentSpec spec) {
        return new DeploymentSteps(spec, controller::system);
    }

    /**
     * Records information when a job completes (successfully or not). This information is used when deciding what to
     * trigger next.
     */
    public void notifyOfCompletion(JobReport report) {
        log.log(LogLevel.INFO, String.format("Notified of %s for %s of %s (%d)",
                                             report.jobError().map(e -> e.toString() + " error")
                                                   .orElse("success"),
                                             report.jobType(), report.applicationId(), report.projectId()));
        if ( ! applications().get(report.applicationId()).isPresent()) {
            // Reports may arrive for applications no longer known to this controller; log and ignore.
            log.log(LogLevel.WARNING, "Ignoring completion of job of project '" + report.projectId() +
                                      "': Unknown application '" + report.applicationId() + "'");
            return;
        }

        applications().lockOrThrow(report.applicationId(), application -> {
            JobRun triggering;
            if (report.jobType() == component) {
                // The component (build) job produces a new application version from the reported source revision.
                ApplicationVersion applicationVersion = ApplicationVersion.from(report.sourceRevision().get(), report.buildNumber());
                triggering = JobRun.triggering(application.get().oldestDeployedPlatform().orElse(controller.systemVersion()),
                                               applicationVersion, Optional.empty(), Optional.empty(),
                                               "Application commit", clock.instant());
                if (report.success()) {
                    if (acceptNewApplicationVersion(application.get())) {
                        application = application.withChange(application.get().change().with(applicationVersion))
                                                 .withOutstandingChange(Change.empty());
                        if (application.get().deploymentJobs().deployedInternally())
                            // Internally deployed: abort active runs of this application, now superseded by the new version.
                            for (Run run : jobs.active())
                                if (run.id().application().equals(report.applicationId()))
                                    jobs.abort(run.id());
                    }
                    else
                        // Not accepted now; keep it as the outstanding change until it can be rolled out.
                        application = application.withOutstandingChange(Change.of(applicationVersion));
                }
            }
            else {
                // A deployment job must have been triggered, and not completed after that trigger, to be reported on.
                triggering = application.get().deploymentJobs().statusOf(report.jobType())
                                        .filter(job ->    job.lastTriggered().isPresent()
                                                       && job.lastCompleted()
                                                             .map(completion -> ! completion.at().isAfter(job.lastTriggered().get().at()))
                                                             .orElse(true))
                                        .orElseThrow(() -> new IllegalStateException("Notified of completion of " + report.jobType().jobName() +
                                                                                     " for " + report.applicationId() +
                                                                                     ", but that has neither been triggered nor deployed"))
                                        .lastTriggered().get();
            }
            application = application.withJobCompletion(report.projectId(),
                                                        report.jobType(),
                                                        triggering.completion(report.buildNumber(), clock.instant()),
                                                        report.jobError());
            // Completion may have finished parts of the current change; recompute what remains.
            application = application.withChange(remainingChange(application.get()));
            applications().store(application);
        });
    }

    /** Returns a map of jobs that are scheduled to be run, grouped by the job type */
    public Map<JobType, ? extends List<? extends BuildJob>> jobsToRun() {
        return computeReadyJobs().stream().collect(groupingBy(Job::jobType));
    }

    /**
     * Finds and triggers jobs that can and should run but are currently not, and returns the number of triggered jobs.
     *
     * Only one job is triggered each run for test jobs, since their environments have limited capacity.
     */
    public long triggerReadyJobs() {
        return computeReadyJobs().stream()
                                 .collect(partitioningBy(job -> job.jobType().isTest()))
                                 .entrySet().stream()
                                 .flatMap(entry -> (entry.getKey()
                                         // Test jobs: one group per job type, sorted so retries and upgrades go first,
                                         // then by availability; only one is triggered per group (limit 1 below).
                                         ? entry.getValue().stream()
                                                .sorted(comparing(Job::isRetry)
                                                                .thenComparing(Job::applicationUpgrade)
                                                                .reversed()
                                                                .thenComparing(Job::availableSince))
                                                .collect(groupingBy(Job::jobType))
                                         // Other jobs: one group per application, with no trigger limit.
                                         : entry.getValue().stream()
                                                .collect(groupingBy(Job::applicationId)))
                                         .values().stream()
                                         .map(jobs -> (Supplier<Long>) jobs.stream()
                                                                           .filter(this::trigger)
                                                                           .limit(entry.getKey() ? 1 : Long.MAX_VALUE)::count))
                                 .parallel().map(Supplier::get).reduce(0L, Long::sum);
    }

    /**
     * Attempts to trigger the given job for the given application and returns the outcome.
     *
     * If the build service can not find the given job, or claims it is illegal to trigger it,
     * the project id is removed from the application owning the job, to prevent further trigger attempts.
     */
    public boolean trigger(Job job) {
        log.log(LogLevel.INFO, String.format("Triggering %s: %s", job, job.triggering));
        try {
            applications().lockOrThrow(job.applicationId(), application -> {
                if (application.get().deploymentJobs().deployedInternally())
                    // Internally deployed applications run through the job controller, not the external build service.
                    jobs.start(job.applicationId(), job.jobType, new Versions(job.triggering.platform(),
                                                                             job.triggering.application(),
                                                                             job.triggering.sourcePlatform(),
                                                                             job.triggering.sourceApplication()));
                else
                    buildService.trigger(job);

                applications().store(application.withJobTriggering(job.jobType, job.triggering));
            });
            return true;
        }
        catch (RuntimeException e) {
            log.log(LogLevel.WARNING, "Exception triggering " + job + ": " + e);
            if (e instanceof NoSuchElementException || e instanceof IllegalArgumentException)
                // The job is unknown or illegal to trigger; drop the project id so we stop trying.
                applications().lockOrThrow(job.applicationId(), application ->
                        applications().store(application.withProjectId(OptionalLong.empty())));
            return false;
        }
    }

    /** Force triggering of a job for given application. */
    public List<JobType> forceTrigger(ApplicationId applicationId, JobType jobType, String user) {
        Application application = applications().require(applicationId);
        if (jobType == component) {
            if (application.deploymentJobs().deployedInternally())
                throw new IllegalArgumentException(applicationId + " has no component job we can trigger.");

            buildService.trigger(BuildJob.of(applicationId, application.deploymentJobs().projectId().getAsLong(), jobType.jobName()));
            return singletonList(component);
        }
        Versions versions = Versions.from(application.change(), application,
                                          deploymentFor(application, jobType),
                                          controller.systemVersion());
        String reason = "Job triggered manually by " + user;
        // Untested production targets get their test jobs triggered instead of the requested job.
        return (jobType.isProduction() && ! isTested(application, versions)
                ? testJobs(application, versions, reason, clock.instant()).stream()
                : Stream.of(deploymentJob(application, versions, application.change(), jobType, reason, clock.instant())))
                .peek(this::trigger)
                .map(Job::jobType).collect(toList());
    }

    /**
     * Triggers a change of this application
     *
     * @param applicationId the application to trigger
     * @throws IllegalArgumentException if this application already has an ongoing change
     */
    public void triggerChange(ApplicationId applicationId, Change change) {
        applications().lockOrThrow(applicationId, application -> {
            // A failing change may be replaced; a healthy in-progress change may not.
            if (application.get().change().isPresent() && ! application.get().deploymentJobs().hasFailures())
                throw new IllegalArgumentException("Could not start " + change + " on " + application + ": " +
                                                   application.get().change() + " is already in progress");
            application = application.withChange(change);
            if (change.application().isPresent())
                application = application.withOutstandingChange(Change.empty());
            applications().store(application);
        });
    }

    /**
     * Cancels a platform upgrade of the given application, keeping any application upgrade
     * only if {@code keepApplicationChange}.
     */
    public void cancelChange(ApplicationId applicationId, boolean keepApplicationChange) {
        applications().lockOrThrow(applicationId, application -> {
            applications().store(application.withChange(application.get().change().application()
                                                                   .filter(__ -> keepApplicationChange)
                                                                   .map(Change::of)
                                                                   .orElse(Change.empty())));
        });
    }

    private ApplicationController applications() { return controller.applications(); }

    /** Returns the last success of the given job, provided it targeted the given versions. */
    private Optional<JobRun> successOn(Application application, JobType jobType, Versions versions) {
        return application.deploymentJobs().statusOf(jobType).flatMap(JobStatus::lastSuccess)
                          .filter(versions::targetsMatch);
    }

    /** Returns the deployment of the given application in the zone of the given job, if any. */
    private Optional<Deployment> deploymentFor(Application application, JobType jobType) {
        return Optional.ofNullable(application.deployments().get(jobType.zone(controller.system())));
    }

    /** Returns the greater of the two given values, or whichever is present, or empty. */
    private static <T extends Comparable<T>> Optional<T> max(Optional<T> o1, Optional<T> o2) {
        return ! o1.isPresent() ? o2 : ! o2.isPresent() ? o1 : o1.get().compareTo(o2.get()) >= 0 ? o1 : o2;
    }

    /** Returns the set of all jobs which have changes to propagate from the upstream steps. */
    private List<Job> computeReadyJobs() {
        return ApplicationList.from(applications().asList())
                              .withProjectId()
                              .deploying()
                              .idList().stream()
                              .map(this::computeReadyJobs)
                              .flatMap(Collection::stream)
                              .collect(toList());
    }

    // NOTE(review): this Javadoc appears to describe computeReadyJobs(ApplicationId),
    // which is defined elsewhere -- confirm and move it there.
    /**
     * Finds the next step to trigger for the given application, if any, and returns these as a list.
     */

    /** Returns whether given job should be triggered */
    private boolean canTrigger(JobType job, Versions versions, Application application, List<JobType> parallelJobs) {
        if (jobStateOf(application, job) != idle) return false;
        // Production jobs in the same step may run in parallel; any other running production job blocks this one.
        if (parallelJobs != null && ! parallelJobs.containsAll(runningProductionJobs(application))) return false;
        if (job.isProduction() && isSuspendedInAnotherZone(application, job.zone(controller.system()))) return false;

        return triggerAt(clock.instant(), job, versions, application);
    }

    /** Returns whether given job should be triggered */
    private boolean canTrigger(JobType job, Versions versions, Application application) {
        return canTrigger(job, versions, application, null);
    }

    /** Returns whether any production deployment of the application, outside the given zone, is suspended. */
    private boolean isSuspendedInAnotherZone(Application application, ZoneId zone) {
        for (Deployment deployment : application.productionDeployments().values()) {
            if (   ! deployment.zone().equals(zone)
                &&   controller.applications().isSuspended(new DeploymentId(application.id(), deployment.zone())))
                return true;
        }
        return false;
    }

    /** Returns whether the given job can trigger at the given instant */
    public boolean triggerAt(Instant instant, JobType job, Versions versions, Application application) {
        Optional<JobStatus> jobStatus = application.deploymentJobs().statusOf(job);
        if (!jobStatus.isPresent()) return true;                                      // Never run before.
        if (jobStatus.get().isSuccess()) return true;                                 // Not failing, so no throttling.
        if (!jobStatus.get().lastCompleted().isPresent()) return true;                // Never completed.
        if (!jobStatus.get().firstFailing().isPresent()) return true;                 // Failing, but no record of first failure.
        if (!versions.targetsMatch(jobStatus.get().lastCompleted().get())) return true; // Targets changed since last run.
        if (application.deploymentSpec().upgradePolicy() == DeploymentSpec.UpgradePolicy.canary) return true; // Canaries are not throttled.

        Instant firstFailing = jobStatus.get().firstFailing().get().at();
        Instant lastCompleted = jobStatus.get().lastCompleted().get().at();

        // Within the first minute of failure: retry immediately.
        if (firstFailing.isAfter(instant.minus(Duration.ofMinutes(1)))) return true;
        // Out-of-capacity test jobs: retry every minute.
        if (job.isTest() && jobStatus.get().isOutOfCapacity()) {
            return lastCompleted.isBefore(instant.minus(Duration.ofMinutes(1)));
        }
        // Within the first hour of failure: retry every 10 minutes.
        if (firstFailing.isAfter(instant.minus(Duration.ofHours(1)))) {
            return lastCompleted.isBefore(instant.minus(Duration.ofMinutes(10)));
        }
        // Older failures: retry every 2 hours.
        return lastCompleted.isBefore(instant.minus(Duration.ofHours(2)));
    }

    /** Returns the production jobs of the given application which are currently running. */
    private List<JobType> runningProductionJobs(Application application) {
        return application.deploymentJobs().jobStatus().keySet().parallelStream()
                          .filter(JobType::isProduction)
                          .filter(job -> isRunning(application, job))
                          .collect(toList());
    }

    /** Returns whether the given job is currently running; false if completed since last triggered, asking the build service otherwise. */
    private boolean isRunning(Application application, JobType jobType) {
        return    ! application.deploymentJobs().statusOf(jobType)
                               .flatMap(job -> job.lastCompleted().map(run -> run.at().isAfter(job.lastTriggered().get().at())))
                               .orElse(false)
               &&   EnumSet.of(running, queued).contains(jobStateOf(application, jobType));
    }

    /** Returns the state of the given job: from the job controller for internally deployed applications, otherwise from the build service. */
    private JobState jobStateOf(Application application, JobType jobType) {
        if (application.deploymentJobs().deployedInternally()) {
            Optional<Run> run = controller.jobController().last(application.id(), jobType);
            return run.isPresent() && ! run.get().hasEnded() ? JobState.running : JobState.idle;
        }
        return buildService.stateOf(BuildJob.of(application.id(),
                                                application.deploymentJobs().projectId().getAsLong(),
                                                jobType.jobName()));
    }

    /**
     * Returns whether the given change is complete for the given application for the given job.
     *
     * Any job is complete if the given change is already successful on that job.
     * A production job is also considered complete if its current change is strictly dominated by what
     * is already deployed in its zone, i.e., no parts of the change are upgrades, and the full current
     * change for the application downgrades the deployment, which is an acknowledgement that the deployed
     * version is broken somehow, such that the job may be locked in failure until a new version is released.
     */
    public boolean isComplete(Change change, Application application, JobType jobType) {
        Optional<Deployment> existingDeployment = deploymentFor(application, jobType);
        return    application.deploymentJobs().statusOf(jobType).flatMap(JobStatus::lastSuccess)
                             .map(job ->    change.platform().map(job.platform()::equals).orElse(true)
                                         && change.application().map(job.application()::equals).orElse(true))
                             .orElse(false)
               ||    jobType.isProduction()
                  && existingDeployment.map(deployment ->    ! isUpgrade(change, deployment)
                                                          &&   isDowngrade(application.change(), deployment))
                                       .orElse(false);
    }

    /** Returns whether the given change upgrades either the platform or the application of the given deployment. */
    private static boolean isUpgrade(Change change, Deployment deployment) {
        return change.upgrades(deployment.version()) || change.upgrades(deployment.applicationVersion());
    }

    /** Returns whether the given change downgrades either the platform or the application of the given deployment. */
    private static boolean isDowngrade(Change change, Deployment deployment) {
        return change.downgrades(deployment.version()) || change.downgrades(deployment.applicationVersion());
    }

    /** Returns whether the given versions are tested in both test jobs, or already triggered in production. */
    private boolean isTested(Application application, Versions versions) {
        return    testedIn(application, systemTest, versions)
               && testedIn(application, stagingTest, versions)
               || alreadyTriggered(application, versions);
    }

    /** Returns whether the given test job has succeeded on the given versions; only system and staging test are valid arguments. */
    public boolean testedIn(Application application, JobType testType, Versions versions) {
        if (testType == systemTest)
            return successOn(application, systemTest, versions).isPresent();
        if (testType == stagingTest)
            // Staging tests must also match on source versions, when these are present.
            return successOn(application, stagingTest, versions).filter(versions::sourcesMatchIfPresent).isPresent();
        throw new IllegalArgumentException(testType + " is not a test job!");
    }

    /** Returns whether any production job of the application was last triggered with the given versions. */
    public boolean alreadyTriggered(Application application, Versions versions) {
        return application.deploymentJobs().jobStatus().values().stream()
                          .filter(job -> job.type().isProduction())
                          .anyMatch(job -> job.lastTriggered()
                                              .filter(versions::targetsMatch)
                                              .filter(versions::sourcesMatchIfPresent)
                                              .isPresent());
    }

    /** Returns whether a newly built application version may replace the application's current change now. */
    private boolean acceptNewApplicationVersion(Application application) {
        if ( ! application.deploymentSpec().canChangeRevisionAt(clock.instant())) return false; // Blocked by the spec's revision change windows.
        if (application.change().application().isPresent()) return true;                        // Replacing a previous application change is ok.
        if (application.deploymentJobs().hasFailures()) return true;                            // Allow changes on top of a failing change.
        return ! application.change().platform().isPresent();                                   // Otherwise, only when no platform change is rolling out.
    }

    /** Returns the part of the application's current change which is not yet complete in all its relevant jobs. */
    private Change remainingChange(Application application) {
        DeploymentSteps steps = steps(application.deploymentSpec());
        List<JobType> jobs = steps.production().isEmpty()
                ? steps.testJobs()
                : steps.productionJobs();

        Change change = application.change();
        // If the platform-only part of the change is complete everywhere, the platform change is done, and vice versa.
        if (jobs.stream().allMatch(job -> isComplete(application.change().withoutApplication(), application, job)))
            change = change.withoutPlatform();
        if (jobs.stream().allMatch(job -> isComplete(application.change().withoutPlatform(), application, job)))
            change = change.withoutApplication();
        return change;
    }

    /**
     * Returns the list of test jobs that should run now, and that need to succeed on the given versions for it to be considered tested.
     */
    private List<Job> testJobs(Application application, Versions versions, String reason, Instant availableSince) {
        List<Job> jobs = new ArrayList<>();
        for (JobType jobType : steps(application.deploymentSpec()).testJobs()) {
            // System tests need not match on source versions; staging tests do.
            Optional<JobRun> completion = successOn(application, jobType, versions)
                    .filter(run -> versions.sourcesMatchIfPresent(run) || jobType == systemTest);
            if ( ! completion.isPresent() && canTrigger(jobType, versions, application))
                jobs.add(deploymentJob(application, versions, application.change(), jobType, reason, availableSince));
        }
        return jobs;
    }

    /** Creates a Job for the given job type, marking it a retry if the job last failed on out of capacity. */
    private Job deploymentJob(Application application, Versions versions, Change change, JobType jobType, String reason, Instant availableSince) {
        boolean isRetry = application.deploymentJobs().statusOf(jobType)
                                     .map(JobStatus::isOutOfCapacity)
                                     .orElse(false);
        if (isRetry) reason += "; retrying on out of capacity";

        JobRun triggering = JobRun.triggering(versions.targetPlatform(), versions.targetApplication(),
                                              versions.sourcePlatform(), versions.sourceApplication(),
                                              reason, clock.instant());
        return new Job(application, triggering, jobType, availableSince, isRetry, change.application().isPresent());
    }

    /** A triggerable build job, together with the versions it triggers on and its scheduling attributes. */
    private static class Job extends BuildJob {

        private final JobType jobType;
        private final JobRun triggering;
        private final Instant availableSince;     // When the change this job carries became ready to deploy.
        private final boolean isRetry;            // Whether this is a retry after an out-of-capacity failure.
        private final boolean isApplicationUpgrade;

        private Job(Application application, JobRun triggering, JobType jobType, Instant availableSince,
                    boolean isRetry, boolean isApplicationUpgrade) {
            super(application.id(), application.deploymentJobs().projectId().getAsLong(), jobType.jobName());
            this.jobType = jobType;
            this.triggering = triggering;
            this.availableSince = availableSince;
            this.isRetry = isRetry;
            this.isApplicationUpgrade = isApplicationUpgrade;
        }

        JobType jobType() { return jobType; }
        Instant availableSince() { return availableSince; }
        boolean isRetry() { return isRetry; }
        boolean applicationUpgrade() { return isApplicationUpgrade; }

    }

}
/**
 * Maintains the deployment jobs of applications: computes which jobs are ready to run,
 * triggers them through the build service or the internal job controller, and records
 * job completions so the remaining change of each application stays up to date.
 *
 * NOTE(review): this file contains an earlier, near-identical definition of a class with
 * this name; the only code difference here is the use of withChanges() instead of
 * deploying() in computeReadyJobs(). One of the two copies should be removed.
 */
class DeploymentTrigger {

    private final static Logger log = Logger.getLogger(DeploymentTrigger.class.getName());

    private final Controller controller;
    private final Clock clock;
    private final BuildService buildService;
    private final JobController jobs;

    /** Creates a trigger; all arguments must be non-null. The job controller is taken from the given controller. */
    public DeploymentTrigger(Controller controller, BuildService buildService, Clock clock) {
        this.controller = Objects.requireNonNull(controller, "controller cannot be null");
        this.clock = Objects.requireNonNull(clock, "clock cannot be null");
        this.buildService = Objects.requireNonNull(buildService, "buildService cannot be null");
        this.jobs = controller.jobController();
    }

    /** Returns the deployment steps of the given deployment spec, resolved in this controller's system. */
    public DeploymentSteps steps(DeploymentSpec spec) {
        return new DeploymentSteps(spec, controller::system);
    }

    /**
     * Records information when a job completes (successfully or not). This information is used when deciding what to
     * trigger next.
     */
    public void notifyOfCompletion(JobReport report) {
        log.log(LogLevel.INFO, String.format("Notified of %s for %s of %s (%d)",
                                             report.jobError().map(e -> e.toString() + " error")
                                                   .orElse("success"),
                                             report.jobType(), report.applicationId(), report.projectId()));
        if ( ! applications().get(report.applicationId()).isPresent()) {
            // Reports may arrive for applications no longer known to this controller; log and ignore.
            log.log(LogLevel.WARNING, "Ignoring completion of job of project '" + report.projectId() +
                                      "': Unknown application '" + report.applicationId() + "'");
            return;
        }

        applications().lockOrThrow(report.applicationId(), application -> {
            JobRun triggering;
            if (report.jobType() == component) {
                // The component (build) job produces a new application version from the reported source revision.
                ApplicationVersion applicationVersion = ApplicationVersion.from(report.sourceRevision().get(), report.buildNumber());
                triggering = JobRun.triggering(application.get().oldestDeployedPlatform().orElse(controller.systemVersion()),
                                               applicationVersion, Optional.empty(), Optional.empty(),
                                               "Application commit", clock.instant());
                if (report.success()) {
                    if (acceptNewApplicationVersion(application.get())) {
                        application = application.withChange(application.get().change().with(applicationVersion))
                                                 .withOutstandingChange(Change.empty());
                        if (application.get().deploymentJobs().deployedInternally())
                            // Internally deployed: abort active runs of this application, now superseded by the new version.
                            for (Run run : jobs.active())
                                if (run.id().application().equals(report.applicationId()))
                                    jobs.abort(run.id());
                    }
                    else
                        // Not accepted now; keep it as the outstanding change until it can be rolled out.
                        application = application.withOutstandingChange(Change.of(applicationVersion));
                }
            }
            else {
                // A deployment job must have been triggered, and not completed after that trigger, to be reported on.
                triggering = application.get().deploymentJobs().statusOf(report.jobType())
                                        .filter(job ->    job.lastTriggered().isPresent()
                                                       && job.lastCompleted()
                                                             .map(completion -> ! completion.at().isAfter(job.lastTriggered().get().at()))
                                                             .orElse(true))
                                        .orElseThrow(() -> new IllegalStateException("Notified of completion of " + report.jobType().jobName() +
                                                                                     " for " + report.applicationId() +
                                                                                     ", but that has neither been triggered nor deployed"))
                                        .lastTriggered().get();
            }
            application = application.withJobCompletion(report.projectId(),
                                                        report.jobType(),
                                                        triggering.completion(report.buildNumber(), clock.instant()),
                                                        report.jobError());
            // Completion may have finished parts of the current change; recompute what remains.
            application = application.withChange(remainingChange(application.get()));
            applications().store(application);
        });
    }

    /** Returns a map of jobs that are scheduled to be run, grouped by the job type */
    public Map<JobType, ? extends List<? extends BuildJob>> jobsToRun() {
        return computeReadyJobs().stream().collect(groupingBy(Job::jobType));
    }

    /**
     * Finds and triggers jobs that can and should run but are currently not, and returns the number of triggered jobs.
     *
     * Only one job is triggered each run for test jobs, since their environments have limited capacity.
     */
    public long triggerReadyJobs() {
        return computeReadyJobs().stream()
                                 .collect(partitioningBy(job -> job.jobType().isTest()))
                                 .entrySet().stream()
                                 .flatMap(entry -> (entry.getKey()
                                         // Test jobs: one group per job type, sorted so retries and upgrades go first,
                                         // then by availability; only one is triggered per group (limit 1 below).
                                         ? entry.getValue().stream()
                                                .sorted(comparing(Job::isRetry)
                                                                .thenComparing(Job::applicationUpgrade)
                                                                .reversed()
                                                                .thenComparing(Job::availableSince))
                                                .collect(groupingBy(Job::jobType))
                                         // Other jobs: one group per application, with no trigger limit.
                                         : entry.getValue().stream()
                                                .collect(groupingBy(Job::applicationId)))
                                         .values().stream()
                                         .map(jobs -> (Supplier<Long>) jobs.stream()
                                                                           .filter(this::trigger)
                                                                           .limit(entry.getKey() ? 1 : Long.MAX_VALUE)::count))
                                 .parallel().map(Supplier::get).reduce(0L, Long::sum);
    }

    /**
     * Attempts to trigger the given job for the given application and returns the outcome.
     *
     * If the build service can not find the given job, or claims it is illegal to trigger it,
     * the project id is removed from the application owning the job, to prevent further trigger attempts.
     */
    public boolean trigger(Job job) {
        log.log(LogLevel.INFO, String.format("Triggering %s: %s", job, job.triggering));
        try {
            applications().lockOrThrow(job.applicationId(), application -> {
                if (application.get().deploymentJobs().deployedInternally())
                    // Internally deployed applications run through the job controller, not the external build service.
                    jobs.start(job.applicationId(), job.jobType, new Versions(job.triggering.platform(),
                                                                             job.triggering.application(),
                                                                             job.triggering.sourcePlatform(),
                                                                             job.triggering.sourceApplication()));
                else
                    buildService.trigger(job);

                applications().store(application.withJobTriggering(job.jobType, job.triggering));
            });
            return true;
        }
        catch (RuntimeException e) {
            log.log(LogLevel.WARNING, "Exception triggering " + job + ": " + e);
            if (e instanceof NoSuchElementException || e instanceof IllegalArgumentException)
                // The job is unknown or illegal to trigger; drop the project id so we stop trying.
                applications().lockOrThrow(job.applicationId(), application ->
                        applications().store(application.withProjectId(OptionalLong.empty())));
            return false;
        }
    }

    /** Force triggering of a job for given application. */
    public List<JobType> forceTrigger(ApplicationId applicationId, JobType jobType, String user) {
        Application application = applications().require(applicationId);
        if (jobType == component) {
            if (application.deploymentJobs().deployedInternally())
                throw new IllegalArgumentException(applicationId + " has no component job we can trigger.");

            buildService.trigger(BuildJob.of(applicationId, application.deploymentJobs().projectId().getAsLong(), jobType.jobName()));
            return singletonList(component);
        }
        Versions versions = Versions.from(application.change(), application,
                                          deploymentFor(application, jobType),
                                          controller.systemVersion());
        String reason = "Job triggered manually by " + user;
        // Untested production targets get their test jobs triggered instead of the requested job.
        return (jobType.isProduction() && ! isTested(application, versions)
                ? testJobs(application, versions, reason, clock.instant()).stream()
                : Stream.of(deploymentJob(application, versions, application.change(), jobType, reason, clock.instant())))
                .peek(this::trigger)
                .map(Job::jobType).collect(toList());
    }

    /**
     * Triggers a change of this application
     *
     * @param applicationId the application to trigger
     * @throws IllegalArgumentException if this application already has an ongoing change
     */
    public void triggerChange(ApplicationId applicationId, Change change) {
        applications().lockOrThrow(applicationId, application -> {
            // A failing change may be replaced; a healthy in-progress change may not.
            if (application.get().change().isPresent() && ! application.get().deploymentJobs().hasFailures())
                throw new IllegalArgumentException("Could not start " + change + " on " + application + ": " +
                                                   application.get().change() + " is already in progress");
            application = application.withChange(change);
            if (change.application().isPresent())
                application = application.withOutstandingChange(Change.empty());
            applications().store(application);
        });
    }

    /**
     * Cancels a platform upgrade of the given application, keeping any application upgrade
     * only if {@code keepApplicationChange}.
     */
    public void cancelChange(ApplicationId applicationId, boolean keepApplicationChange) {
        applications().lockOrThrow(applicationId, application -> {
            applications().store(application.withChange(application.get().change().application()
                                                                   .filter(__ -> keepApplicationChange)
                                                                   .map(Change::of)
                                                                   .orElse(Change.empty())));
        });
    }

    private ApplicationController applications() { return controller.applications(); }

    /** Returns the last success of the given job, provided it targeted the given versions. */
    private Optional<JobRun> successOn(Application application, JobType jobType, Versions versions) {
        return application.deploymentJobs().statusOf(jobType).flatMap(JobStatus::lastSuccess)
                          .filter(versions::targetsMatch);
    }

    /** Returns the deployment of the given application in the zone of the given job, if any. */
    private Optional<Deployment> deploymentFor(Application application, JobType jobType) {
        return Optional.ofNullable(application.deployments().get(jobType.zone(controller.system())));
    }

    /** Returns the greater of the two given values, or whichever is present, or empty. */
    private static <T extends Comparable<T>> Optional<T> max(Optional<T> o1, Optional<T> o2) {
        return ! o1.isPresent() ? o2 : ! o2.isPresent() ? o1 : o1.get().compareTo(o2.get()) >= 0 ? o1 : o2;
    }

    /** Returns the set of all jobs which have changes to propagate from the upstream steps. */
    private List<Job> computeReadyJobs() {
        return ApplicationList.from(applications().asList())
                              .withProjectId()
                              .withChanges() // Presumably filters to applications with a current or outstanding change -- TODO confirm.
                              .idList().stream()
                              .map(this::computeReadyJobs)
                              .flatMap(Collection::stream)
                              .collect(toList());
    }

    // NOTE(review): this Javadoc appears to describe computeReadyJobs(ApplicationId),
    // which is defined elsewhere -- confirm and move it there.
    /**
     * Finds the next step to trigger for the given application, if any, and returns these as a list.
     */

    /** Returns whether given job should be triggered */
    private boolean canTrigger(JobType job, Versions versions, Application application, List<JobType> parallelJobs) {
        if (jobStateOf(application, job) != idle) return false;
        // Production jobs in the same step may run in parallel; any other running production job blocks this one.
        if (parallelJobs != null && ! parallelJobs.containsAll(runningProductionJobs(application))) return false;
        if (job.isProduction() && isSuspendedInAnotherZone(application, job.zone(controller.system()))) return false;

        return triggerAt(clock.instant(), job, versions, application);
    }

    /** Returns whether given job should be triggered */
    private boolean canTrigger(JobType job, Versions versions, Application application) {
        return canTrigger(job, versions, application, null);
    }

    /** Returns whether any production deployment of the application, outside the given zone, is suspended. */
    private boolean isSuspendedInAnotherZone(Application application, ZoneId zone) {
        for (Deployment deployment : application.productionDeployments().values()) {
            if (   ! deployment.zone().equals(zone)
                &&   controller.applications().isSuspended(new DeploymentId(application.id(), deployment.zone())))
                return true;
        }
        return false;
    }

    /** Returns whether the given job can trigger at the given instant */
    public boolean triggerAt(Instant instant, JobType job, Versions versions, Application application) {
        Optional<JobStatus> jobStatus = application.deploymentJobs().statusOf(job);
        if (!jobStatus.isPresent()) return true;                                      // Never run before.
        if (jobStatus.get().isSuccess()) return true;                                 // Not failing, so no throttling.
        if (!jobStatus.get().lastCompleted().isPresent()) return true;                // Never completed.
        if (!jobStatus.get().firstFailing().isPresent()) return true;                 // Failing, but no record of first failure.
        if (!versions.targetsMatch(jobStatus.get().lastCompleted().get())) return true; // Targets changed since last run.
        if (application.deploymentSpec().upgradePolicy() == DeploymentSpec.UpgradePolicy.canary) return true; // Canaries are not throttled.

        Instant firstFailing = jobStatus.get().firstFailing().get().at();
        Instant lastCompleted = jobStatus.get().lastCompleted().get().at();

        // Within the first minute of failure: retry immediately.
        if (firstFailing.isAfter(instant.minus(Duration.ofMinutes(1)))) return true;
        // Out-of-capacity test jobs: retry every minute.
        if (job.isTest() && jobStatus.get().isOutOfCapacity()) {
            return lastCompleted.isBefore(instant.minus(Duration.ofMinutes(1)));
        }
        // Within the first hour of failure: retry every 10 minutes.
        if (firstFailing.isAfter(instant.minus(Duration.ofHours(1)))) {
            return lastCompleted.isBefore(instant.minus(Duration.ofMinutes(10)));
        }
        // Older failures: retry every 2 hours.
        return lastCompleted.isBefore(instant.minus(Duration.ofHours(2)));
    }

    /** Returns the production jobs of the given application which are currently running. */
    private List<JobType> runningProductionJobs(Application application) {
        return application.deploymentJobs().jobStatus().keySet().parallelStream()
                          .filter(JobType::isProduction)
                          .filter(job -> isRunning(application, job))
                          .collect(toList());
    }

    /** Returns whether the given job is currently running; false if completed since last triggered, asking the build service otherwise. */
    private boolean isRunning(Application application, JobType jobType) {
        return    ! application.deploymentJobs().statusOf(jobType)
                               .flatMap(job -> job.lastCompleted().map(run -> run.at().isAfter(job.lastTriggered().get().at())))
                               .orElse(false)
               &&   EnumSet.of(running, queued).contains(jobStateOf(application, jobType));
    }

    /** Returns the state of the given job: from the job controller for internally deployed applications, otherwise from the build service. */
    private JobState jobStateOf(Application application, JobType jobType) {
        if (application.deploymentJobs().deployedInternally()) {
            Optional<Run> run = controller.jobController().last(application.id(), jobType);
            return run.isPresent() && ! run.get().hasEnded() ? JobState.running : JobState.idle;
        }
        return buildService.stateOf(BuildJob.of(application.id(),
                                                application.deploymentJobs().projectId().getAsLong(),
                                                jobType.jobName()));
    }

    /**
     * Returns whether the given change is complete for the given application for the given job.
     *
     * Any job is complete if the given change is already successful on that job.
     * A production job is also considered complete if its current change is strictly dominated by what
     * is already deployed in its zone, i.e., no parts of the change are upgrades, and the full current
     * change for the application downgrades the deployment, which is an acknowledgement that the deployed
     * version is broken somehow, such that the job may be locked in failure until a new version is released.
     */
    public boolean isComplete(Change change, Application application, JobType jobType) {
        Optional<Deployment> existingDeployment = deploymentFor(application, jobType);
        return    application.deploymentJobs().statusOf(jobType).flatMap(JobStatus::lastSuccess)
                             .map(job ->    change.platform().map(job.platform()::equals).orElse(true)
                                         && change.application().map(job.application()::equals).orElse(true))
                             .orElse(false)
               ||    jobType.isProduction()
                  && existingDeployment.map(deployment ->    ! isUpgrade(change, deployment)
                                                          &&   isDowngrade(application.change(), deployment))
                                       .orElse(false);
    }

    /** Returns whether the given change upgrades either the platform or the application of the given deployment. */
    private static boolean isUpgrade(Change change, Deployment deployment) {
        return change.upgrades(deployment.version()) || change.upgrades(deployment.applicationVersion());
    }

    /** Returns whether the given change downgrades either the platform or the application of the given deployment. */
    private static boolean isDowngrade(Change change, Deployment deployment) {
        return change.downgrades(deployment.version()) || change.downgrades(deployment.applicationVersion());
    }

    /** Returns whether the given versions are tested in both test jobs, or already triggered in production. */
    private boolean isTested(Application application, Versions versions) {
        return    testedIn(application, systemTest, versions)
               && testedIn(application, stagingTest, versions)
               || alreadyTriggered(application, versions);
    }

    /** Returns whether the given test job has succeeded on the given versions; only system and staging test are valid arguments. */
    public boolean testedIn(Application application, JobType testType, Versions versions) {
        if (testType == systemTest)
            return successOn(application, systemTest, versions).isPresent();
        if (testType == stagingTest)
            // Staging tests must also match on source versions, when these are present.
            return successOn(application, stagingTest, versions).filter(versions::sourcesMatchIfPresent).isPresent();
        throw new IllegalArgumentException(testType + " is not a test job!");
    }

    /** Returns whether any production job of the application was last triggered with the given versions. */
    public boolean alreadyTriggered(Application application, Versions versions) {
        return application.deploymentJobs().jobStatus().values().stream()
                          .filter(job -> job.type().isProduction())
                          .anyMatch(job -> job.lastTriggered()
                                              .filter(versions::targetsMatch)
                                              .filter(versions::sourcesMatchIfPresent)
                                              .isPresent());
    }

    /** Returns whether a newly built application version may replace the application's current change now. */
    private boolean acceptNewApplicationVersion(Application application) {
        if ( ! application.deploymentSpec().canChangeRevisionAt(clock.instant())) return false; // Blocked by the spec's revision change windows.
        if (application.change().application().isPresent()) return true;                        // Replacing a previous application change is ok.
        if (application.deploymentJobs().hasFailures()) return true;                            // Allow changes on top of a failing change.
        return ! application.change().platform().isPresent();                                   // Otherwise, only when no platform change is rolling out.
    }

    /** Returns the part of the application's current change which is not yet complete in all its relevant jobs. */
    private Change remainingChange(Application application) {
        DeploymentSteps steps = steps(application.deploymentSpec());
        List<JobType> jobs = steps.production().isEmpty()
                ? steps.testJobs()
                : steps.productionJobs();

        Change change = application.change();
        // If the platform-only part of the change is complete everywhere, the platform change is done, and vice versa.
        if (jobs.stream().allMatch(job -> isComplete(application.change().withoutApplication(), application, job)))
            change = change.withoutPlatform();
        if (jobs.stream().allMatch(job -> isComplete(application.change().withoutPlatform(), application, job)))
            change = change.withoutApplication();
        return change;
    }

    /**
     * Returns the list of test jobs that should run now, and that need to succeed on the given versions for it to be considered tested.
     */
    private List<Job> testJobs(Application application, Versions versions, String reason, Instant availableSince) {
        List<Job> jobs = new ArrayList<>();
        for (JobType jobType : steps(application.deploymentSpec()).testJobs()) {
            // System tests need not match on source versions; staging tests do.
            Optional<JobRun> completion = successOn(application, jobType, versions)
                    .filter(run -> versions.sourcesMatchIfPresent(run) || jobType == systemTest);
            if ( ! completion.isPresent() && canTrigger(jobType, versions, application))
                jobs.add(deploymentJob(application, versions, application.change(), jobType, reason, availableSince));
        }
        return jobs;
    }

    /** Creates a Job for the given job type, marking it a retry if the job last failed on out of capacity. */
    private Job deploymentJob(Application application, Versions versions, Change change, JobType jobType, String reason, Instant availableSince) {
        boolean isRetry = application.deploymentJobs().statusOf(jobType)
                                     .map(JobStatus::isOutOfCapacity)
                                     .orElse(false);
        if (isRetry) reason += "; retrying on out of capacity";

        JobRun triggering = JobRun.triggering(versions.targetPlatform(), versions.targetApplication(),
                                              versions.sourcePlatform(), versions.sourceApplication(),
                                              reason, clock.instant());
        return new Job(application, triggering, jobType, availableSince, isRetry, change.application().isPresent());
    }

    /** A triggerable build job, together with the versions it triggers on and its scheduling attributes. */
    private static class Job extends BuildJob {

        private final JobType jobType;
        private final JobRun triggering;
        private final Instant availableSince;     // When the change this job carries became ready to deploy.
        private final boolean isRetry;            // Whether this is a retry after an out-of-capacity failure.
        private final boolean isApplicationUpgrade;

        private Job(Application application, JobRun triggering, JobType jobType, Instant availableSince,
                    boolean isRetry, boolean isApplicationUpgrade) {
            super(application.id(), application.deploymentJobs().projectId().getAsLong(), jobType.jobName());
            this.jobType = jobType;
            this.triggering = triggering;
            this.availableSince = availableSince;
            this.isRetry = isRetry;
            this.isApplicationUpgrade = isApplicationUpgrade;
        }

        JobType jobType() { return jobType; }
        Instant availableSince() { return availableSince; }
        boolean isRetry() { return isRetry; }
        boolean applicationUpgrade() { return isApplicationUpgrade; }

    }

}
Hmm — writing it that way felt natural to me, but I see your point. I'll consider your suggestion.
/**
 * Computes the jobs that are ready to run now for the given application, as a list.
 *
 * Walks the production steps of the deployment spec in order, collecting deployment jobs whose
 * upstream steps have completed, and — when the change is not yet tested on the relevant versions —
 * the test jobs that must succeed first. If no change is in progress (or nothing needed testing),
 * test jobs for the latest outstanding change are computed instead, so testing can run ahead of prod.
 *
 * @param id the application to compute ready jobs for; unknown ids yield an empty list
 * @return an unmodifiable list of jobs ready to be triggered
 */
private List<Job> computeReadyJobs(ApplicationId id) {
    List<Job> jobs = new ArrayList<>();
    applications().get(id).ifPresent(application -> {
        Change change = application.change();
        // Latest instant at which both declared test jobs last succeeded; empty if either has no success.
        Optional<Instant> completedAt = max(application.deploymentJobs().statusOf(systemTest)
                                                       .<Instant>flatMap(job -> job.lastSuccess().map(JobRun::at)),
                                            application.deploymentJobs().statusOf(stagingTest)
                                                       .<Instant>flatMap(job -> job.lastSuccess().map(JobRun::at)));
        String reason = "New change available";
        // null means "not yet decided"; emptyList() means "no further testing needed" — the distinction matters below.
        List<Job> testJobs = null;
        DeploymentSteps steps = steps(application.deploymentSpec());

        if (change.isPresent()) {
            for (Step step : steps.production()) {
                List<JobType> stepJobs = steps.toJobs(step);
                List<JobType> remainingJobs = stepJobs.stream().filter(job -> !isComplete(change, application, job)).collect(toList());
                if (!remainingJobs.isEmpty()) {
                    for (JobType job : remainingJobs) {
                        Versions versions = Versions.from(change, application, deploymentFor(application, job), controller.systemVersion());
                        if (isTested(application, versions)) {
                            // Upstream completed and versions are tested: the job itself may be triggered.
                            if (completedAt.isPresent() && canTrigger(job, versions, application, stepJobs)) {
                                jobs.add(deploymentJob(application, versions, change, job, reason, completedAt.get()));
                            }
                            if (!alreadyTriggered(application, versions)) {
                                testJobs = emptyList();
                            }
                        } else if (testJobs == null) {
                            // Not tested yet — schedule the missing test jobs for these versions first.
                            testJobs = testJobs(application, versions, String.format("Testing deployment for %s (%s)", job.jobName(), versions.toString()), completedAt.orElseGet(clock::instant));
                        }
                    }
                    // Later steps must wait for this step; clear the completion gate.
                    completedAt = Optional.empty();
                } else {
                    if (stepJobs.isEmpty()) {
                        // A pure delay step: push the completion instant forward, and drop it if the delay hasn't elapsed.
                        Duration delay = ((DeploymentSpec.Delay) step).duration();
                        completedAt = completedAt.map(at -> at.plus(delay)).filter(at -> !at.isAfter(clock.instant()));
                        reason += " after a delay of " + delay;
                    } else {
                        // Step already complete: downstream may start when its last job completed.
                        completedAt = stepJobs.stream().map(job -> application.deploymentJobs().statusOf(job).get().lastCompleted().get().at()).max(naturalOrder());
                        reason = "Available change in " + stepJobs.stream().map(JobType::jobName).collect(joining(", "));
                    }
                }
            }
        }
        if (testJobs == null) {
            // Nothing required testing above: test the latest change (including any outstanding application change)
            // against the first production deployment, so tests stay ahead of production.
            Change latestChange = application.outstandingChange().application()
                                             .map(application.change()::with)
                                             .orElse(application.change());
            testJobs = testJobs(application, Versions.from(latestChange, application, steps.sortedDeployments(application.productionDeployments().values()).stream().findFirst(), controller.systemVersion()), "Testing last changes outside prod", clock.instant());
        }
        jobs.addAll(testJobs);
    });
    return Collections.unmodifiableList(jobs);
}
Change latestChange = application.outstandingChange().application()
/**
 * Computes the jobs that are ready to run now for the given application, as a list.
 *
 * Walks the production steps of the deployment spec in order, collecting deployment jobs whose
 * upstream steps have completed, and — when the change is not yet tested on the relevant versions —
 * the test jobs that must succeed first. If no change is in progress (or nothing needed testing),
 * test jobs for the latest outstanding change are computed instead, so testing can run ahead of prod.
 *
 * @param id the application to compute ready jobs for; unknown ids yield an empty list
 * @return an unmodifiable list of jobs ready to be triggered
 */
private List<Job> computeReadyJobs(ApplicationId id) {
    List<Job> jobs = new ArrayList<>();
    applications().get(id).ifPresent(application -> {
        Change change = application.change();
        // Latest instant at which both declared test jobs last succeeded; empty if either has no success.
        Optional<Instant> completedAt = max(application.deploymentJobs().statusOf(systemTest)
                                                       .<Instant>flatMap(job -> job.lastSuccess().map(JobRun::at)),
                                            application.deploymentJobs().statusOf(stagingTest)
                                                       .<Instant>flatMap(job -> job.lastSuccess().map(JobRun::at)));
        String reason = "New change available";
        // null means "not yet decided"; emptyList() means "no further testing needed" — the distinction matters below.
        List<Job> testJobs = null;
        DeploymentSteps steps = steps(application.deploymentSpec());

        if (change.isPresent()) {
            for (Step step : steps.production()) {
                List<JobType> stepJobs = steps.toJobs(step);
                List<JobType> remainingJobs = stepJobs.stream().filter(job -> ! isComplete(change, application, job)).collect(toList());
                if (!remainingJobs.isEmpty()) {
                    for (JobType job : remainingJobs) {
                        Versions versions = Versions.from(change, application, deploymentFor(application, job), controller.systemVersion());
                        if (isTested(application, versions)) {
                            // Upstream completed and versions are tested: the job itself may be triggered.
                            if (completedAt.isPresent() && canTrigger(job, versions, application, stepJobs)) {
                                jobs.add(deploymentJob(application, versions, change, job, reason, completedAt.get()));
                            }
                            if (!alreadyTriggered(application, versions)) {
                                testJobs = emptyList();
                            }
                        } else if (testJobs == null) {
                            // Not tested yet — schedule the missing test jobs for these versions first.
                            testJobs = testJobs(application, versions, String.format("Testing deployment for %s (%s)", job.jobName(), versions.toString()), completedAt.orElseGet(clock::instant));
                        }
                    }
                    // Later steps must wait for this step; clear the completion gate.
                    completedAt = Optional.empty();
                } else {
                    if (stepJobs.isEmpty()) {
                        // A pure delay step: push the completion instant forward, and drop it if the delay hasn't elapsed.
                        Duration delay = ((DeploymentSpec.Delay) step).duration();
                        completedAt = completedAt.map(at -> at.plus(delay)).filter(at -> !at.isAfter(clock.instant()));
                        reason += " after a delay of " + delay;
                    } else {
                        // Step already complete: downstream may start when its last job completed.
                        completedAt = stepJobs.stream().map(job -> application.deploymentJobs().statusOf(job).get().lastCompleted().get().at()).max(naturalOrder());
                        reason = "Available change in " + stepJobs.stream().map(JobType::jobName).collect(joining(", "));
                    }
                }
            }
        }
        if (testJobs == null) {
            // Nothing required testing above: test the latest change (including any outstanding application change)
            // against the first production deployment, so tests stay ahead of production.
            // Use Optional.map/orElse rather than an isPresent()/get() ternary.
            Change latestChange = application.outstandingChange().application()
                                             .map(change::with)
                                             .orElse(change);
            testJobs = testJobs(application, Versions.from(latestChange, application, steps.sortedDeployments(application.productionDeployments().values()).stream().findFirst(), controller.systemVersion()), "Testing last changes outside prod", clock.instant());
        }
        jobs.addAll(testJobs);
    });
    return Collections.unmodifiableList(jobs);
}
/**
 * Responsible for scheduling deployment jobs in a build system and keeping
 * application change state in sync with what is scheduled.
 *
 * This class assumes multiple manipulations of the same application are serialized
 * through {@code applications().lockOrThrow}; it never mutates applications without
 * holding that lock.
 */
class DeploymentTrigger {

    private final static Logger log = Logger.getLogger(DeploymentTrigger.class.getName());

    private final Controller controller;
    private final Clock clock;                 // injected for testable "now"
    private final BuildService buildService;   // external build system used for non-internal deployments
    private final JobController jobs;          // runner for internally deployed applications

    /** Creates a trigger; all arguments must be non-null. */
    public DeploymentTrigger(Controller controller, BuildService buildService, Clock clock) {
        this.controller = Objects.requireNonNull(controller, "controller cannot be null");
        this.clock = Objects.requireNonNull(clock, "clock cannot be null");
        this.buildService = Objects.requireNonNull(buildService, "buildService cannot be null");
        this.jobs = controller.jobController();
    }

    /** Returns the deployment steps helper for the given deployment spec, in this controller's system. */
    public DeploymentSteps steps(DeploymentSpec spec) {
        return new DeploymentSteps(spec, controller::system);
    }

    /**
     * Records information when a job completes (successfully or not). This information is used when deciding what to
     * trigger next.
     */
    public void notifyOfCompletion(JobReport report) {
        log.log(LogLevel.INFO, String.format("Notified of %s for %s of %s (%d)",
                                             report.jobError().map(e -> e.toString() + " error")
                                                   .orElse("success"),
                                             report.jobType(), report.applicationId(), report.projectId()));
        if ( ! applications().get(report.applicationId()).isPresent()) {
            // Reports for unknown applications are logged and dropped rather than failing the caller.
            log.log(LogLevel.WARNING, "Ignoring completion of job of project '" + report.projectId() +
                                      "': Unknown application '" + report.applicationId() + "'");
            return;
        }

        applications().lockOrThrow(report.applicationId(), application -> {
            JobRun triggering;
            if (report.jobType() == component) {
                // A component (build) job completed: this may introduce a new application version.
                ApplicationVersion applicationVersion = ApplicationVersion.from(report.sourceRevision().get(), report.buildNumber());
                triggering = JobRun.triggering(application.get().oldestDeployedPlatform().orElse(controller.systemVersion()), applicationVersion, Optional.empty(), Optional.empty(), "Application commit", clock.instant());
                if (report.success()) {
                    if (acceptNewApplicationVersion(application.get())) {
                        application = application.withChange(application.get().change().with(applicationVersion))
                                                 .withOutstandingChange(Change.empty());
                        // Abort any in-flight internal runs, which now target a superseded version.
                        if (application.get().deploymentJobs().deployedInternally())
                            for (Run run : jobs.active())
                                if (run.id().application().equals(report.applicationId()))
                                    jobs.abort(run.id());
                    }
                    else
                        // Can't take the new version now; park it as an outstanding change.
                        application = application.withOutstandingChange(Change.of(applicationVersion));
                }
            }
            else {
                // A deployment/test job completed: it must correspond to the last recorded triggering.
                triggering = application.get().deploymentJobs().statusOf(report.jobType())
                        .filter(job -> job.lastTriggered().isPresent()
                                       && job.lastCompleted()
                                             .map(completion -> ! completion.at().isAfter(job.lastTriggered().get().at()))
                                             .orElse(true))
                        .orElseThrow(() -> new IllegalStateException("Notified of completion of " + report.jobType().jobName() + " for " +
                                                                     report.applicationId() + ", but that has neither been triggered nor deployed"))
                        .lastTriggered().get();
            }
            application = application.withJobCompletion(report.projectId(), report.jobType(), triggering.completion(report.buildNumber(), clock.instant()), report.jobError());
            // Shrink the in-progress change by whatever parts are now fully deployed.
            application = application.withChange(remainingChange(application.get()));
            applications().store(application);
        });
    }

    /** Returns a map of jobs that are scheduled to be run, grouped by the job type */
    public Map<JobType, ? extends List<? extends BuildJob>> jobsToRun() {
        return computeReadyJobs().stream().collect(groupingBy(Job::jobType));
    }

    /**
     * Finds and triggers jobs that can and should run but are currently not, and returns the number of triggered jobs.
     *
     * Only one job is triggered each run for test jobs, since their environments have limited capacity.
     */
    public long triggerReadyJobs() {
        return computeReadyJobs().stream()
                                 // Split into test jobs (capacity-limited, one per type per run) and production jobs.
                                 .collect(partitioningBy(job -> job.jobType().isTest()))
                                 .entrySet().stream()
                                 .flatMap(entry -> (entry.getKey()
                                         // Test jobs: prioritize retries and application upgrades, then oldest first.
                                         ? entry.getValue().stream()
                                                .sorted(comparing(Job::isRetry)
                                                                .thenComparing(Job::applicationUpgrade)
                                                                .reversed()
                                                                .thenComparing(Job::availableSince))
                                                .collect(groupingBy(Job::jobType))
                                         : entry.getValue().stream()
                                                .collect(groupingBy(Job::applicationId)))
                                         .values().stream()
                                         // NOTE(review): this lambda parameter shadows the 'jobs' field.
                                         .map(jobs -> (Supplier<Long>) jobs.stream()
                                                                           .filter(this::trigger)
                                                                           .limit(entry.getKey() ? 1 : Long.MAX_VALUE)::count))
                                 .parallel().map(Supplier::get).reduce(0L, Long::sum);
    }

    /**
     * Attempts to trigger the given job for the given application and returns the outcome.
     *
     * If the build service can not find the given job, or claims it is illegal to trigger it,
     * the project id is removed from the application owning the job, to prevent further trigger attempts.
     */
    public boolean trigger(Job job) {
        log.log(LogLevel.INFO, String.format("Triggering %s: %s", job, job.triggering));
        try {
            applications().lockOrThrow(job.applicationId(), application -> {
                if (application.get().deploymentJobs().deployedInternally())
                    jobs.start(job.applicationId(), job.jobType, new Versions(job.triggering.platform(), job.triggering.application(), job.triggering.sourcePlatform(), job.triggering.sourceApplication()));
                else
                    buildService.trigger(job);
                applications().store(application.withJobTriggering(job.jobType, job.triggering));
            });
            return true;
        }
        catch (RuntimeException e) {
            log.log(LogLevel.WARNING, "Exception triggering " + job + ": " + e);
            // These exception types indicate the job is unknown/illegal; drop the project id to stop retrying.
            if (e instanceof NoSuchElementException || e instanceof IllegalArgumentException)
                applications().lockOrThrow(job.applicationId(), application ->
                        applications().store(application.withProjectId(OptionalLong.empty())));
            return false;
        }
    }

    /** Force triggering of a job for given application. */
    public List<JobType> forceTrigger(ApplicationId applicationId, JobType jobType, String user) {
        Application application = applications().require(applicationId);
        if (jobType == component) {
            if (application.deploymentJobs().deployedInternally())
                throw new IllegalArgumentException(applicationId + " has no component job we can trigger.");
            buildService.trigger(BuildJob.of(applicationId, application.deploymentJobs().projectId().getAsLong(), jobType.jobName()));
            return singletonList(component);
        }
        Versions versions = Versions.from(application.change(), application, deploymentFor(application, jobType), controller.systemVersion());
        String reason = "Job triggered manually by " + user;
        // Untested production deployments must run their test jobs first; otherwise trigger the job directly.
        return (jobType.isProduction() && ! isTested(application, versions)
                ? testJobs(application, versions, reason, clock.instant()).stream()
                : Stream.of(deploymentJob(application, versions, application.change(), jobType, reason, clock.instant())))
                .peek(this::trigger)
                .map(Job::jobType).collect(toList());
    }

    /**
     * Triggers a change of this application
     *
     * @param applicationId the application to trigger
     * @throws IllegalArgumentException if this application already has an ongoing change
     */
    public void triggerChange(ApplicationId applicationId, Change change) {
        applications().lockOrThrow(applicationId, application -> {
            if (application.get().change().isPresent() && ! application.get().deploymentJobs().hasFailures())
                throw new IllegalArgumentException("Could not start " + change + " on " + application + ": " +
                                                   application.get().change() + " is already in progress");
            application = application.withChange(change);
            if (change.application().isPresent())
                application = application.withOutstandingChange(Change.empty());
            applications().store(application);
        });
    }

    /** Cancels a platform upgrade of the given application, and an application upgrade as well if {@code keepApplicationChange}. */
    public void cancelChange(ApplicationId applicationId, boolean keepApplicationChange) {
        applications().lockOrThrow(applicationId, application -> {
            applications().store(application.withChange(application.get().change().application()
                                                                   .filter(__ -> keepApplicationChange)
                                                                   .map(Change::of)
                                                                   .orElse(Change.empty())));
        });
    }

    /** Convenience accessor for the application repository. */
    private ApplicationController applications() { return controller.applications(); }

    /** Returns the last success of the given job, if its versions match the given target versions. */
    private Optional<JobRun> successOn(Application application, JobType jobType, Versions versions) {
        return application.deploymentJobs().statusOf(jobType).flatMap(JobStatus::lastSuccess)
                          .filter(versions::targetsMatch);
    }

    /** Returns the deployment of the given application in the zone of the given job, if any. */
    private Optional<Deployment> deploymentFor(Application application, JobType jobType) {
        return Optional.ofNullable(application.deployments().get(jobType.zone(controller.system())));
    }

    /** Returns the largest of two optional values; an empty operand yields the other. */
    private static <T extends Comparable<T>> Optional<T> max(Optional<T> o1, Optional<T> o2) {
        return ! o1.isPresent() ? o2 : ! o2.isPresent() ? o1 : o1.get().compareTo(o2.get()) >= 0 ? o1 : o2;
    }

    /** Returns the set of all jobs which have changes to propagate from the upstream steps. */
    private List<Job> computeReadyJobs() {
        return ApplicationList.from(applications().asList())
                              .withProjectId()
                              .deploying()
                              .idList().stream()
                              .map(this::computeReadyJobs)
                              .flatMap(Collection::stream)
                              .collect(toList());
    }

    /**
     * Finds the next step to trigger for the given application, if any, and returns these as a list.
     */
    // NOTE(review): the computeReadyJobs(ApplicationId) overload this javadoc belongs to is defined
    // elsewhere in the file.

    /** Returns whether given job should be triggered */
    private boolean canTrigger(JobType job, Versions versions, Application application, List<JobType> parallelJobs) {
        if (jobStateOf(application, job) != idle) return false;
        // parallelJobs, when given, is the set of jobs allowed to run concurrently with this one.
        if (parallelJobs != null && ! parallelJobs.containsAll(runningProductionJobs(application))) return false;
        if (job.isProduction() && isSuspendedInAnotherZone(application, job.zone(controller.system()))) return false;
        return triggerAt(clock.instant(), job, versions, application);
    }

    /** Returns whether given job should be triggered */
    private boolean canTrigger(JobType job, Versions versions, Application application) {
        return canTrigger(job, versions, application, null);
    }

    /** Returns whether any production deployment of the application, outside the given zone, is suspended. */
    private boolean isSuspendedInAnotherZone(Application application, ZoneId zone) {
        for (Deployment deployment : application.productionDeployments().values()) {
            if ( ! deployment.zone().equals(zone)
                 && controller.applications().isSuspended(new DeploymentId(application.id(), deployment.zone())))
                return true;
        }
        return false;
    }

    /** Returns whether the given job can trigger at the given instant */
    public boolean triggerAt(Instant instant, JobType job, Versions versions, Application application) {
        Optional<JobStatus> jobStatus = application.deploymentJobs().statusOf(job);
        if (!jobStatus.isPresent()) return true;                   // never run before
        if (jobStatus.get().isSuccess()) return true;              // currently succeeding
        if (!jobStatus.get().lastCompleted().isPresent()) return true;
        if (!jobStatus.get().firstFailing().isPresent()) return true;
        if (!versions.targetsMatch(jobStatus.get().lastCompleted().get())) return true; // targets changed since last run
        if (application.deploymentSpec().upgradePolicy() == DeploymentSpec.UpgradePolicy.canary) return true; // canaries always retry
        Instant firstFailing = jobStatus.get().firstFailing().get().at();
        Instant lastCompleted = jobStatus.get().lastCompleted().get().at();
        // Back off retries as failures age: immediate within the first minute, then every 10 minutes
        // within the first hour, then every 2 hours. Out-of-capacity test jobs retry every minute.
        if (firstFailing.isAfter(instant.minus(Duration.ofMinutes(1)))) return true;
        if (job.isTest() && jobStatus.get().isOutOfCapacity()) {
            return lastCompleted.isBefore(instant.minus(Duration.ofMinutes(1)));
        }
        if (firstFailing.isAfter(instant.minus(Duration.ofHours(1)))) {
            return lastCompleted.isBefore(instant.minus(Duration.ofMinutes(10)));
        }
        return lastCompleted.isBefore(instant.minus(Duration.ofHours(2)));
    }

    /** Returns the production jobs of the given application which are currently running. */
    private List<JobType> runningProductionJobs(Application application) {
        return application.deploymentJobs().jobStatus().keySet().parallelStream()
                          .filter(JobType::isProduction)
                          .filter(job -> isRunning(application, job))
                          .collect(toList());
    }

    /** Returns whether the given job is currently running; false if completed since last triggered, asking the build service otherwise. */
    private boolean isRunning(Application application, JobType jobType) {
        return ! application.deploymentJobs().statusOf(jobType)
                            .flatMap(job -> job.lastCompleted().map(run -> run.at().isAfter(job.lastTriggered().get().at())))
                            .orElse(false)
               && EnumSet.of(running, queued).contains(jobStateOf(application, jobType));
    }

    /** Returns the build-system state of the given job: the internal runner for internal deployments, the build service otherwise. */
    private JobState jobStateOf(Application application, JobType jobType) {
        if (application.deploymentJobs().deployedInternally()) {
            Optional<Run> run = controller.jobController().last(application.id(), jobType);
            return run.isPresent() && ! run.get().hasEnded() ? JobState.running : JobState.idle;
        }
        return buildService.stateOf(BuildJob.of(application.id(), application.deploymentJobs().projectId().getAsLong(), jobType.jobName()));
    }

    /**
     * Returns whether the given change is complete for the given application for the given job.
     *
     * Any job is complete if the given change is already successful on that job.
     * A production job is also considered complete if its current change is strictly dominated by what
     * is already deployed in its zone, i.e., no parts of the change are upgrades, and the full current
     * change for the application downgrades the deployment, which is an acknowledgement that the deployed
     * version is broken somehow, such that the job may be locked in failure until a new version is released.
     */
    public boolean isComplete(Change change, Application application, JobType jobType) {
        Optional<Deployment> existingDeployment = deploymentFor(application, jobType);
        return application.deploymentJobs().statusOf(jobType).flatMap(JobStatus::lastSuccess)
                          .map(job ->    change.platform().map(job.platform()::equals).orElse(true)
                                      && change.application().map(job.application()::equals).orElse(true))
                          .orElse(false)
               ||    jobType.isProduction()
                  && existingDeployment.map(deployment ->    ! isUpgrade(change, deployment)
                                                          && isDowngrade(application.change(), deployment))
                                       .orElse(false);
    }

    /** Returns whether the change upgrades the platform or application version of the deployment. */
    private static boolean isUpgrade(Change change, Deployment deployment) {
        return change.upgrades(deployment.version()) || change.upgrades(deployment.applicationVersion());
    }

    /** Returns whether the change downgrades the platform or application version of the deployment. */
    private static boolean isDowngrade(Change change, Deployment deployment) {
        return change.downgrades(deployment.version()) || change.downgrades(deployment.applicationVersion());
    }

    /** Returns whether the given versions are tested (both test jobs succeeded), or already running in production. */
    private boolean isTested(Application application, Versions versions) {
        return    testedIn(application, systemTest, versions)
               && testedIn(application, stagingTest, versions)
               || alreadyTriggered(application, versions);
    }

    /**
     * Returns whether the given versions have been successfully tested in the given test job.
     *
     * @throws IllegalArgumentException if testType is not a test job
     */
    public boolean testedIn(Application application, JobType testType, Versions versions) {
        if (testType == systemTest)
            return successOn(application, systemTest, versions).isPresent();
        if (testType == stagingTest)
            return successOn(application, stagingTest, versions).filter(versions::sourcesMatchIfPresent).isPresent();
        throw new IllegalArgumentException(testType + " is not a test job!");
    }

    /** Returns whether any production job has already been triggered with the given versions. */
    public boolean alreadyTriggered(Application application, Versions versions) {
        return application.deploymentJobs().jobStatus().values().stream()
                          .filter(job -> job.type().isProduction())
                          .anyMatch(job -> job.lastTriggered()
                                              .filter(versions::targetsMatch)
                                              .filter(versions::sourcesMatchIfPresent)
                                              .isPresent());
    }

    /** Returns whether a newly built application version may replace the current change right away. */
    private boolean acceptNewApplicationVersion(Application application) {
        if ( ! application.deploymentSpec().canChangeRevisionAt(clock.instant())) return false;
        if (application.change().application().isPresent()) return true;  // an application change may be replaced by another
        if (application.deploymentJobs().hasFailures()) return true;      // a failing change may be replaced
        return ! application.change().platform().isPresent();             // otherwise, only accept when no platform change is in flight
    }

    /** Returns the part of the application's current change which is not yet complete on all relevant jobs. */
    private Change remainingChange(Application application) {
        DeploymentSteps steps = steps(application.deploymentSpec());
        List<JobType> jobs = steps.production().isEmpty()
                ? steps.testJobs()
                : steps.productionJobs();

        Change change = application.change();
        if (jobs.stream().allMatch(job -> isComplete(application.change().withoutApplication(), application, job)))
            change = change.withoutPlatform();

        if (jobs.stream().allMatch(job -> isComplete(application.change().withoutPlatform(), application, job)))
            change = change.withoutApplication();

        return change;
    }

    /**
     * Returns the list of test jobs that should run now, and that need to succeed on the given versions for it to be considered tested.
     */
    private List<Job> testJobs(Application application, Versions versions, String reason, Instant availableSince) {
        List<Job> jobs = new ArrayList<>();
        for (JobType jobType : steps(application.deploymentSpec()).testJobs()) {
            // System tests need not match the source versions; staging tests do.
            Optional<JobRun> completion = successOn(application, jobType, versions)
                    .filter(run -> versions.sourcesMatchIfPresent(run) || jobType == systemTest);
            if ( ! completion.isPresent() && canTrigger(jobType, versions, application))
                jobs.add(deploymentJob(application, versions, application.change(), jobType, reason, availableSince));
        }
        return jobs;
    }

    /** Creates a Job for the given parameters, marking it a retry when the previous run was out of capacity. */
    private Job deploymentJob(Application application, Versions versions, Change change, JobType jobType, String reason, Instant availableSince) {
        boolean isRetry = application.deploymentJobs().statusOf(jobType)
                                     .map(JobStatus::isOutOfCapacity)
                                     .orElse(false);
        if (isRetry) reason += "; retrying on out of capacity";

        JobRun triggering = JobRun.triggering(versions.targetPlatform(), versions.targetApplication(), versions.sourcePlatform(), versions.sourceApplication(), reason, clock.instant());
        return new Job(application, triggering, jobType, availableSince, isRetry, change.application().isPresent());
    }

    /** A triggerable job together with its triggering information and scheduling metadata. */
    private static class Job extends BuildJob {

        private final JobType jobType;
        private final JobRun triggering;
        private final Instant availableSince;     // when the job became ready; used for ordering
        private final boolean isRetry;            // retry after an out-of-capacity failure
        private final boolean isApplicationUpgrade;

        private Job(Application application, JobRun triggering, JobType jobType, Instant availableSince, boolean isRetry, boolean isApplicationUpgrade) {
            super(application.id(), application.deploymentJobs().projectId().getAsLong(), jobType.jobName());
            this.jobType = jobType;
            this.triggering = triggering;
            this.availableSince = availableSince;
            this.isRetry = isRetry;
            this.isApplicationUpgrade = isApplicationUpgrade;
        }

        JobType jobType() { return jobType; }
        Instant availableSince() { return availableSince; }
        boolean isRetry() { return isRetry; }
        boolean applicationUpgrade() { return isApplicationUpgrade; }

    }

}
class DeploymentTrigger { private final static Logger log = Logger.getLogger(DeploymentTrigger.class.getName()); private final Controller controller; private final Clock clock; private final BuildService buildService; private final JobController jobs; public DeploymentTrigger(Controller controller, BuildService buildService, Clock clock) { this.controller = Objects.requireNonNull(controller, "controller cannot be null"); this.clock = Objects.requireNonNull(clock, "clock cannot be null"); this.buildService = Objects.requireNonNull(buildService, "buildService cannot be null"); this.jobs = controller.jobController(); } public DeploymentSteps steps(DeploymentSpec spec) { return new DeploymentSteps(spec, controller::system); } /** * Records information when a job completes (successfully or not). This information is used when deciding what to * trigger next. */ public void notifyOfCompletion(JobReport report) { log.log(LogLevel.INFO, String.format("Notified of %s for %s of %s (%d)", report.jobError().map(e -> e.toString() + " error") .orElse("success"), report.jobType(), report.applicationId(), report.projectId())); if ( ! 
applications().get(report.applicationId()).isPresent()) { log.log(LogLevel.WARNING, "Ignoring completion of job of project '" + report.projectId() + "': Unknown application '" + report.applicationId() + "'"); return; } applications().lockOrThrow(report.applicationId(), application -> { JobRun triggering; if (report.jobType() == component) { ApplicationVersion applicationVersion = ApplicationVersion.from(report.sourceRevision().get(), report.buildNumber()); triggering = JobRun.triggering(application.get().oldestDeployedPlatform().orElse(controller.systemVersion()), applicationVersion, Optional.empty(), Optional.empty(), "Application commit", clock.instant()); if (report.success()) { if (acceptNewApplicationVersion(application.get())) { application = application.withChange(application.get().change().with(applicationVersion)) .withOutstandingChange(Change.empty()); if (application.get().deploymentJobs().deployedInternally()) for (Run run : jobs.active()) if (run.id().application().equals(report.applicationId())) jobs.abort(run.id()); } else application = application.withOutstandingChange(Change.of(applicationVersion)); } } else { triggering = application.get().deploymentJobs().statusOf(report.jobType()) .filter(job -> job.lastTriggered().isPresent() && job.lastCompleted() .map(completion -> ! completion.at().isAfter(job.lastTriggered().get().at())) .orElse(true)) .orElseThrow(() -> new IllegalStateException("Notified of completion of " + report.jobType().jobName() + " for " + report.applicationId() + ", but that has neither been triggered nor deployed")) .lastTriggered().get(); } application = application.withJobCompletion(report.projectId(), report.jobType(), triggering.completion(report.buildNumber(), clock.instant()), report.jobError()); application = application.withChange(remainingChange(application.get())); applications().store(application); }); } /** Returns a map of jobs that are scheduled to be run, grouped by the job type */ public Map<JobType, ? 
extends List<? extends BuildJob>> jobsToRun() { return computeReadyJobs().stream().collect(groupingBy(Job::jobType)); } /** * Finds and triggers jobs that can and should run but are currently not, and returns the number of triggered jobs. * * Only one job is triggered each run for test jobs, since their environments have limited capacity. */ public long triggerReadyJobs() { return computeReadyJobs().stream() .collect(partitioningBy(job -> job.jobType().isTest())) .entrySet().stream() .flatMap(entry -> (entry.getKey() ? entry.getValue().stream() .sorted(comparing(Job::isRetry) .thenComparing(Job::applicationUpgrade) .reversed() .thenComparing(Job::availableSince)) .collect(groupingBy(Job::jobType)) : entry.getValue().stream() .collect(groupingBy(Job::applicationId))) .values().stream() .map(jobs -> (Supplier<Long>) jobs.stream() .filter(this::trigger) .limit(entry.getKey() ? 1 : Long.MAX_VALUE)::count)) .parallel().map(Supplier::get).reduce(0L, Long::sum); } /** * Attempts to trigger the given job for the given application and returns the outcome. * * If the build service can not find the given job, or claims it is illegal to trigger it, * the project id is removed from the application owning the job, to prevent further trigger attempts. 
*/ public boolean trigger(Job job) { log.log(LogLevel.INFO, String.format("Triggering %s: %s", job, job.triggering)); try { applications().lockOrThrow(job.applicationId(), application -> { if (application.get().deploymentJobs().deployedInternally()) jobs.start(job.applicationId(), job.jobType, new Versions(job.triggering.platform(), job.triggering.application(), job.triggering.sourcePlatform(), job.triggering.sourceApplication())); else buildService.trigger(job); applications().store(application.withJobTriggering(job.jobType, job.triggering)); }); return true; } catch (RuntimeException e) { log.log(LogLevel.WARNING, "Exception triggering " + job + ": " + e); if (e instanceof NoSuchElementException || e instanceof IllegalArgumentException) applications().lockOrThrow(job.applicationId(), application -> applications().store(application.withProjectId(OptionalLong.empty()))); return false; } } /** Force triggering of a job for given application. */ public List<JobType> forceTrigger(ApplicationId applicationId, JobType jobType, String user) { Application application = applications().require(applicationId); if (jobType == component) { if (application.deploymentJobs().deployedInternally()) throw new IllegalArgumentException(applicationId + " has no component job we can trigger."); buildService.trigger(BuildJob.of(applicationId, application.deploymentJobs().projectId().getAsLong(), jobType.jobName())); return singletonList(component); } Versions versions = Versions.from(application.change(), application, deploymentFor(application, jobType), controller.systemVersion()); String reason = "Job triggered manually by " + user; return (jobType.isProduction() && ! isTested(application, versions) ? 
testJobs(application, versions, reason, clock.instant()).stream() : Stream.of(deploymentJob(application, versions, application.change(), jobType, reason, clock.instant()))) .peek(this::trigger) .map(Job::jobType).collect(toList()); } /** * Triggers a change of this application * * @param applicationId the application to trigger * @throws IllegalArgumentException if this application already has an ongoing change */ public void triggerChange(ApplicationId applicationId, Change change) { applications().lockOrThrow(applicationId, application -> { if (application.get().change().isPresent() && ! application.get().deploymentJobs().hasFailures()) throw new IllegalArgumentException("Could not start " + change + " on " + application + ": " + application.get().change() + " is already in progress"); application = application.withChange(change); if (change.application().isPresent()) application = application.withOutstandingChange(Change.empty()); applications().store(application); }); } /** Cancels a platform upgrade of the given application, and an application upgrade as well if {@code keepApplicationChange}. 
*/
    public void cancelChange(ApplicationId applicationId, boolean keepApplicationChange) {
        applications().lockOrThrow(applicationId, application -> {
            // The platform part is always dropped; the application part survives only when asked to keep it.
            applications().store(application.withChange(application.get().change().application()
                                                                   .filter(__ -> keepApplicationChange)
                                                                   .map(Change::of)
                                                                   .orElse(Change.empty())));
        });
    }

    private ApplicationController applications() { return controller.applications(); }

    /** Returns the last successful run of the given job, provided it targeted the given versions. */
    private Optional<JobRun> successOn(Application application, JobType jobType, Versions versions) {
        return application.deploymentJobs().statusOf(jobType).flatMap(JobStatus::lastSuccess)
                          .filter(versions::targetsMatch);
    }

    /** Returns the deployment, if any, of the given application in the zone the given job deploys to. */
    private Optional<Deployment> deploymentFor(Application application, JobType jobType) {
        return Optional.ofNullable(application.deployments().get(jobType.zone(controller.system())));
    }

    /** Returns the greater of the two optionals, or whichever is present, or empty when both are empty. */
    private static <T extends Comparable<T>> Optional<T> max(Optional<T> o1, Optional<T> o2) {
        return ! o1.isPresent() ? o2 : ! o2.isPresent() ? o1 : o1.get().compareTo(o2.get()) >= 0 ? o1 : o2;
    }

    /** Returns the set of all jobs which have changes to propagate from the upstream steps. */
    private List<Job> computeReadyJobs() {
        return ApplicationList.from(applications().asList())
                              .withProjectId()
                              .withChanges()
                              .idList().stream()
                              .map(this::computeReadyJobs)
                              .flatMap(Collection::stream)
                              .collect(toList());
    }

    /** Returns whether given job should be triggered */
    private boolean canTrigger(JobType job, Versions versions, Application application, List<JobType> parallelJobs) {
        if (jobStateOf(application, job) != idle) return false;
        // When the job is part of a parallel step, all currently running production jobs must belong to that step.
        if (parallelJobs != null && ! parallelJobs.containsAll(runningProductionJobs(application))) return false;
        if (job.isProduction() && isSuspendedInAnotherZone(application, job.zone(controller.system()))) return false;
        return triggerAt(clock.instant(), job, versions, application);
    }

    /** Returns whether given job should be triggered */
    private boolean canTrigger(JobType job, Versions versions, Application application) {
        return canTrigger(job, versions, application, null);
    }

    /** Returns whether the application is suspended in any production zone other than the given one. */
    private boolean isSuspendedInAnotherZone(Application application, ZoneId zone) {
        for (Deployment deployment : application.productionDeployments().values()) {
            if (   ! deployment.zone().equals(zone)
                &&   controller.applications().isSuspended(new DeploymentId(application.id(), deployment.zone())))
                return true;
        }
        return false;
    }

    /** Returns whether the given job can trigger at the given instant */
    public boolean triggerAt(Instant instant, JobType job, Versions versions, Application application) {
        Optional<JobStatus> jobStatus = application.deploymentJobs().statusOf(job);
        if (!jobStatus.isPresent()) return true;                        // Job has never run.
        if (jobStatus.get().isSuccess()) return true;                   // Job isn't failing.
        if (!jobStatus.get().lastCompleted().isPresent()) return true;  // Job has not completed a first run yet.
        if (!jobStatus.get().firstFailing().isPresent()) return true;   // Not failing, but also not successful.
        if (!versions.targetsMatch(jobStatus.get().lastCompleted().get())) return true; // Previous completion targeted different versions.
        if (application.deploymentSpec().upgradePolicy() == DeploymentSpec.UpgradePolicy.canary) return true; // Canaries are always retried immediately.
        Instant firstFailing = jobStatus.get().firstFailing().get().at();
        Instant lastCompleted = jobStatus.get().lastCompleted().get().at();
        // Retry immediately for the first minute of failures.
        if (firstFailing.isAfter(instant.minus(Duration.ofMinutes(1)))) return true;
        // Out-of-capacity test jobs are retried once a minute, since capacity may free up at any time.
        if (job.isTest() && jobStatus.get().isOutOfCapacity()) {
            return lastCompleted.isBefore(instant.minus(Duration.ofMinutes(1)));
        }
        // Other failures: every 10 minutes during the first hour, then every 2 hours.
        if (firstFailing.isAfter(instant.minus(Duration.ofHours(1)))) {
            return lastCompleted.isBefore(instant.minus(Duration.ofMinutes(10)));
        }
        return lastCompleted.isBefore(instant.minus(Duration.ofHours(2)));
    }

    /** Returns the production jobs of the given application which are currently running. */
    private List<JobType> runningProductionJobs(Application application) {
        return application.deploymentJobs().jobStatus().keySet().parallelStream()
                          .filter(JobType::isProduction)
                          .filter(job -> isRunning(application, job))
                          .collect(toList());
    }

    /** Returns whether the given job is currently running; false if completed since last triggered, asking the build service otherwise. */
    private boolean isRunning(Application application, JobType jobType) {
        return    ! application.deploymentJobs().statusOf(jobType)
                               .flatMap(job -> job.lastCompleted().map(run -> run.at().isAfter(job.lastTriggered().get().at())))
                               .orElse(false)
               &&   EnumSet.of(running, queued).contains(jobStateOf(application, jobType));
    }

    /** Returns the state of the given job, from the internal job controller for internally deployed applications, otherwise from the build service. */
    private JobState jobStateOf(Application application, JobType jobType) {
        if (application.deploymentJobs().deployedInternally()) {
            Optional<Run> run = controller.jobController().last(application.id(), jobType);
            return run.isPresent() && ! run.get().hasEnded() ? JobState.running : JobState.idle;
        }
        return buildService.stateOf(BuildJob.of(application.id(),
                                                application.deploymentJobs().projectId().getAsLong(),
                                                jobType.jobName()));
    }

    /**
     * Returns whether the given change is complete for the given application for the given job.
     *
     * Any job is complete if the given change is already successful on that job.
     * A production job is also considered complete if its current change is strictly dominated by what
     * is already deployed in its zone, i.e., no parts of the change are upgrades, and the full current
     * change for the application downgrades the deployment, which is an acknowledgement that the deployed
     * version is broken somehow, such that the job may be locked in failure until a new version is released.
     */
    public boolean isComplete(Change change, Application application, JobType jobType) {
        Optional<Deployment> existingDeployment = deploymentFor(application, jobType);
        return    application.deploymentJobs().statusOf(jobType).flatMap(JobStatus::lastSuccess)
                             .map(job ->    change.platform().map(job.platform()::equals).orElse(true)
                                         && change.application().map(job.application()::equals).orElse(true))
                             .orElse(false)
               ||    jobType.isProduction()
                  && existingDeployment.map(deployment ->    ! isUpgrade(change, deployment)
                                                          &&   isDowngrade(application.change(), deployment))
                                       .orElse(false);
    }

    /** Returns whether the given change upgrades either the platform or application version of the deployment. */
    private static boolean isUpgrade(Change change, Deployment deployment) {
        return change.upgrades(deployment.version()) || change.upgrades(deployment.applicationVersion());
    }

    /** Returns whether the given change downgrades either the platform or application version of the deployment. */
    private static boolean isDowngrade(Change change, Deployment deployment) {
        return change.downgrades(deployment.version()) || change.downgrades(deployment.applicationVersion());
    }

    /** Returns whether the given versions have passed both test jobs, or have already been triggered in production. */
    private boolean isTested(Application application, Versions versions) {
        return    testedIn(application, systemTest, versions)
               && testedIn(application, stagingTest, versions)
               || alreadyTriggered(application, versions);
    }

    /** Returns whether the given test job has succeeded on the given versions; throws for non-test jobs. */
    public boolean testedIn(Application application, JobType testType, Versions versions) {
        if (testType == systemTest)
            return successOn(application, systemTest, versions).isPresent();
        if (testType == stagingTest)
            return successOn(application, stagingTest, versions).filter(versions::sourcesMatchIfPresent).isPresent();
        throw new IllegalArgumentException(testType + " is not a test job!");
    }

    /** Returns whether some production job has already been triggered with the given versions. */
    public boolean alreadyTriggered(Application application, Versions versions) {
        return application.deploymentJobs().jobStatus().values().stream()
                          .filter(job -> job.type().isProduction())
                          .anyMatch(job -> job.lastTriggered()
                                              .filter(versions::targetsMatch)
                                              .filter(versions::sourcesMatchIfPresent)
                                              .isPresent());
    }

    /** Returns whether a new application version may (re)place the current change of the application. */
    private boolean acceptNewApplicationVersion(Application application) {
        if ( ! application.deploymentSpec().canChangeRevisionAt(clock.instant())) return false;
        if (application.change().application().isPresent()) return true; // Replacing a previous application change is ok.
        if (application.deploymentJobs().hasFailures()) return true;     // Allow changes to fix problems.
        return ! application.change().platform().isPresent();            // Otherwise, only when no platform change is rolling out.
    }

    /** Returns the part of the application's current change which is not yet complete for all relevant jobs. */
    private Change remainingChange(Application application) {
        DeploymentSteps steps = steps(application.deploymentSpec());
        List<JobType> jobs = steps.production().isEmpty()
                ? steps.testJobs()
                : steps.productionJobs();
        Change change = application.change();
        // Completeness of the platform part is checked with the application part stripped away, and vice versa.
        if (jobs.stream().allMatch(job -> isComplete(application.change().withoutApplication(), application, job)))
            change = change.withoutPlatform();
        if (jobs.stream().allMatch(job -> isComplete(application.change().withoutPlatform(), application, job)))
            change = change.withoutApplication();
        return change;
    }

    /**
     * Returns the list of test jobs that should run now, and that need to succeed on the given versions for it to be considered tested.
     */
    private List<Job> testJobs(Application application, Versions versions, String reason, Instant availableSince) {
        List<Job> jobs = new ArrayList<>();
        for (JobType jobType : steps(application.deploymentSpec()).testJobs()) {
            // staging tests must also have matching sources; system tests need not.
            Optional<JobRun> completion = successOn(application, jobType, versions)
                    .filter(run -> versions.sourcesMatchIfPresent(run) || jobType == systemTest);
            if ( ! completion.isPresent() && canTrigger(jobType, versions, application))
                jobs.add(deploymentJob(application, versions, application.change(), jobType, reason, availableSince));
        }
        return jobs;
    }

    /** Creates a Job for the given parameters, marking it as a retry when the previous run failed on lack of capacity. */
    private Job deploymentJob(Application application, Versions versions, Change change, JobType jobType, String reason, Instant availableSince) {
        boolean isRetry = application.deploymentJobs().statusOf(jobType)
                                     .map(JobStatus::isOutOfCapacity)
                                     .orElse(false);
        if (isRetry) reason += "; retrying on out of capacity";

        JobRun triggering = JobRun.triggering(versions.targetPlatform(), versions.targetApplication(),
                                              versions.sourcePlatform(), versions.sourceApplication(),
                                              reason, clock.instant());
        return new Job(application, triggering, jobType, availableSince, isRetry, change.application().isPresent());
    }

    /** A job to trigger, with the data for the triggering run and the hints used to order jobs. */
    private static class Job extends BuildJob {

        private final JobType jobType;
        private final JobRun triggering;
        private final Instant availableSince; // When the change first became available to this job.
        private final boolean isRetry;
        private final boolean isApplicationUpgrade;

        private Job(Application application, JobRun triggering, JobType jobType, Instant availableSince,
                    boolean isRetry, boolean isApplicationUpgrade) {
            super(application.id(), application.deploymentJobs().projectId().getAsLong(), jobType.jobName());
            this.jobType = jobType;
            this.triggering = triggering;
            this.availableSince = availableSince;
            this.isRetry = isRetry;
            this.isApplicationUpgrade = isApplicationUpgrade;
        }

        JobType jobType() { return jobType; }
        Instant availableSince() { return availableSince; }
        boolean isRetry() { return isRetry; }
        boolean applicationUpgrade() { return isApplicationUpgrade; }

    }

}
"Merging" changes like this is really what you are doing in the code I commented on earlier today as well. It would be nice to move that logic into Change itself. Perhaps three methods: one taking a Change, and two taking Optionals containing the two kinds of change. "updatedWith" is the best name suggestion I can come up with.
/** Overrides the given application's platform and application changes with any contained in the given change. */
public void forceChange(ApplicationId applicationId, Change change) {
    applications().lockOrThrow(applicationId, application -> {
        // Merge each part of the given change, when present, into the change currently stored on the application.
        Change stored = application.get().change();
        Change withPlatform = change.platform().map(stored::with).orElse(stored);
        Change merged = change.application().map(withPlatform::with).orElse(withPlatform);
        applications().store(application.withChange(merged));
    });
}
// Start from the change currently stored on the application; overrides are merged into this.
Change current = application.get().change();
/**
 * Overrides the given application's platform and application changes with any contained in the given change.
 *
 * @param applicationId the application whose stored change to override
 * @param change the change whose present parts replace the corresponding parts of the stored change
 */
public void forceChange(ApplicationId applicationId, Change change) {
    applications().lockOrThrow(applicationId, application -> {
        Change current = application.get().change();
        // Only the parts present in the given change override the stored ones; absent parts leave the stored change untouched.
        if (change.platform().isPresent()) current = current.with(change.platform().get());
        if (change.application().isPresent()) current = current.with(change.application().get());
        applications().store(application.withChange(current));
    });
}
/**
 * Computes which deployment jobs should run for each application with changes to roll out,
 * and triggers them, either through the configured build service or the internal job controller.
 */
class DeploymentTrigger {

    private final static Logger log = Logger.getLogger(DeploymentTrigger.class.getName());

    private final Controller controller;
    private final Clock clock;
    private final BuildService buildService;
    private final JobController jobs;

    public DeploymentTrigger(Controller controller, BuildService buildService, Clock clock) {
        this.controller = Objects.requireNonNull(controller, "controller cannot be null");
        this.clock = Objects.requireNonNull(clock, "clock cannot be null");
        this.buildService = Objects.requireNonNull(buildService, "buildService cannot be null");
        this.jobs = controller.jobController();
    }

    /** Returns the deployment steps of the given deployment spec, in this controller's system. */
    public DeploymentSteps steps(DeploymentSpec spec) {
        return new DeploymentSteps(spec, controller::system);
    }

    /**
     * Records information when a job completes (successfully or not). This information is used when deciding what to
     * trigger next.
     */
    public void notifyOfCompletion(JobReport report) {
        log.log(LogLevel.INFO, String.format("Notified of %s for %s of %s (%d)",
                                             report.jobError().map(e -> e.toString() + " error")
                                                   .orElse("success"),
                                             report.jobType(),
                                             report.applicationId(),
                                             report.projectId()));
        if ( ! applications().get(report.applicationId()).isPresent()) {
            log.log(LogLevel.WARNING, "Ignoring completion of job of project '" + report.projectId() +
                                      "': Unknown application '" + report.applicationId() + "'");
            return;
        }

        applications().lockOrThrow(report.applicationId(), application -> {
            JobRun triggering;
            if (report.jobType() == component) {
                // A component build completed: record the new application version, and make it the current
                // change when allowed, or keep it as the outstanding change otherwise.
                ApplicationVersion applicationVersion = ApplicationVersion.from(report.sourceRevision().get(), report.buildNumber());
                triggering = JobRun.triggering(application.get().oldestDeployedPlatform().orElse(controller.systemVersion()),
                                               applicationVersion,
                                               Optional.empty(), Optional.empty(), "Application commit", clock.instant());
                if (report.success()) {
                    if (acceptNewApplicationVersion(application.get())) {
                        application = application.withChange(application.get().change().with(applicationVersion))
                                                 .withOutstandingChange(Change.empty());
                        // Internally deployed applications: abort any active runs for this application,
                        // since the change they were rolling out has been superseded.
                        if (application.get().deploymentJobs().deployedInternally())
                            for (Run run : jobs.active())
                                if (run.id().application().equals(report.applicationId()))
                                    jobs.abort(run.id());
                    }
                    else
                        application = application.withOutstandingChange(Change.of(applicationVersion));
                }
            }
            else {
                // A deployment job completed: find the triggering run it completes — the job must have been
                // triggered, and not already have completed after that triggering.
                triggering = application.get().deploymentJobs().statusOf(report.jobType())
                                        .filter(job ->    job.lastTriggered().isPresent()
                                                       && job.lastCompleted()
                                                             .map(completion -> ! completion.at().isAfter(job.lastTriggered().get().at()))
                                                             .orElse(true))
                                        .orElseThrow(() -> new IllegalStateException("Notified of completion of " + report.jobType().jobName() + " for " +
                                                                                     report.applicationId() + ", but that has neither been triggered nor deployed"))
                                        .lastTriggered().get();
            }
            application = application.withJobCompletion(report.projectId(),
                                                        report.jobType(),
                                                        triggering.completion(report.buildNumber(), clock.instant()),
                                                        report.jobError());
            // Shrink the stored change to whatever is still not complete after this completion.
            application = application.withChange(remainingChange(application.get()));
            applications().store(application);
        });
    }

    /** Returns a map of jobs that are scheduled to be run, grouped by the job type */
    public Map<JobType, ? extends List<? extends BuildJob>> jobsToRun() {
        return computeReadyJobs().stream().collect(groupingBy(Job::jobType));
    }

    /**
     * Finds and triggers jobs that can and should run but are currently not, and returns the number of triggered jobs.
     *
     * Only one job is triggered each run for test jobs, since their environments have limited capacity.
     */
    public long triggerReadyJobs() {
        return computeReadyJobs().stream()
                                 .collect(partitioningBy(job -> job.jobType().isTest()))
                                 .entrySet().stream()
                                 .flatMap(entry -> (entry.getKey()
                                         // Test jobs: sort by retry first, then application upgrades, then age,
                                         // and limit to one trigger per job type, as test capacity is limited.
                                         ? entry.getValue().stream()
                                                .sorted(comparing(Job::isRetry)
                                                                .thenComparing(Job::applicationUpgrade)
                                                                .reversed()
                                                                .thenComparing(Job::availableSince))
                                                .collect(groupingBy(Job::jobType))
                                         // Production jobs: group per application, with no trigger limit.
                                         : entry.getValue().stream()
                                                .collect(groupingBy(Job::applicationId)))
                                         .values().stream()
                                         .map(jobs -> (Supplier<Long>) jobs.stream()
                                                                           .filter(this::trigger)
                                                                           .limit(entry.getKey() ? 1 : Long.MAX_VALUE)::count))
                                 .parallel().map(Supplier::get).reduce(0L, Long::sum);
    }

    /**
     * Attempts to trigger the given job for the given application and returns the outcome.
     *
     * If the build service can not find the given job, or claims it is illegal to trigger it,
     * the project id is removed from the application owning the job, to prevent further trigger attempts.
     */
    public boolean trigger(Job job) {
        log.log(LogLevel.INFO, String.format("Triggering %s: %s", job, job.triggering));
        try {
            applications().lockOrThrow(job.applicationId(), application -> {
                if (application.get().deploymentJobs().deployedInternally())
                    jobs.start(job.applicationId(), job.jobType, new Versions(job.triggering.platform(),
                                                                              job.triggering.application(),
                                                                              job.triggering.sourcePlatform(),
                                                                              job.triggering.sourceApplication()));
                else
                    buildService.trigger(job);
                applications().store(application.withJobTriggering(job.jobType, job.triggering));
            });
            return true;
        }
        catch (RuntimeException e) {
            log.log(LogLevel.WARNING, "Exception triggering " + job + ": " + e);
            if (e instanceof NoSuchElementException || e instanceof IllegalArgumentException)
                applications().lockOrThrow(job.applicationId(), application ->
                        applications().store(application.withProjectId(OptionalLong.empty())));
            return false;
        }
    }

    /** Force triggering of a job for given application. */
    public List<JobType> forceTrigger(ApplicationId applicationId, JobType jobType, String user) {
        Application application = applications().require(applicationId);
        if (jobType == component) {
            if (application.deploymentJobs().deployedInternally())
                throw new IllegalArgumentException(applicationId + " has no component job we can trigger.");

            buildService.trigger(BuildJob.of(applicationId, application.deploymentJobs().projectId().getAsLong(), jobType.jobName()));
            return singletonList(component);
        }
        Versions versions = Versions.from(application.change(), application, deploymentFor(application, jobType),
                                          controller.systemVersion());
        String reason = "Job triggered manually by " + user;
        // Untested production jobs trigger their test jobs instead; everything else triggers directly.
        return (jobType.isProduction() && ! isTested(application, versions)
                ? testJobs(application, versions, reason, clock.instant()).stream()
                : Stream.of(deploymentJob(application, versions, application.change(), jobType, reason, clock.instant())))
                .peek(this::trigger)
                .map(Job::jobType).collect(toList());
    }

    /** Triggers a change of this application, unless it already has a change. */
    public void triggerChange(ApplicationId applicationId, Change change) {
        applications().lockOrThrow(applicationId, application -> {
            if ( ! application.get().change().isPresent()) {
                // The outstanding application change, if any, is consumed by setting an application change here.
                if (change.application().isPresent())
                    application = application.withOutstandingChange(Change.empty());
                applications().store(application.withChange(change));
            }
        });
    }

    /** Cancels the indicated parts of the given application's change, as given by the {@link ChangesToCancel} choice. */
    public void cancelChange(ApplicationId applicationId, ChangesToCancel cancellation) {
        applications().lockOrThrow(applicationId, application -> {
            Change change;
            switch (cancellation) {
                case ALL: change = Change.empty(); break;
                case PLATFORM: change = application.get().change().withoutPlatform(); break;
                case APPLICATION: change = application.get().change().withoutApplication(); break;
                default: throw new IllegalArgumentException("Unknown cancellation choice '" + cancellation + "'!");
            }
            applications().store(application.withChange(change));
        });
    }

    public enum ChangesToCancel { ALL, PLATFORM, APPLICATION }

    private ApplicationController applications() { return controller.applications(); }

    /** Returns the last successful run of the given job, provided it targeted the given versions. */
    private Optional<JobRun> successOn(Application application, JobType jobType, Versions versions) {
        return application.deploymentJobs().statusOf(jobType).flatMap(JobStatus::lastSuccess)
                          .filter(versions::targetsMatch);
    }

    /** Returns the deployment, if any, of the given application in the zone the given job deploys to. */
    private Optional<Deployment> deploymentFor(Application application, JobType jobType) {
        return Optional.ofNullable(application.deployments().get(jobType.zone(controller.system())));
    }

    /** Returns the greater of the two optionals, or whichever is present, or empty when both are empty. */
    private static <T extends Comparable<T>> Optional<T> max(Optional<T> o1, Optional<T> o2) {
        return ! o1.isPresent() ? o2 : ! o2.isPresent() ? o1 : o1.get().compareTo(o2.get()) >= 0 ? o1 : o2;
    }

    /** Returns the set of all jobs which have changes to propagate from the upstream steps. */
    private List<Job> computeReadyJobs() {
        return ApplicationList.from(applications().asList())
                              .withProjectId()
                              .withChanges()
                              .idList().stream()
                              .map(this::computeReadyJobs)
                              .flatMap(Collection::stream)
                              .collect(toList());
    }

    /**
     * Finds the next step to trigger for the given application, if any, and returns these as a list.
     */
    private List<Job> computeReadyJobs(ApplicationId id) {
        List<Job> jobs = new ArrayList<>();
        applications().get(id).ifPresent(application -> {
            Change change = application.change();
            // When the change last passed both test jobs, if ever.
            Optional<Instant> completedAt = max(application.deploymentJobs().statusOf(systemTest)
                                                           .<Instant>flatMap(job -> job.lastSuccess().map(JobRun::at)),
                                                application.deploymentJobs().statusOf(stagingTest)
                                                           .<Instant>flatMap(job -> job.lastSuccess().map(JobRun::at)));
            String reason = "New change available";
            List<Job> testJobs = null; // null means "not yet decided", while empty means "don't schedule any".
            DeploymentSteps steps = steps(application.deploymentSpec());
            if (change.isPresent()) {
                for (Step step : steps.production()) {
                    List<JobType> stepJobs = steps.toJobs(step);
                    List<JobType> remainingJobs = stepJobs.stream().filter(job -> ! isComplete(change, application, job)).collect(toList());
                    if (!remainingJobs.isEmpty()) { // Step is incomplete: trigger ready jobs, or test jobs when untested.
                        for (JobType job : remainingJobs) {
                            Versions versions = Versions.from(change, application, deploymentFor(application, job),
                                                              controller.systemVersion());
                            if (isTested(application, versions)) {
                                if (completedAt.isPresent() && canTrigger(job, versions, application, stepJobs)) {
                                    jobs.add(deploymentJob(application, versions, change, job, reason, completedAt.get()));
                                }
                                if (!alreadyTriggered(application, versions)) {
                                    testJobs = emptyList();
                                }
                            }
                            else if (testJobs == null) {
                                testJobs = testJobs(application, versions,
                                                    String.format("Testing deployment for %s (%s)",
                                                                  job.jobName(), versions.toString()),
                                                    completedAt.orElseGet(clock::instant));
                            }
                        }
                        completedAt = Optional.empty();
                    }
                    else { // All jobs of this step are complete: find its completion time for later steps.
                        if (stepJobs.isEmpty()) { // No jobs means this is a delay step.
                            Duration delay = ((DeploymentSpec.Delay) step).duration();
                            completedAt = completedAt.map(at -> at.plus(delay)).filter(at -> !at.isAfter(clock.instant()));
                            reason += " after a delay of " + delay;
                        }
                        else {
                            completedAt = stepJobs.stream().map(job -> application.deploymentJobs().statusOf(job).get().lastCompleted().get().at()).max(naturalOrder());
                            reason = "Available change in " + stepJobs.stream().map(JobType::jobName).collect(joining(", "));
                        }
                    }
                }
            }
            if (testJobs == null) {
                // No production step called for testing: test the current plus any outstanding change, ahead of time.
                Change latestChange = application.outstandingChange().application().isPresent()
                        ? change.with(application.outstandingChange().application().get())
                        : change;
                testJobs = testJobs(application,
                                    Versions.from(latestChange,
                                                  application,
                                                  steps.sortedDeployments(application.productionDeployments().values()).stream().findFirst(),
                                                  controller.systemVersion()),
                                    "Testing last changes outside prod", clock.instant());
            }
            jobs.addAll(testJobs);
        });
        return Collections.unmodifiableList(jobs);
    }

    /** Returns whether given job should be triggered */
    private boolean canTrigger(JobType job, Versions versions, Application application, List<JobType> parallelJobs) {
        if (jobStateOf(application, job) != idle) return false;
        // When the job is part of a parallel step, all currently running production jobs must belong to that step.
        if (parallelJobs != null && ! parallelJobs.containsAll(runningProductionJobs(application))) return false;
        if (job.isProduction() && isSuspendedInAnotherZone(application, job.zone(controller.system()))) return false;
        return triggerAt(clock.instant(), job, versions, application);
    }

    /** Returns whether given job should be triggered */
    private boolean canTrigger(JobType job, Versions versions, Application application) {
        return canTrigger(job, versions, application, null);
    }

    /** Returns whether the application is suspended in any production zone other than the given one. */
    private boolean isSuspendedInAnotherZone(Application application, ZoneId zone) {
        for (Deployment deployment : application.productionDeployments().values()) {
            if (   ! deployment.zone().equals(zone)
                &&   controller.applications().isSuspended(new DeploymentId(application.id(), deployment.zone())))
                return true;
        }
        return false;
    }

    /** Returns whether the given job can trigger at the given instant */
    public boolean triggerAt(Instant instant, JobType job, Versions versions, Application application) {
        Optional<JobStatus> jobStatus = application.deploymentJobs().statusOf(job);
        if (!jobStatus.isPresent()) return true;                        // Job has never run.
        if (jobStatus.get().isSuccess()) return true;                   // Job isn't failing.
        if (!jobStatus.get().lastCompleted().isPresent()) return true;  // Job has not completed a first run yet.
        if (!jobStatus.get().firstFailing().isPresent()) return true;   // Not failing, but also not successful.
        if (!versions.targetsMatch(jobStatus.get().lastCompleted().get())) return true; // Previous completion targeted different versions.
        if (application.deploymentSpec().upgradePolicy() == DeploymentSpec.UpgradePolicy.canary) return true; // Canaries are always retried immediately.
        Instant firstFailing = jobStatus.get().firstFailing().get().at();
        Instant lastCompleted = jobStatus.get().lastCompleted().get().at();
        // Retry immediately for the first minute of failures.
        if (firstFailing.isAfter(instant.minus(Duration.ofMinutes(1)))) return true;
        // Out-of-capacity test jobs are retried once a minute, since capacity may free up at any time.
        if (job.isTest() && jobStatus.get().isOutOfCapacity()) {
            return lastCompleted.isBefore(instant.minus(Duration.ofMinutes(1)));
        }
        // Other failures: every 10 minutes during the first hour, then every 2 hours.
        if (firstFailing.isAfter(instant.minus(Duration.ofHours(1)))) {
            return lastCompleted.isBefore(instant.minus(Duration.ofMinutes(10)));
        }
        return lastCompleted.isBefore(instant.minus(Duration.ofHours(2)));
    }

    /** Returns the production jobs of the given application which are currently running. */
    private List<JobType> runningProductionJobs(Application application) {
        return application.deploymentJobs().jobStatus().keySet().parallelStream()
                          .filter(JobType::isProduction)
                          .filter(job -> isRunning(application, job))
                          .collect(toList());
    }

    /** Returns whether the given job is currently running; false if completed since last triggered, asking the build service otherwise. */
    private boolean isRunning(Application application, JobType jobType) {
        return    ! application.deploymentJobs().statusOf(jobType)
                               .flatMap(job -> job.lastCompleted().map(run -> run.at().isAfter(job.lastTriggered().get().at())))
                               .orElse(false)
               &&   EnumSet.of(running, queued).contains(jobStateOf(application, jobType));
    }

    /** Returns the state of the given job, from the internal job controller for internally deployed applications, otherwise from the build service. */
    private JobState jobStateOf(Application application, JobType jobType) {
        if (application.deploymentJobs().deployedInternally()) {
            Optional<Run> run = controller.jobController().last(application.id(), jobType);
            return run.isPresent() && ! run.get().hasEnded() ? JobState.running : JobState.idle;
        }
        return buildService.stateOf(BuildJob.of(application.id(),
                                                application.deploymentJobs().projectId().getAsLong(),
                                                jobType.jobName()));
    }

    /**
     * Returns whether the given change is complete for the given application for the given job.
     *
     * Any job is complete if the given change is already successful on that job.
     * A production job is also considered complete if its current change is strictly dominated by what
     * is already deployed in its zone, i.e., no parts of the change are upgrades, and the full current
     * change for the application downgrades the deployment, which is an acknowledgement that the deployed
     * version is broken somehow, such that the job may be locked in failure until a new version is released.
     */
    public boolean isComplete(Change change, Application application, JobType jobType) {
        Optional<Deployment> existingDeployment = deploymentFor(application, jobType);
        return    application.deploymentJobs().statusOf(jobType).flatMap(JobStatus::lastSuccess)
                             .map(job ->    change.platform().map(job.platform()::equals).orElse(true)
                                         && change.application().map(job.application()::equals).orElse(true))
                             .orElse(false)
               ||    jobType.isProduction()
                  && existingDeployment.map(deployment ->    ! isUpgrade(change, deployment)
                                                          &&   isDowngrade(application.change(), deployment))
                                       .orElse(false);
    }

    /** Returns whether the given change upgrades either the platform or application version of the deployment. */
    private static boolean isUpgrade(Change change, Deployment deployment) {
        return change.upgrades(deployment.version()) || change.upgrades(deployment.applicationVersion());
    }

    /** Returns whether the given change downgrades either the platform or application version of the deployment. */
    private static boolean isDowngrade(Change change, Deployment deployment) {
        return change.downgrades(deployment.version()) || change.downgrades(deployment.applicationVersion());
    }

    /** Returns whether the given versions have passed both test jobs, or have already been triggered in production. */
    private boolean isTested(Application application, Versions versions) {
        return    testedIn(application, systemTest, versions)
               && testedIn(application, stagingTest, versions)
               || alreadyTriggered(application, versions);
    }

    /** Returns whether the given test job has succeeded on the given versions; throws for non-test jobs. */
    public boolean testedIn(Application application, JobType testType, Versions versions) {
        if (testType == systemTest)
            return successOn(application, systemTest, versions).isPresent();
        if (testType == stagingTest)
            return successOn(application, stagingTest, versions).filter(versions::sourcesMatchIfPresent).isPresent();
        throw new IllegalArgumentException(testType + " is not a test job!");
    }

    /** Returns whether some production job has already been triggered with the given versions. */
    public boolean alreadyTriggered(Application application, Versions versions) {
        return application.deploymentJobs().jobStatus().values().stream()
                          .filter(job -> job.type().isProduction())
                          .anyMatch(job -> job.lastTriggered()
                                              .filter(versions::targetsMatch)
                                              .filter(versions::sourcesMatchIfPresent)
                                              .isPresent());
    }

    /** Returns whether a new application version may (re)place the current change of the application. */
    private boolean acceptNewApplicationVersion(Application application) {
        if ( ! application.deploymentSpec().canChangeRevisionAt(clock.instant())) return false;
        if (application.change().application().isPresent()) return true; // Replacing a previous application change is ok.
        if (application.deploymentJobs().hasFailures()) return true;     // Allow changes to fix problems.
        return ! application.change().platform().isPresent();            // Otherwise, only when no platform change is rolling out.
    }

    /** Returns the part of the application's current change which is not yet complete for all relevant jobs. */
    private Change remainingChange(Application application) {
        DeploymentSteps steps = steps(application.deploymentSpec());
        List<JobType> jobs = steps.production().isEmpty()
                ? steps.testJobs()
                : steps.productionJobs();
        Change change = application.change();
        // Completeness of the platform part is checked with the application part stripped away, and vice versa.
        if (jobs.stream().allMatch(job -> isComplete(application.change().withoutApplication(), application, job)))
            change = change.withoutPlatform();
        if (jobs.stream().allMatch(job -> isComplete(application.change().withoutPlatform(), application, job)))
            change = change.withoutApplication();
        return change;
    }

    /**
     * Returns the list of test jobs that should run now, and that need to succeed on the given versions for it to be considered tested.
     */
    private List<Job> testJobs(Application application, Versions versions, String reason, Instant availableSince) {
        List<Job> jobs = new ArrayList<>();
        for (JobType jobType : steps(application.deploymentSpec()).testJobs()) {
            // staging tests must also have matching sources; system tests need not.
            Optional<JobRun> completion = successOn(application, jobType, versions)
                    .filter(run -> versions.sourcesMatchIfPresent(run) || jobType == systemTest);
            if ( ! completion.isPresent() && canTrigger(jobType, versions, application))
                jobs.add(deploymentJob(application, versions, application.change(), jobType, reason, availableSince));
        }
        return jobs;
    }

    /** Creates a Job for the given parameters, marking it as a retry when the previous run failed on lack of capacity. */
    private Job deploymentJob(Application application, Versions versions, Change change, JobType jobType, String reason, Instant availableSince) {
        boolean isRetry = application.deploymentJobs().statusOf(jobType)
                                     .map(JobStatus::isOutOfCapacity)
                                     .orElse(false);
        if (isRetry) reason += "; retrying on out of capacity";

        JobRun triggering = JobRun.triggering(versions.targetPlatform(), versions.targetApplication(),
                                              versions.sourcePlatform(), versions.sourceApplication(),
                                              reason, clock.instant());
        return new Job(application, triggering, jobType, availableSince, isRetry, change.application().isPresent());
    }

    /** A job to trigger, with the data for the triggering run and the hints used to order jobs. */
    private static class Job extends BuildJob {

        private final JobType jobType;
        private final JobRun triggering;
        private final Instant availableSince; // When the change first became available to this job.
        private final boolean isRetry;
        private final boolean isApplicationUpgrade;

        private Job(Application application, JobRun triggering, JobType jobType, Instant availableSince,
                    boolean isRetry, boolean isApplicationUpgrade) {
            super(application.id(), application.deploymentJobs().projectId().getAsLong(), jobType.jobName());
            this.jobType = jobType;
            this.triggering = triggering;
            this.availableSince = availableSince;
            this.isRetry = isRetry;
            this.isApplicationUpgrade = isApplicationUpgrade;
        }

        JobType jobType() { return jobType; }
        Instant availableSince() { return availableSince; }
        boolean isRetry() { return isRetry; }
        boolean applicationUpgrade() { return isApplicationUpgrade; }

    }

}
class DeploymentTrigger { private final static Logger log = Logger.getLogger(DeploymentTrigger.class.getName()); private final Controller controller; private final Clock clock; private final BuildService buildService; private final JobController jobs; public DeploymentTrigger(Controller controller, BuildService buildService, Clock clock) { this.controller = Objects.requireNonNull(controller, "controller cannot be null"); this.clock = Objects.requireNonNull(clock, "clock cannot be null"); this.buildService = Objects.requireNonNull(buildService, "buildService cannot be null"); this.jobs = controller.jobController(); } public DeploymentSteps steps(DeploymentSpec spec) { return new DeploymentSteps(spec, controller::system); } /** * Records information when a job completes (successfully or not). This information is used when deciding what to * trigger next. */ public void notifyOfCompletion(JobReport report) { log.log(LogLevel.INFO, String.format("Notified of %s for %s of %s (%d)", report.jobError().map(e -> e.toString() + " error") .orElse("success"), report.jobType(), report.applicationId(), report.projectId())); if ( ! 
applications().get(report.applicationId()).isPresent()) { log.log(LogLevel.WARNING, "Ignoring completion of job of project '" + report.projectId() + "': Unknown application '" + report.applicationId() + "'"); return; } applications().lockOrThrow(report.applicationId(), application -> { JobRun triggering; if (report.jobType() == component) { ApplicationVersion applicationVersion = ApplicationVersion.from(report.sourceRevision().get(), report.buildNumber()); triggering = JobRun.triggering(application.get().oldestDeployedPlatform().orElse(controller.systemVersion()), applicationVersion, Optional.empty(), Optional.empty(), "Application commit", clock.instant()); if (report.success()) { if (acceptNewApplicationVersion(application.get())) { application = application.withChange(application.get().change().with(applicationVersion)) .withOutstandingChange(Change.empty()); if (application.get().deploymentJobs().deployedInternally()) for (Run run : jobs.active()) if (run.id().application().equals(report.applicationId())) jobs.abort(run.id()); } else application = application.withOutstandingChange(Change.of(applicationVersion)); } } else { triggering = application.get().deploymentJobs().statusOf(report.jobType()) .filter(job -> job.lastTriggered().isPresent() && job.lastCompleted() .map(completion -> ! completion.at().isAfter(job.lastTriggered().get().at())) .orElse(true)) .orElseThrow(() -> new IllegalStateException("Notified of completion of " + report.jobType().jobName() + " for " + report.applicationId() + ", but that has neither been triggered nor deployed")) .lastTriggered().get(); } application = application.withJobCompletion(report.projectId(), report.jobType(), triggering.completion(report.buildNumber(), clock.instant()), report.jobError()); application = application.withChange(remainingChange(application.get())); applications().store(application); }); } /** Returns a map of jobs that are scheduled to be run, grouped by the job type */ public Map<JobType, ? 
extends List<? extends BuildJob>> jobsToRun() { return computeReadyJobs().stream().collect(groupingBy(Job::jobType)); } /** * Finds and triggers jobs that can and should run but are currently not, and returns the number of triggered jobs. * * Only one job is triggered each run for test jobs, since their environments have limited capacity. */ public long triggerReadyJobs() { return computeReadyJobs().stream() .collect(partitioningBy(job -> job.jobType().isTest())) .entrySet().stream() .flatMap(entry -> (entry.getKey() ? entry.getValue().stream() .sorted(comparing(Job::isRetry) .thenComparing(Job::applicationUpgrade) .reversed() .thenComparing(Job::availableSince)) .collect(groupingBy(Job::jobType)) : entry.getValue().stream() .collect(groupingBy(Job::applicationId))) .values().stream() .map(jobs -> (Supplier<Long>) jobs.stream() .filter(this::trigger) .limit(entry.getKey() ? 1 : Long.MAX_VALUE)::count)) .parallel().map(Supplier::get).reduce(0L, Long::sum); } /** * Attempts to trigger the given job for the given application and returns the outcome. * * If the build service can not find the given job, or claims it is illegal to trigger it, * the project id is removed from the application owning the job, to prevent further trigger attempts. 
*/ public boolean trigger(Job job) { log.log(LogLevel.INFO, String.format("Triggering %s: %s", job, job.triggering)); try { applications().lockOrThrow(job.applicationId(), application -> { if (application.get().deploymentJobs().deployedInternally()) jobs.start(job.applicationId(), job.jobType, new Versions(job.triggering.platform(), job.triggering.application(), job.triggering.sourcePlatform(), job.triggering.sourceApplication())); else buildService.trigger(job); applications().store(application.withJobTriggering(job.jobType, job.triggering)); }); return true; } catch (RuntimeException e) { log.log(LogLevel.WARNING, "Exception triggering " + job + ": " + e); if (e instanceof NoSuchElementException || e instanceof IllegalArgumentException) applications().lockOrThrow(job.applicationId(), application -> applications().store(application.withProjectId(OptionalLong.empty()))); return false; } } /** Force triggering of a job for given application. */ public List<JobType> forceTrigger(ApplicationId applicationId, JobType jobType, String user) { Application application = applications().require(applicationId); if (jobType == component) { if (application.deploymentJobs().deployedInternally()) throw new IllegalArgumentException(applicationId + " has no component job we can trigger."); buildService.trigger(BuildJob.of(applicationId, application.deploymentJobs().projectId().getAsLong(), jobType.jobName())); return singletonList(component); } Versions versions = Versions.from(application.change(), application, deploymentFor(application, jobType), controller.systemVersion()); String reason = "Job triggered manually by " + user; return (jobType.isProduction() && ! isTested(application, versions) ? 
testJobs(application, versions, reason, clock.instant()).stream() : Stream.of(deploymentJob(application, versions, application.change(), jobType, reason, clock.instant()))) .peek(this::trigger) .map(Job::jobType).collect(toList()); } /** Triggers a change of this application, unless it already has a change. */ public void triggerChange(ApplicationId applicationId, Change change) { applications().lockOrThrow(applicationId, application -> { if ( ! application.get().change().isPresent()) { if (change.application().isPresent()) application = application.withOutstandingChange(Change.empty()); applications().store(application.withChange(change)); } }); } /** Overrides the given application's platform and application changes with any contained in the given change. */ /** Cancels a platform upgrade of the given application, and an application upgrade as well if {@code keepApplicationChange}. */ public void cancelChange(ApplicationId applicationId, ChangesToCancel cancellation) { applications().lockOrThrow(applicationId, application -> { Change change; switch (cancellation) { case ALL: change = Change.empty(); break; case PLATFORM: change = application.get().change().withoutPlatform(); break; case APPLICATION: change = application.get().change().withoutApplication(); break; default: throw new IllegalArgumentException("Unknown cancellation choice '" + cancellation + "'!"); } applications().store(application.withChange(change)); }); } public enum ChangesToCancel { ALL, PLATFORM, APPLICATION } private ApplicationController applications() { return controller.applications(); } private Optional<JobRun> successOn(Application application, JobType jobType, Versions versions) { return application.deploymentJobs().statusOf(jobType).flatMap(JobStatus::lastSuccess) .filter(versions::targetsMatch); } private Optional<Deployment> deploymentFor(Application application, JobType jobType) { return Optional.ofNullable(application.deployments().get(jobType.zone(controller.system()))); } private 
static <T extends Comparable<T>> Optional<T> max(Optional<T> o1, Optional<T> o2) { return ! o1.isPresent() ? o2 : ! o2.isPresent() ? o1 : o1.get().compareTo(o2.get()) >= 0 ? o1 : o2; } /** Returns the set of all jobs which have changes to propagate from the upstream steps. */ private List<Job> computeReadyJobs() { return ApplicationList.from(applications().asList()) .withProjectId() .withChanges() .idList().stream() .map(this::computeReadyJobs) .flatMap(Collection::stream) .collect(toList()); } /** * Finds the next step to trigger for the given application, if any, and returns these as a list. */ private List<Job> computeReadyJobs(ApplicationId id) { List<Job> jobs = new ArrayList<>(); applications().get(id).ifPresent(application -> { Change change = application.change(); Optional<Instant> completedAt = max(application.deploymentJobs().statusOf(systemTest) .<Instant>flatMap(job -> job.lastSuccess().map(JobRun::at)), application.deploymentJobs().statusOf(stagingTest) .<Instant>flatMap(job -> job.lastSuccess().map(JobRun::at))); String reason = "New change available"; List<Job> testJobs = null; DeploymentSteps steps = steps(application.deploymentSpec()); if (change.isPresent()) { for (Step step : steps.production()) { List<JobType> stepJobs = steps.toJobs(step); List<JobType> remainingJobs = stepJobs.stream().filter(job -> ! 
isComplete(change, application, job)).collect(toList()); if (!remainingJobs.isEmpty()) { for (JobType job : remainingJobs) { Versions versions = Versions.from(change, application, deploymentFor(application, job), controller.systemVersion()); if (isTested(application, versions)) { if (completedAt.isPresent() && canTrigger(job, versions, application, stepJobs)) { jobs.add(deploymentJob(application, versions, change, job, reason, completedAt.get())); } if (!alreadyTriggered(application, versions)) { testJobs = emptyList(); } } else if (testJobs == null) { testJobs = testJobs(application, versions, String.format("Testing deployment for %s (%s)", job.jobName(), versions.toString()), completedAt.orElseGet(clock::instant)); } } completedAt = Optional.empty(); } else { if (stepJobs.isEmpty()) { Duration delay = ((DeploymentSpec.Delay) step).duration(); completedAt = completedAt.map(at -> at.plus(delay)).filter(at -> !at.isAfter(clock.instant())); reason += " after a delay of " + delay; } else { completedAt = stepJobs.stream().map(job -> application.deploymentJobs().statusOf(job).get().lastCompleted().get().at()).max(naturalOrder()); reason = "Available change in " + stepJobs.stream().map(JobType::jobName).collect(joining(", ")); } } } } if (testJobs == null) { Change latestChange = application.outstandingChange().application().isPresent() ? 
change.with(application.outstandingChange().application().get()) : change; testJobs = testJobs(application, Versions.from(latestChange, application, steps.sortedDeployments(application.productionDeployments().values()).stream().findFirst(), controller.systemVersion()), "Testing last changes outside prod", clock.instant()); } jobs.addAll(testJobs); }); return Collections.unmodifiableList(jobs); } /** Returns whether given job should be triggered */ private boolean canTrigger(JobType job, Versions versions, Application application, List<JobType> parallelJobs) { if (jobStateOf(application, job) != idle) return false; if (parallelJobs != null && ! parallelJobs.containsAll(runningProductionJobs(application))) return false; if (job.isProduction() && isSuspendedInAnotherZone(application, job.zone(controller.system()))) return false; return triggerAt(clock.instant(), job, versions, application); } /** Returns whether given job should be triggered */ private boolean canTrigger(JobType job, Versions versions, Application application) { return canTrigger(job, versions, application, null); } private boolean isSuspendedInAnotherZone(Application application, ZoneId zone) { for (Deployment deployment : application.productionDeployments().values()) { if ( ! 
deployment.zone().equals(zone) && controller.applications().isSuspended(new DeploymentId(application.id(), deployment.zone()))) return true; } return false; } /** Returns whether the given job can trigger at the given instant */ public boolean triggerAt(Instant instant, JobType job, Versions versions, Application application) { Optional<JobStatus> jobStatus = application.deploymentJobs().statusOf(job); if (!jobStatus.isPresent()) return true; if (jobStatus.get().isSuccess()) return true; if (!jobStatus.get().lastCompleted().isPresent()) return true; if (!jobStatus.get().firstFailing().isPresent()) return true; if (!versions.targetsMatch(jobStatus.get().lastCompleted().get())) return true; if (application.deploymentSpec().upgradePolicy() == DeploymentSpec.UpgradePolicy.canary) return true; Instant firstFailing = jobStatus.get().firstFailing().get().at(); Instant lastCompleted = jobStatus.get().lastCompleted().get().at(); if (firstFailing.isAfter(instant.minus(Duration.ofMinutes(1)))) return true; if (job.isTest() && jobStatus.get().isOutOfCapacity()) { return lastCompleted.isBefore(instant.minus(Duration.ofMinutes(1))); } if (firstFailing.isAfter(instant.minus(Duration.ofHours(1)))) { return lastCompleted.isBefore(instant.minus(Duration.ofMinutes(10))); } return lastCompleted.isBefore(instant.minus(Duration.ofHours(2))); } private List<JobType> runningProductionJobs(Application application) { return application.deploymentJobs().jobStatus().keySet().parallelStream() .filter(JobType::isProduction) .filter(job -> isRunning(application, job)) .collect(toList()); } /** Returns whether the given job is currently running; false if completed since last triggered, asking the build service otherwise. */ private boolean isRunning(Application application, JobType jobType) { return ! 
application.deploymentJobs().statusOf(jobType) .flatMap(job -> job.lastCompleted().map(run -> run.at().isAfter(job.lastTriggered().get().at()))) .orElse(false) && EnumSet.of(running, queued).contains(jobStateOf(application, jobType)); } private JobState jobStateOf(Application application, JobType jobType) { if (application.deploymentJobs().deployedInternally()) { Optional<Run> run = controller.jobController().last(application.id(), jobType); return run.isPresent() && ! run.get().hasEnded() ? JobState.running : JobState.idle; } return buildService.stateOf(BuildJob.of(application.id(), application.deploymentJobs().projectId().getAsLong(), jobType.jobName())); } /** * Returns whether the given change is complete for the given application for the given job. * * Any job is complete if the given change is already successful on that job. * A production job is also considered complete if its current change is strictly dominated by what * is already deployed in its zone, i.e., no parts of the change are upgrades, and the full current * change for the application downgrades the deployment, which is an acknowledgement that the deployed * version is broken somehow, such that the job may be locked in failure until a new version is released. */ public boolean isComplete(Change change, Application application, JobType jobType) { Optional<Deployment> existingDeployment = deploymentFor(application, jobType); return application.deploymentJobs().statusOf(jobType).flatMap(JobStatus::lastSuccess) .map(job -> change.platform().map(job.platform()::equals).orElse(true) && change.application().map(job.application()::equals).orElse(true)) .orElse(false) || jobType.isProduction() && existingDeployment.map(deployment -> ! 
isUpgrade(change, deployment) && isDowngrade(application.change(), deployment)) .orElse(false); } private static boolean isUpgrade(Change change, Deployment deployment) { return change.upgrades(deployment.version()) || change.upgrades(deployment.applicationVersion()); } private static boolean isDowngrade(Change change, Deployment deployment) { return change.downgrades(deployment.version()) || change.downgrades(deployment.applicationVersion()); } private boolean isTested(Application application, Versions versions) { return testedIn(application, systemTest, versions) && testedIn(application, stagingTest, versions) || alreadyTriggered(application, versions); } public boolean testedIn(Application application, JobType testType, Versions versions) { if (testType == systemTest) return successOn(application, systemTest, versions).isPresent(); if (testType == stagingTest) return successOn(application, stagingTest, versions).filter(versions::sourcesMatchIfPresent).isPresent(); throw new IllegalArgumentException(testType + " is not a test job!"); } public boolean alreadyTriggered(Application application, Versions versions) { return application.deploymentJobs().jobStatus().values().stream() .filter(job -> job.type().isProduction()) .anyMatch(job -> job.lastTriggered() .filter(versions::targetsMatch) .filter(versions::sourcesMatchIfPresent) .isPresent()); } private boolean acceptNewApplicationVersion(Application application) { if ( ! application.deploymentSpec().canChangeRevisionAt(clock.instant())) return false; if (application.change().application().isPresent()) return true; if (application.deploymentJobs().hasFailures()) return true; return ! application.change().platform().isPresent(); } private Change remainingChange(Application application) { DeploymentSteps steps = steps(application.deploymentSpec()); List<JobType> jobs = steps.production().isEmpty() ? 
steps.testJobs() : steps.productionJobs(); Change change = application.change(); if (jobs.stream().allMatch(job -> isComplete(application.change().withoutApplication(), application, job))) change = change.withoutPlatform(); if (jobs.stream().allMatch(job -> isComplete(application.change().withoutPlatform(), application, job))) change = change.withoutApplication(); return change; } /** * Returns the list of test jobs that should run now, and that need to succeed on the given versions for it to be considered tested. */ private List<Job> testJobs(Application application, Versions versions, String reason, Instant availableSince) { List<Job> jobs = new ArrayList<>(); for (JobType jobType : steps(application.deploymentSpec()).testJobs()) { Optional<JobRun> completion = successOn(application, jobType, versions) .filter(run -> versions.sourcesMatchIfPresent(run) || jobType == systemTest); if ( ! completion.isPresent() && canTrigger(jobType, versions, application)) jobs.add(deploymentJob(application, versions, application.change(), jobType, reason, availableSince)); } return jobs; } private Job deploymentJob(Application application, Versions versions, Change change, JobType jobType, String reason, Instant availableSince) { boolean isRetry = application.deploymentJobs().statusOf(jobType) .map(JobStatus::isOutOfCapacity) .orElse(false); if (isRetry) reason += "; retrying on out of capacity"; JobRun triggering = JobRun.triggering(versions.targetPlatform(), versions.targetApplication(), versions.sourcePlatform(), versions.sourceApplication(), reason, clock.instant()); return new Job(application, triggering, jobType, availableSince, isRetry, change.application().isPresent()); } private static class Job extends BuildJob { private final JobType jobType; private final JobRun triggering; private final Instant availableSince; private final boolean isRetry; private final boolean isApplicationUpgrade; private Job(Application application, JobRun triggering, JobType jobType, Instant 
availableSince, boolean isRetry, boolean isApplicationUpgrade) { super(application.id(), application.deploymentJobs().projectId().getAsLong(), jobType.jobName()); this.jobType = jobType; this.triggering = triggering; this.availableSince = availableSince; this.isRetry = isRetry; this.isApplicationUpgrade = isApplicationUpgrade; } JobType jobType() { return jobType; } Instant availableSince() { return availableSince; } boolean isRetry() { return isRetry; } boolean applicationUpgrade() { return isApplicationUpgrade; } } }
I like this suggestion.
/**
 * Overrides the stored change for the given application with any platform and/or
 * application parts contained in the given change, under the application lock.
 * Absent parts of {@code change} leave the corresponding part of the stored change untouched.
 */
public void forceChange(ApplicationId applicationId, Change change) {
    applications().lockOrThrow(applicationId, application -> {
        Change merged = application.get().change();
        if (change.platform().isPresent())
            merged = merged.with(change.platform().get());
        if (change.application().isPresent())
            merged = merged.with(change.application().get());
        applications().store(application.withChange(merged));
    });
}
Change current = application.get().change();
/**
 * Overrides this application's current platform and/or application change with the
 * corresponding parts of the given change, under the application lock.
 * Parts of {@code change} that are absent leave the existing change untouched.
 *
 * @param applicationId the application whose change to override
 * @param change        carrier of the platform and/or application versions to force
 */
public void forceChange(ApplicationId applicationId, Change change) {
    applications().lockOrThrow(applicationId, application -> {
        Change current = application.get().change();
        /* Merge in each part of the forced change that is present. */
        if (change.platform().isPresent()) current = current.with(change.platform().get());
        if (change.application().isPresent()) current = current.with(change.application().get());
        applications().store(application.withChange(current));
    });
}
/**
 * Decides which deployment jobs should run for each application and triggers them — either through
 * the external {@link BuildService} or, for internally deployed applications, through the
 * {@link JobController} ({@code jobs.start}). Also records job completions
 * ({@link #notifyOfCompletion}) and maintains each application's current {@link Change}
 * (see {@code remainingChange}). All application mutations happen under
 * {@code applications().lockOrThrow}.
 */
class DeploymentTrigger { private final static Logger log = Logger.getLogger(DeploymentTrigger.class.getName()); private final Controller controller; private final Clock clock; private final BuildService buildService; private final JobController jobs; public DeploymentTrigger(Controller controller, BuildService buildService, Clock clock) { this.controller = Objects.requireNonNull(controller, "controller cannot be null"); this.clock = Objects.requireNonNull(clock, "clock cannot be null"); this.buildService = Objects.requireNonNull(buildService, "buildService cannot be null"); this.jobs = controller.jobController(); } public DeploymentSteps steps(DeploymentSpec spec) { return new DeploymentSteps(spec, controller::system); } /** * Records information when a job completes (successfully or not). This information is used when deciding what to * trigger next. */ public void notifyOfCompletion(JobReport report) { log.log(LogLevel.INFO, String.format("Notified of %s for %s of %s (%d)", report.jobError().map(e -> e.toString() + " error") .orElse("success"), report.jobType(), report.applicationId(), report.projectId())); if ( !
applications().get(report.applicationId()).isPresent()) { log.log(LogLevel.WARNING, "Ignoring completion of job of project '" + report.projectId() + "': Unknown application '" + report.applicationId() + "'"); return; } applications().lockOrThrow(report.applicationId(), application -> { JobRun triggering; if (report.jobType() == component) { ApplicationVersion applicationVersion = ApplicationVersion.from(report.sourceRevision().get(), report.buildNumber()); triggering = JobRun.triggering(application.get().oldestDeployedPlatform().orElse(controller.systemVersion()), applicationVersion, Optional.empty(), Optional.empty(), "Application commit", clock.instant()); if (report.success()) { if (acceptNewApplicationVersion(application.get())) { application = application.withChange(application.get().change().with(applicationVersion)) .withOutstandingChange(Change.empty()); if (application.get().deploymentJobs().deployedInternally()) for (Run run : jobs.active()) if (run.id().application().equals(report.applicationId())) jobs.abort(run.id()); } else application = application.withOutstandingChange(Change.of(applicationVersion)); } } else { triggering = application.get().deploymentJobs().statusOf(report.jobType()) .filter(job -> job.lastTriggered().isPresent() && job.lastCompleted() .map(completion -> ! completion.at().isAfter(job.lastTriggered().get().at())) .orElse(true)) .orElseThrow(() -> new IllegalStateException("Notified of completion of " + report.jobType().jobName() + " for " + report.applicationId() + ", but that has neither been triggered nor deployed")) .lastTriggered().get(); } application = application.withJobCompletion(report.projectId(), report.jobType(), triggering.completion(report.buildNumber(), clock.instant()), report.jobError()); application = application.withChange(remainingChange(application.get())); applications().store(application); }); } /** Returns a map of jobs that are scheduled to be run, grouped by the job type */ public Map<JobType, ?
extends List<? extends BuildJob>> jobsToRun() { return computeReadyJobs().stream().collect(groupingBy(Job::jobType)); } /** * Finds and triggers jobs that can and should run but are currently not, and returns the number of triggered jobs. * * Only one job is triggered each run for test jobs, since their environments have limited capacity. */ public long triggerReadyJobs() { return computeReadyJobs().stream() .collect(partitioningBy(job -> job.jobType().isTest())) .entrySet().stream() .flatMap(entry -> (entry.getKey() ? entry.getValue().stream() .sorted(comparing(Job::isRetry) .thenComparing(Job::applicationUpgrade) .reversed() .thenComparing(Job::availableSince)) .collect(groupingBy(Job::jobType)) : entry.getValue().stream() .collect(groupingBy(Job::applicationId))) .values().stream() .map(jobs -> (Supplier<Long>) jobs.stream() .filter(this::trigger) .limit(entry.getKey() ? 1 : Long.MAX_VALUE)::count)) .parallel().map(Supplier::get).reduce(0L, Long::sum); } /** * Attempts to trigger the given job for the given application and returns the outcome. * * If the build service can not find the given job, or claims it is illegal to trigger it, * the project id is removed from the application owning the job, to prevent further trigger attempts.
*/ public boolean trigger(Job job) { log.log(LogLevel.INFO, String.format("Triggering %s: %s", job, job.triggering)); try { applications().lockOrThrow(job.applicationId(), application -> { if (application.get().deploymentJobs().deployedInternally()) jobs.start(job.applicationId(), job.jobType, new Versions(job.triggering.platform(), job.triggering.application(), job.triggering.sourcePlatform(), job.triggering.sourceApplication())); else buildService.trigger(job); applications().store(application.withJobTriggering(job.jobType, job.triggering)); }); return true; } catch (RuntimeException e) { log.log(LogLevel.WARNING, "Exception triggering " + job + ": " + e); if (e instanceof NoSuchElementException || e instanceof IllegalArgumentException) applications().lockOrThrow(job.applicationId(), application -> applications().store(application.withProjectId(OptionalLong.empty()))); return false; } } /** Force triggering of a job for given application. */ public List<JobType> forceTrigger(ApplicationId applicationId, JobType jobType, String user) { Application application = applications().require(applicationId); if (jobType == component) { if (application.deploymentJobs().deployedInternally()) throw new IllegalArgumentException(applicationId + " has no component job we can trigger."); buildService.trigger(BuildJob.of(applicationId, application.deploymentJobs().projectId().getAsLong(), jobType.jobName())); return singletonList(component); } Versions versions = Versions.from(application.change(), application, deploymentFor(application, jobType), controller.systemVersion()); String reason = "Job triggered manually by " + user; return (jobType.isProduction() && ! isTested(application, versions) ?
testJobs(application, versions, reason, clock.instant()).stream() : Stream.of(deploymentJob(application, versions, application.change(), jobType, reason, clock.instant()))) .peek(this::trigger) .map(Job::jobType).collect(toList()); } /** Triggers a change of this application, unless it already has a change. */ public void triggerChange(ApplicationId applicationId, Change change) { applications().lockOrThrow(applicationId, application -> { if ( ! application.get().change().isPresent()) { if (change.application().isPresent()) application = application.withOutstandingChange(Change.empty()); applications().store(application.withChange(change)); } }); } /** Overrides the given application's platform and application changes with any contained in the given change. */ /* NOTE(review): the preceding Javadoc appears orphaned — the method it described (forceChange) is not present in this copy of the class. */ /** Cancels a platform upgrade of the given application, and an application upgrade as well if {@code keepApplicationChange}. */ public void cancelChange(ApplicationId applicationId, ChangesToCancel cancellation) { applications().lockOrThrow(applicationId, application -> { Change change; switch (cancellation) { case ALL: change = Change.empty(); break; case PLATFORM: change = application.get().change().withoutPlatform(); break; case APPLICATION: change = application.get().change().withoutApplication(); break; default: throw new IllegalArgumentException("Unknown cancellation choice '" + cancellation + "'!"); } applications().store(application.withChange(change)); }); } public enum ChangesToCancel { ALL, PLATFORM, APPLICATION } private ApplicationController applications() { return controller.applications(); } private Optional<JobRun> successOn(Application application, JobType jobType, Versions versions) { return application.deploymentJobs().statusOf(jobType).flatMap(JobStatus::lastSuccess) .filter(versions::targetsMatch); } private Optional<Deployment> deploymentFor(Application application, JobType jobType) { return Optional.ofNullable(application.deployments().get(jobType.zone(controller.system()))); } private
static <T extends Comparable<T>> Optional<T> max(Optional<T> o1, Optional<T> o2) { return ! o1.isPresent() ? o2 : ! o2.isPresent() ? o1 : o1.get().compareTo(o2.get()) >= 0 ? o1 : o2; } /** Returns the set of all jobs which have changes to propagate from the upstream steps. */ private List<Job> computeReadyJobs() { return ApplicationList.from(applications().asList()) .withProjectId() .withChanges() .idList().stream() .map(this::computeReadyJobs) .flatMap(Collection::stream) .collect(toList()); } /** * Finds the next step to trigger for the given application, if any, and returns these as a list. */ private List<Job> computeReadyJobs(ApplicationId id) { List<Job> jobs = new ArrayList<>(); applications().get(id).ifPresent(application -> { Change change = application.change(); Optional<Instant> completedAt = max(application.deploymentJobs().statusOf(systemTest) .<Instant>flatMap(job -> job.lastSuccess().map(JobRun::at)), application.deploymentJobs().statusOf(stagingTest) .<Instant>flatMap(job -> job.lastSuccess().map(JobRun::at))); String reason = "New change available"; List<Job> testJobs = null; DeploymentSteps steps = steps(application.deploymentSpec()); if (change.isPresent()) { for (Step step : steps.production()) { List<JobType> stepJobs = steps.toJobs(step); List<JobType> remainingJobs = stepJobs.stream().filter(job -> !
isComplete(change, application, job)).collect(toList()); if (!remainingJobs.isEmpty()) { for (JobType job : remainingJobs) { Versions versions = Versions.from(change, application, deploymentFor(application, job), controller.systemVersion()); if (isTested(application, versions)) { if (completedAt.isPresent() && canTrigger(job, versions, application, stepJobs)) { jobs.add(deploymentJob(application, versions, change, job, reason, completedAt.get())); } if (!alreadyTriggered(application, versions)) { testJobs = emptyList(); } } else if (testJobs == null) { testJobs = testJobs(application, versions, String.format("Testing deployment for %s (%s)", job.jobName(), versions.toString()), completedAt.orElseGet(clock::instant)); } } completedAt = Optional.empty(); } else { if (stepJobs.isEmpty()) { Duration delay = ((DeploymentSpec.Delay) step).duration(); completedAt = completedAt.map(at -> at.plus(delay)).filter(at -> !at.isAfter(clock.instant())); reason += " after a delay of " + delay; } else { completedAt = stepJobs.stream().map(job -> application.deploymentJobs().statusOf(job).get().lastCompleted().get().at()).max(naturalOrder()); reason = "Available change in " + stepJobs.stream().map(JobType::jobName).collect(joining(", ")); } } } } if (testJobs == null) { Change latestChange = application.outstandingChange().application().isPresent() ?
change.with(application.outstandingChange().application().get()) : change; testJobs = testJobs(application, Versions.from(latestChange, application, steps.sortedDeployments(application.productionDeployments().values()).stream().findFirst(), controller.systemVersion()), "Testing last changes outside prod", clock.instant()); } jobs.addAll(testJobs); }); return Collections.unmodifiableList(jobs); } /** Returns whether given job should be triggered */ private boolean canTrigger(JobType job, Versions versions, Application application, List<JobType> parallelJobs) { if (jobStateOf(application, job) != idle) return false; if (parallelJobs != null && ! parallelJobs.containsAll(runningProductionJobs(application))) return false; if (job.isProduction() && isSuspendedInAnotherZone(application, job.zone(controller.system()))) return false; return triggerAt(clock.instant(), job, versions, application); } /** Returns whether given job should be triggered */ private boolean canTrigger(JobType job, Versions versions, Application application) { return canTrigger(job, versions, application, null); } private boolean isSuspendedInAnotherZone(Application application, ZoneId zone) { for (Deployment deployment : application.productionDeployments().values()) { if ( !
deployment.zone().equals(zone) && controller.applications().isSuspended(new DeploymentId(application.id(), deployment.zone()))) return true; } return false; } /** Returns whether the given job can trigger at the given instant */ public boolean triggerAt(Instant instant, JobType job, Versions versions, Application application) { Optional<JobStatus> jobStatus = application.deploymentJobs().statusOf(job); if (!jobStatus.isPresent()) return true; if (jobStatus.get().isSuccess()) return true; if (!jobStatus.get().lastCompleted().isPresent()) return true; if (!jobStatus.get().firstFailing().isPresent()) return true; if (!versions.targetsMatch(jobStatus.get().lastCompleted().get())) return true; if (application.deploymentSpec().upgradePolicy() == DeploymentSpec.UpgradePolicy.canary) return true; Instant firstFailing = jobStatus.get().firstFailing().get().at(); Instant lastCompleted = jobStatus.get().lastCompleted().get().at(); /* Escalating retry back-off for failing jobs: retry freely within the first minute of failing; during the first hour, retry at most every 10 minutes; after that, at most every 2 hours. Out-of-capacity test jobs retry every minute. */ if (firstFailing.isAfter(instant.minus(Duration.ofMinutes(1)))) return true; if (job.isTest() && jobStatus.get().isOutOfCapacity()) { return lastCompleted.isBefore(instant.minus(Duration.ofMinutes(1))); } if (firstFailing.isAfter(instant.minus(Duration.ofHours(1)))) { return lastCompleted.isBefore(instant.minus(Duration.ofMinutes(10))); } return lastCompleted.isBefore(instant.minus(Duration.ofHours(2))); } private List<JobType> runningProductionJobs(Application application) { return application.deploymentJobs().jobStatus().keySet().parallelStream() .filter(JobType::isProduction) .filter(job -> isRunning(application, job)) .collect(toList()); } /** Returns whether the given job is currently running; false if completed since last triggered, asking the build service otherwise. */ private boolean isRunning(Application application, JobType jobType) { return !
application.deploymentJobs().statusOf(jobType) .flatMap(job -> job.lastCompleted().map(run -> run.at().isAfter(job.lastTriggered().get().at()))) .orElse(false) && EnumSet.of(running, queued).contains(jobStateOf(application, jobType)); } private JobState jobStateOf(Application application, JobType jobType) { if (application.deploymentJobs().deployedInternally()) { Optional<Run> run = controller.jobController().last(application.id(), jobType); return run.isPresent() && ! run.get().hasEnded() ? JobState.running : JobState.idle; } return buildService.stateOf(BuildJob.of(application.id(), application.deploymentJobs().projectId().getAsLong(), jobType.jobName())); } /** * Returns whether the given change is complete for the given application for the given job. * * Any job is complete if the given change is already successful on that job. * A production job is also considered complete if its current change is strictly dominated by what * is already deployed in its zone, i.e., no parts of the change are upgrades, and the full current * change for the application downgrades the deployment, which is an acknowledgement that the deployed * version is broken somehow, such that the job may be locked in failure until a new version is released. */ public boolean isComplete(Change change, Application application, JobType jobType) { Optional<Deployment> existingDeployment = deploymentFor(application, jobType); return application.deploymentJobs().statusOf(jobType).flatMap(JobStatus::lastSuccess) .map(job -> change.platform().map(job.platform()::equals).orElse(true) && change.application().map(job.application()::equals).orElse(true)) .orElse(false) || jobType.isProduction() && existingDeployment.map(deployment -> !
isUpgrade(change, deployment) && isDowngrade(application.change(), deployment)) .orElse(false); } private static boolean isUpgrade(Change change, Deployment deployment) { return change.upgrades(deployment.version()) || change.upgrades(deployment.applicationVersion()); } private static boolean isDowngrade(Change change, Deployment deployment) { return change.downgrades(deployment.version()) || change.downgrades(deployment.applicationVersion()); } private boolean isTested(Application application, Versions versions) { return testedIn(application, systemTest, versions) && testedIn(application, stagingTest, versions) || alreadyTriggered(application, versions); } public boolean testedIn(Application application, JobType testType, Versions versions) { if (testType == systemTest) return successOn(application, systemTest, versions).isPresent(); if (testType == stagingTest) return successOn(application, stagingTest, versions).filter(versions::sourcesMatchIfPresent).isPresent(); throw new IllegalArgumentException(testType + " is not a test job!"); } public boolean alreadyTriggered(Application application, Versions versions) { return application.deploymentJobs().jobStatus().values().stream() .filter(job -> job.type().isProduction()) .anyMatch(job -> job.lastTriggered() .filter(versions::targetsMatch) .filter(versions::sourcesMatchIfPresent) .isPresent()); } private boolean acceptNewApplicationVersion(Application application) { if ( ! application.deploymentSpec().canChangeRevisionAt(clock.instant())) return false; if (application.change().application().isPresent()) return true; if (application.deploymentJobs().hasFailures()) return true; return ! application.change().platform().isPresent(); } private Change remainingChange(Application application) { DeploymentSteps steps = steps(application.deploymentSpec()); List<JobType> jobs = steps.production().isEmpty() ?
steps.testJobs() : steps.productionJobs(); Change change = application.change(); if (jobs.stream().allMatch(job -> isComplete(application.change().withoutApplication(), application, job))) change = change.withoutPlatform(); if (jobs.stream().allMatch(job -> isComplete(application.change().withoutPlatform(), application, job))) change = change.withoutApplication(); return change; } /** * Returns the list of test jobs that should run now, and that need to succeed on the given versions for it to be considered tested. */ private List<Job> testJobs(Application application, Versions versions, String reason, Instant availableSince) { List<Job> jobs = new ArrayList<>(); for (JobType jobType : steps(application.deploymentSpec()).testJobs()) { Optional<JobRun> completion = successOn(application, jobType, versions) .filter(run -> versions.sourcesMatchIfPresent(run) || jobType == systemTest); if ( ! completion.isPresent() && canTrigger(jobType, versions, application)) jobs.add(deploymentJob(application, versions, application.change(), jobType, reason, availableSince)); } return jobs; } private Job deploymentJob(Application application, Versions versions, Change change, JobType jobType, String reason, Instant availableSince) { boolean isRetry = application.deploymentJobs().statusOf(jobType) .map(JobStatus::isOutOfCapacity) .orElse(false); if (isRetry) reason += "; retrying on out of capacity"; JobRun triggering = JobRun.triggering(versions.targetPlatform(), versions.targetApplication(), versions.sourcePlatform(), versions.sourceApplication(), reason, clock.instant()); return new Job(application, triggering, jobType, availableSince, isRetry, change.application().isPresent()); } /* Immutable value describing one triggerable job, carrying ordering metadata (retry/upgrade/availableSince) used by triggerReadyJobs. */ private static class Job extends BuildJob { private final JobType jobType; private final JobRun triggering; private final Instant availableSince; private final boolean isRetry; private final boolean isApplicationUpgrade; private Job(Application application, JobRun triggering, JobType jobType, Instant
availableSince, boolean isRetry, boolean isApplicationUpgrade) { super(application.id(), application.deploymentJobs().projectId().getAsLong(), jobType.jobName()); this.jobType = jobType; this.triggering = triggering; this.availableSince = availableSince; this.isRetry = isRetry; this.isApplicationUpgrade = isApplicationUpgrade; } JobType jobType() { return jobType; } Instant availableSince() { return availableSince; } boolean isRetry() { return isRetry; } boolean applicationUpgrade() { return isApplicationUpgrade; } } }
/**
 * Decides which deployment jobs to run for each application, and triggers them.
 *
 * Jobs are triggered either through an external {@code BuildService} or, for applications
 * deployed internally, through the controller's own {@code JobController}.
 * NOTE(review): thread-safety is delegated to {@code ApplicationController.lockOrThrow} —
 * all mutations of application state happen under that lock.
 */
class DeploymentTrigger {

    private final static Logger log = Logger.getLogger(DeploymentTrigger.class.getName());

    private final Controller controller;
    private final Clock clock;
    private final BuildService buildService;
    private final JobController jobs;

    /**
     * Creates a trigger backed by the given controller and build service.
     *
     * @throws NullPointerException if any argument is null
     */
    public DeploymentTrigger(Controller controller, BuildService buildService, Clock clock) {
        this.controller = Objects.requireNonNull(controller, "controller cannot be null");
        this.clock = Objects.requireNonNull(clock, "clock cannot be null");
        this.buildService = Objects.requireNonNull(buildService, "buildService cannot be null");
        this.jobs = controller.jobController();
    }

    /** Returns the deployment steps of the given spec, resolved against this controller's system. */
    public DeploymentSteps steps(DeploymentSpec spec) {
        return new DeploymentSteps(spec, controller::system);
    }

    /**
     * Records information when a job completes (successfully or not). This information is
     * used when deciding what to trigger next.
     */
    public void notifyOfCompletion(JobReport report) {
        log.log(LogLevel.INFO, String.format("Notified of %s for %s of %s (%d)",
                                             report.jobError().map(e -> e.toString() + " error").orElse("success"),
                                             report.jobType(), report.applicationId(), report.projectId()));
        // A report for an application we no longer know about is logged and dropped.
        if ( ! applications().get(report.applicationId()).isPresent()) {
            log.log(LogLevel.WARNING, "Ignoring completion of job of project '" + report.projectId() +
                                      "': Unknown application '" + report.applicationId() + "'");
            return;
        }
        applications().lockOrThrow(report.applicationId(), application -> {
            JobRun triggering;
            if (report.jobType() == component) {
                // Completion of the component (build) job produces a new application version.
                ApplicationVersion applicationVersion = ApplicationVersion.from(report.sourceRevision().get(),
                                                                                report.buildNumber());
                triggering = JobRun.triggering(application.get().oldestDeployedPlatform().orElse(controller.systemVersion()),
                                               applicationVersion, Optional.empty(), Optional.empty(),
                                               "Application commit", clock.instant());
                if (report.success()) {
                    if (acceptNewApplicationVersion(application.get())) {
                        application = application.withChange(application.get().change().with(applicationVersion))
                                                 .withOutstandingChange(Change.empty());
                        // Internally deployed applications get their active runs aborted so the
                        // new version can roll out instead.
                        if (application.get().deploymentJobs().deployedInternally())
                            for (Run run : jobs.active())
                                if (run.id().application().equals(report.applicationId()))
                                    jobs.abort(run.id());
                    }
                    else
                        // Not accepted now: park the version as an outstanding change.
                        application = application.withOutstandingChange(Change.of(applicationVersion));
                }
            }
            else {
                // Any other job must have been triggered by us, and not completed since then.
                triggering = application.get().deploymentJobs().statusOf(report.jobType())
                        .filter(job ->    job.lastTriggered().isPresent()
                                       && job.lastCompleted().map(completion -> ! completion.at().isAfter(job.lastTriggered().get().at()))
                                             .orElse(true))
                        .orElseThrow(() -> new IllegalStateException("Notified of completion of " + report.jobType().jobName() +
                                                                     " for " + report.applicationId() +
                                                                     ", but that has neither been triggered nor deployed"))
                        .lastTriggered().get();
            }
            application = application.withJobCompletion(report.projectId(), report.jobType(),
                                                        triggering.completion(report.buildNumber(), clock.instant()),
                                                        report.jobError());
            // Prune the change of any parts which are now complete everywhere.
            application = application.withChange(remainingChange(application.get()));
            applications().store(application);
        });
    }

    /** Returns a map of jobs that are scheduled to be run, grouped by the job type */
    public Map<JobType, ? extends List<? extends BuildJob>> jobsToRun() {
        return computeReadyJobs().stream().collect(groupingBy(Job::jobType));
    }

    /**
     * Finds and triggers jobs that can and should run but are currently not, and returns the
     * number of triggered jobs.
     *
     * Only one job is triggered each run for test jobs, since their environments have limited capacity.
     */
    public long triggerReadyJobs() {
        return computeReadyJobs().stream()
                // Split into test jobs (limited capacity) and production jobs.
                .collect(partitioningBy(job -> job.jobType().isTest()))
                .entrySet().stream()
                .flatMap(entry -> (entry.getKey()
                        // Test jobs: retries and application upgrades first, then oldest first;
                        // grouped by job type so each test environment triggers at most one job.
                        ? entry.getValue().stream()
                               .sorted(comparing(Job::isRetry)
                                               .thenComparing(Job::applicationUpgrade)
                                               .reversed()
                                               .thenComparing(Job::availableSince))
                               .collect(groupingBy(Job::jobType))
                        // Production jobs: grouped per application, no per-group limit.
                        : entry.getValue().stream()
                               .collect(groupingBy(Job::applicationId)))
                        .values().stream()
                        .map(jobs -> (Supplier<Long>) jobs.stream()
                                                          .filter(this::trigger)
                                                          .limit(entry.getKey() ? 1 : Long.MAX_VALUE)::count))
                .parallel().map(Supplier::get).reduce(0L, Long::sum);
    }

    /**
     * Attempts to trigger the given job for the given application and returns the outcome.
     *
     * If the build service can not find the given job, or claims it is illegal to trigger it,
     * the project id is removed from the application owning the job, to prevent further
     * trigger attempts.
     */
    public boolean trigger(Job job) {
        log.log(LogLevel.INFO, String.format("Triggering %s: %s", job, job.triggering));
        try {
            applications().lockOrThrow(job.applicationId(), application -> {
                if (application.get().deploymentJobs().deployedInternally())
                    jobs.start(job.applicationId(), job.jobType, new Versions(job.triggering.platform(),
                                                                              job.triggering.application(),
                                                                              job.triggering.sourcePlatform(),
                                                                              job.triggering.sourceApplication()));
                else
                    buildService.trigger(job);
                applications().store(application.withJobTriggering(job.jobType, job.triggering));
            });
            return true;
        }
        catch (RuntimeException e) {
            log.log(LogLevel.WARNING, "Exception triggering " + job + ": " + e);
            // These exception types indicate the job is unknown or illegal to trigger:
            // forget the project id so we stop retrying.
            if (e instanceof NoSuchElementException || e instanceof IllegalArgumentException)
                applications().lockOrThrow(job.applicationId(), application ->
                        applications().store(application.withProjectId(OptionalLong.empty())));
            return false;
        }
    }

    /** Force triggering of a job for given application. */
    public List<JobType> forceTrigger(ApplicationId applicationId, JobType jobType, String user) {
        Application application = applications().require(applicationId);
        if (jobType == component) {
            if (application.deploymentJobs().deployedInternally())
                throw new IllegalArgumentException(applicationId + " has no component job we can trigger.");
            buildService.trigger(BuildJob.of(applicationId,
                                             application.deploymentJobs().projectId().getAsLong(),
                                             jobType.jobName()));
            return singletonList(component);
        }
        Versions versions = Versions.from(application.change(), application,
                                          deploymentFor(application, jobType),
                                          controller.systemVersion());
        String reason = "Job triggered manually by " + user;
        // An untested production job is replaced by the test jobs needed to qualify it.
        return (jobType.isProduction() && ! isTested(application, versions)
                ? testJobs(application, versions, reason, clock.instant()).stream()
                : Stream.of(deploymentJob(application, versions, application.change(), jobType, reason, clock.instant())))
                .peek(this::trigger)
                .map(Job::jobType).collect(toList());
    }

    /** Triggers a change of this application, unless it already has a change. */
    public void triggerChange(ApplicationId applicationId, Change change) {
        applications().lockOrThrow(applicationId, application -> {
            if ( ! application.get().change().isPresent()) {
                if (change.application().isPresent())
                    application = application.withOutstandingChange(Change.empty());
                applications().store(application.withChange(change));
            }
        });
    }

    /** Cancels the given application's current change, or the selected parts of it. */
    public void cancelChange(ApplicationId applicationId, ChangesToCancel cancellation) {
        applications().lockOrThrow(applicationId, application -> {
            Change change;
            switch (cancellation) {
                case ALL: change = Change.empty(); break;
                case PLATFORM: change = application.get().change().withoutPlatform(); break;
                case APPLICATION: change = application.get().change().withoutApplication(); break;
                default: throw new IllegalArgumentException("Unknown cancellation choice '" + cancellation + "'!");
            }
            applications().store(application.withChange(change));
        });
    }

    /** Selects which parts of a change {@link #cancelChange} removes. */
    public enum ChangesToCancel { ALL, PLATFORM, APPLICATION }

    private ApplicationController applications() { return controller.applications(); }

    /** Returns the last success of the given job, if it targeted the given versions. */
    private Optional<JobRun> successOn(Application application, JobType jobType, Versions versions) {
        return application.deploymentJobs().statusOf(jobType).flatMap(JobStatus::lastSuccess)
                          .filter(versions::targetsMatch);
    }

    /** Returns the deployment, if any, in the zone the given job deploys to. */
    private Optional<Deployment> deploymentFor(Application application, JobType jobType) {
        return Optional.ofNullable(application.deployments().get(jobType.zone(controller.system())));
    }

    /** Returns the greater of two optional values; an absent value loses to a present one. */
    private static <T extends Comparable<T>> Optional<T> max(Optional<T> o1, Optional<T> o2) {
        return ! o1.isPresent() ? o2 : ! o2.isPresent() ? o1 : o1.get().compareTo(o2.get()) >= 0 ? o1 : o2;
    }

    /** Returns the set of all jobs which have changes to propagate from the upstream steps. */
    private List<Job> computeReadyJobs() {
        return ApplicationList.from(applications().asList())
                              .withProjectId()
                              .withChanges()
                              .idList().stream()
                              .map(this::computeReadyJobs)
                              .flatMap(Collection::stream)
                              .collect(toList());
    }

    /**
     * Finds the next step to trigger for the given application, if any, and returns these as a list.
     */
    private List<Job> computeReadyJobs(ApplicationId id) {
        List<Job> jobs = new ArrayList<>();
        applications().get(id).ifPresent(application -> {
            Change change = application.change();
            // The later of the two most recent test successes gates the first production step.
            Optional<Instant> completedAt = max(application.deploymentJobs().statusOf(systemTest)
                                                           .<Instant>flatMap(job -> job.lastSuccess().map(JobRun::at)),
                                                application.deploymentJobs().statusOf(stagingTest)
                                                           .<Instant>flatMap(job -> job.lastSuccess().map(JobRun::at)));
            String reason = "New change available";
            List<Job> testJobs = null; // null means "no test jobs computed yet"; emptyList() means "none needed".
            DeploymentSteps steps = steps(application.deploymentSpec());
            if (change.isPresent()) {
                // Walk the production steps in order; completedAt carries readiness from one step to the next.
                for (Step step : steps.production()) {
                    List<JobType> stepJobs = steps.toJobs(step);
                    List<JobType> remainingJobs = stepJobs.stream()
                                                          .filter(job -> ! isComplete(change, application, job))
                                                          .collect(toList());
                    if (!remainingJobs.isEmpty()) {
                        for (JobType job : remainingJobs) {
                            Versions versions = Versions.from(change, application,
                                                              deploymentFor(application, job),
                                                              controller.systemVersion());
                            if (isTested(application, versions)) {
                                if (completedAt.isPresent() && canTrigger(job, versions, application, stepJobs)) {
                                    jobs.add(deploymentJob(application, versions, change, job, reason, completedAt.get()));
                                }
                                if (!alreadyTriggered(application, versions)) {
                                    testJobs = emptyList();
                                }
                            }
                            else if (testJobs == null) {
                                testJobs = testJobs(application, versions,
                                                    String.format("Testing deployment for %s (%s)",
                                                                  job.jobName(), versions.toString()),
                                                    completedAt.orElseGet(clock::instant));
                            }
                        }
                        // Later steps must wait for this incomplete step.
                        completedAt = Optional.empty();
                    }
                    else {
                        if (stepJobs.isEmpty()) {
                            // An empty step is a delay: push readiness forward by its duration.
                            Duration delay = ((DeploymentSpec.Delay) step).duration();
                            completedAt = completedAt.map(at -> at.plus(delay)).filter(at -> !at.isAfter(clock.instant()));
                            reason += " after a delay of " + delay;
                        }
                        else {
                            // Step is complete: readiness is when its last job completed.
                            completedAt = stepJobs.stream()
                                                  .map(job -> application.deploymentJobs().statusOf(job).get().lastCompleted().get().at())
                                                  .max(naturalOrder());
                            reason = "Available change in " + stepJobs.stream().map(JobType::jobName).collect(joining(", "));
                        }
                    }
                }
            }
            if (testJobs == null) {
                // Nothing in production needs testing: test any outstanding change ahead of time.
                Change latestChange = application.outstandingChange().application().isPresent()
                        ? change.with(application.outstandingChange().application().get())
                        : change;
                testJobs = testJobs(application,
                                    Versions.from(latestChange, application,
                                                  steps.sortedDeployments(application.productionDeployments().values()).stream().findFirst(),
                                                  controller.systemVersion()),
                                    "Testing last changes outside prod", clock.instant());
            }
            jobs.addAll(testJobs);
        });
        return Collections.unmodifiableList(jobs);
    }

    /** Returns whether given job should be triggered */
    private boolean canTrigger(JobType job, Versions versions, Application application, List<JobType> parallelJobs) {
        if (jobStateOf(application, job) != idle) return false;
        // A production step must wait for earlier steps' jobs to finish, but may run in
        // parallel with jobs of its own step (parallelJobs).
        if (parallelJobs != null && ! parallelJobs.containsAll(runningProductionJobs(application))) return false;
        if (job.isProduction() && isSuspendedInAnotherZone(application, job.zone(controller.system()))) return false;
        return triggerAt(clock.instant(), job, versions, application);
    }

    /** Returns whether given job should be triggered */
    private boolean canTrigger(JobType job, Versions versions, Application application) {
        return canTrigger(job, versions, application, null);
    }

    /** Returns whether any other production zone of the application is currently suspended. */
    private boolean isSuspendedInAnotherZone(Application application, ZoneId zone) {
        for (Deployment deployment : application.productionDeployments().values()) {
            if (   ! deployment.zone().equals(zone)
                &&   controller.applications().isSuspended(new DeploymentId(application.id(), deployment.zone())))
                return true;
        }
        return false;
    }

    /** Returns whether the given job can trigger at the given instant */
    public boolean triggerAt(Instant instant, JobType job, Versions versions, Application application) {
        Optional<JobStatus> jobStatus = application.deploymentJobs().statusOf(job);
        if (!jobStatus.isPresent()) return true;
        if (jobStatus.get().isSuccess()) return true; // Success
        if (!jobStatus.get().lastCompleted().isPresent()) return true; // Never completed
        if (!jobStatus.get().firstFailing().isPresent()) return true; // Should not happen, but be safe
        if (!versions.targetsMatch(jobStatus.get().lastCompleted().get())) return true; // Always trigger as targets have changed
        if (application.deploymentSpec().upgradePolicy() == DeploymentSpec.UpgradePolicy.canary) return true; // Don't throttle canaries
        // Below: back off retries with increasing delay as the failure ages.
        Instant firstFailing = jobStatus.get().firstFailing().get().at();
        Instant lastCompleted = jobStatus.get().lastCompleted().get().at();
        if (firstFailing.isAfter(instant.minus(Duration.ofMinutes(1)))) return true;
        if (job.isTest() && jobStatus.get().isOutOfCapacity()) {
            // Retry out-of-capacity test jobs quickly, as capacity may free up at any time.
            return lastCompleted.isBefore(instant.minus(Duration.ofMinutes(1)));
        }
        if (firstFailing.isAfter(instant.minus(Duration.ofHours(1)))) {
            return lastCompleted.isBefore(instant.minus(Duration.ofMinutes(10)));
        }
        return lastCompleted.isBefore(instant.minus(Duration.ofHours(2)));
    }

    /** Returns the production jobs of the application which are currently running. */
    private List<JobType> runningProductionJobs(Application application) {
        return application.deploymentJobs().jobStatus().keySet().parallelStream()
                          .filter(JobType::isProduction)
                          .filter(job -> isRunning(application, job))
                          .collect(toList());
    }

    /**
     * Returns whether the given job is currently running; false if completed since last
     * triggered, asking the build service otherwise.
     */
    private boolean isRunning(Application application, JobType jobType) {
        return    ! application.deploymentJobs().statusOf(jobType)
                               .flatMap(job -> job.lastCompleted().map(run -> run.at().isAfter(job.lastTriggered().get().at())))
                               .orElse(false)
               &&   EnumSet.of(running, queued).contains(jobStateOf(application, jobType));
    }

    /** Returns the state of the given job, from either the job controller or the build service. */
    private JobState jobStateOf(Application application, JobType jobType) {
        if (application.deploymentJobs().deployedInternally()) {
            Optional<Run> run = controller.jobController().last(application.id(), jobType);
            return run.isPresent() && ! run.get().hasEnded() ? JobState.running : JobState.idle;
        }
        return buildService.stateOf(BuildJob.of(application.id(),
                                                application.deploymentJobs().projectId().getAsLong(),
                                                jobType.jobName()));
    }

    /**
     * Returns whether the given change is complete for the given application for the given job.
     *
     * Any job is complete if the given change is already successful on that job.
     * A production job is also considered complete if its current change is strictly dominated by what
     * is already deployed in its zone, i.e., no parts of the change are upgrades, and the full current
     * change for the application downgrades the deployment, which is an acknowledgement that the deployed
     * version is broken somehow, such that the job may be locked in failure until a new version is released.
     */
    public boolean isComplete(Change change, Application application, JobType jobType) {
        Optional<Deployment> existingDeployment = deploymentFor(application, jobType);
        return    application.deploymentJobs().statusOf(jobType).flatMap(JobStatus::lastSuccess)
                             .map(job ->    change.platform().map(job.platform()::equals).orElse(true)
                                         && change.application().map(job.application()::equals).orElse(true))
                             .orElse(false)
               ||    jobType.isProduction()
                  && existingDeployment.map(deployment ->    ! isUpgrade(change, deployment)
                                                          &&   isDowngrade(application.change(), deployment))
                                       .orElse(false);
    }

    /** Returns whether the change upgrades either the platform or application of the deployment. */
    private static boolean isUpgrade(Change change, Deployment deployment) {
        return change.upgrades(deployment.version()) || change.upgrades(deployment.applicationVersion());
    }

    /** Returns whether the change downgrades either the platform or application of the deployment. */
    private static boolean isDowngrade(Change change, Deployment deployment) {
        return change.downgrades(deployment.version()) || change.downgrades(deployment.applicationVersion());
    }

    /** Returns whether the given versions are tested, or an equivalent production job was already triggered. */
    private boolean isTested(Application application, Versions versions) {
        return    testedIn(application, systemTest, versions)
               && testedIn(application, stagingTest, versions)
               || alreadyTriggered(application, versions);
    }

    /** Returns whether the given test type has succeeded on the given versions. */
    public boolean testedIn(Application application, JobType testType, Versions versions) {
        if (testType == systemTest)
            return successOn(application, systemTest, versions).isPresent();
        if (testType == stagingTest)
            return successOn(application, stagingTest, versions).filter(versions::sourcesMatchIfPresent).isPresent();
        throw new IllegalArgumentException(testType + " is not a test job!");
    }

    /** Returns whether a production job has already been triggered with the given versions. */
    public boolean alreadyTriggered(Application application, Versions versions) {
        return application.deploymentJobs().jobStatus().values().stream()
                          .filter(job -> job.type().isProduction())
                          .anyMatch(job -> job.lastTriggered()
                                              .filter(versions::targetsMatch)
                                              .filter(versions::sourcesMatchIfPresent)
                                              .isPresent());
    }

    /** Returns whether a new application version should become the current change immediately. */
    private boolean acceptNewApplicationVersion(Application application) {
        if ( ! application.deploymentSpec().canChangeRevisionAt(clock.instant())) return false;
        if (application.change().application().isPresent()) return true; // Replacing a previous application change is ok.
        if (application.deploymentJobs().hasFailures()) return true; // Allow changes to fix upgrade problems.
        return ! application.change().platform().isPresent();
    }

    /** Returns the parts of the application's current change which are not yet complete everywhere. */
    private Change remainingChange(Application application) {
        DeploymentSteps steps = steps(application.deploymentSpec());
        List<JobType> jobs = steps.production().isEmpty() ? steps.testJobs() : steps.productionJobs();
        Change change = application.change();
        // The platform part is done when the change minus its application part is complete everywhere,
        // and vice versa for the application part.
        if (jobs.stream().allMatch(job -> isComplete(application.change().withoutApplication(), application, job)))
            change = change.withoutPlatform();
        if (jobs.stream().allMatch(job -> isComplete(application.change().withoutPlatform(), application, job)))
            change = change.withoutApplication();
        return change;
    }

    /**
     * Returns the list of test jobs that should run now, and that need to succeed on the given
     * versions for it to be considered tested.
     */
    private List<Job> testJobs(Application application, Versions versions, String reason, Instant availableSince) {
        List<Job> jobs = new ArrayList<>();
        for (JobType jobType : steps(application.deploymentSpec()).testJobs()) {
            // System test need not match sources; staging test must.
            Optional<JobRun> completion = successOn(application, jobType, versions)
                    .filter(run -> versions.sourcesMatchIfPresent(run) || jobType == systemTest);
            if ( ! completion.isPresent() && canTrigger(jobType, versions, application))
                jobs.add(deploymentJob(application, versions, application.change(), jobType, reason, availableSince));
        }
        return jobs;
    }

    /** Creates a triggerable job for the given application, versions and job type. */
    private Job deploymentJob(Application application, Versions versions, Change change, JobType jobType, String reason, Instant availableSince) {
        boolean isRetry = application.deploymentJobs().statusOf(jobType)
                                     .map(JobStatus::isOutOfCapacity)
                                     .orElse(false);
        if (isRetry) reason += "; retrying on out of capacity";

        JobRun triggering = JobRun.triggering(versions.targetPlatform(), versions.targetApplication(),
                                              versions.sourcePlatform(), versions.sourceApplication(),
                                              reason, clock.instant());
        return new Job(application, triggering, jobType, availableSince, isRetry, change.application().isPresent());
    }

    /** A job to trigger, with the metadata used to prioritize it. */
    private static class Job extends BuildJob {

        private final JobType jobType;
        private final JobRun triggering;
        private final Instant availableSince;
        private final boolean isRetry;
        private final boolean isApplicationUpgrade;

        private Job(Application application, JobRun triggering, JobType jobType, Instant availableSince,
                    boolean isRetry, boolean isApplicationUpgrade) {
            super(application.id(), application.deploymentJobs().projectId().getAsLong(), jobType.jobName());
            this.jobType = jobType;
            this.triggering = triggering;
            this.availableSince = availableSince;
            this.isRetry = isRetry;
            this.isApplicationUpgrade = isApplicationUpgrade;
        }

        JobType jobType() { return jobType; }
        Instant availableSince() { return availableSince; }
        boolean isRetry() { return isRetry; }
        boolean applicationUpgrade() { return isApplicationUpgrade; }

    }

}
:+1: :100: :1st_place_medal: :tada:
/**
 * Brings the node towards the state the node repository says it should be in:
 * reads the node spec, then starts/removes the container and updates the node
 * repository according to the node's state.
 *
 * Side-effecting and order-dependent: container removal/start, maintainer calls and
 * orchestrator resume must happen in the order written here.
 */
void converge() {
    final Optional<NodeSpec> optionalNode = nodeRepository.getOptionalNode(context.hostname().value());

    // Log every node-repo state transition, including to/from "absent" (null).
    Node.State newState = optionalNode.map(NodeSpec::getState).orElse(null);
    if (newState != lastState) {
        context.log(logger, LogLevel.INFO, "State changed: " + stateDescription(lastState) + " -> " + stateDescription(newState));
        lastState = newState;
    }

    // We just deleted the node from the node repo (dirty -> ready path below), so
    // its absence is expected and there is nothing to converge.
    if (!optionalNode.isPresent() && expectNodeNotInNodeRepo) return;

    final NodeSpec node = optionalNode.orElseThrow(() ->
            new IllegalStateException(String.format("Node '%s' missing from node repository", context.hostname())));
    expectNodeNotInNodeRepo = false;

    Optional<Container> container = getContainer();
    if (!node.equals(lastNode)) {
        // Spec changed: refresh metrics config if the container is running, and cache the new spec.
        if (container.map(c -> c.state.isRunning()).orElse(false)) {
            storageMaintainer.writeMetricsConfig(context, node);
        }
        context.log(logger, LogLevel.DEBUG, "Loading new node spec: " + node.toString());
        lastNode = node;
    }

    switch (node.getState()) {
        case ready:
        case reserved:
        case parked:
        case failed:
            // No container should run in these states.
            removeContainerIfNeededUpdateContainerState(node, container);
            updateNodeRepoWithCurrentAttributes(node);
            break;
        case active:
            storageMaintainer.handleCoreDumpsForContainer(context, node, container);

            // Clean up old files when disk utilization reaches 80% of the node's minimum available disk.
            storageMaintainer.getDiskUsageFor(context)
                    .map(diskUsage -> (double) diskUsage / BYTES_IN_GB / node.getMinDiskAvailableGb())
                    .filter(diskUtil -> diskUtil >= 0.8)
                    .ifPresent(diskUtil -> storageMaintainer.removeOldFilesFromNode(context));

            scheduleDownLoadIfNeeded(node);
            if (isDownloadingImage()) {
                // Cannot proceed until the wanted Docker image is available locally.
                context.log(logger, LogLevel.DEBUG, "Waiting for image to download " + imageBeingDownloaded.asString());
                return;
            }
            container = removeContainerIfNeededUpdateContainerState(node, container);
            if (! container.isPresent()) {
                // containerState brackets the start attempt; see ContainerState javadoc on the class.
                containerState = STARTING;
                startContainer(node);
                containerState = UNKNOWN;
                aclMaintainer.ifPresent(AclMaintainer::converge);
            }

            startServicesIfNeeded();
            resumeNodeIfNeeded(node);
            athenzCredentialsMaintainer.ifPresent(maintainer -> maintainer.converge(context));
            healthChecker.ifPresent(checker -> checker.verifyHealth(context));

            // Node repo is updated before orchestrator resume, so the node is never
            // resumed with stale attributes.
            updateNodeRepoWithCurrentAttributes(node);
            context.log(logger, "Call resume against Orchestrator");
            orchestrator.resume(context.hostname().value());
            break;
        case inactive:
            removeContainerIfNeededUpdateContainerState(node, container);
            updateNodeRepoWithCurrentAttributes(node);
            break;
        case provisioned:
            nodeRepository.setNodeState(context.hostname().value(), Node.State.dirty);
            break;
        case dirty:
            removeContainerIfNeededUpdateContainerState(node, container);
            context.log(logger, "State is " + node.getState() + ", will delete application storage and mark node as ready");
            athenzCredentialsMaintainer.ifPresent(maintainer -> maintainer.clearCredentials(context));
            storageMaintainer.archiveNodeStorage(context);
            updateNodeRepoWithCurrentAttributes(node);
            nodeRepository.setNodeState(context.hostname().value(), Node.State.ready);
            // The node may now disappear from the node repo; don't treat that as an error next tick.
            expectNodeNotInNodeRepo = true;
            break;
        default:
            throw new RuntimeException("UNKNOWN STATE " + node.getState().name());
    }
}
throw new RuntimeException("UNKNOWN STATE " + node.getState().name());
/**
 * Brings the node towards the state the node repository says it should be in:
 * reads the node spec, then starts/removes the container and updates the node
 * repository according to the node's state.
 *
 * Side-effecting and order-dependent: container removal/start, maintainer calls and
 * orchestrator resume must happen in the order written here.
 */
void converge() {
    final Optional<NodeSpec> optionalNode = nodeRepository.getOptionalNode(context.hostname().value());

    // We just deleted the node from the node repo (dirty -> ready path below), so
    // its absence is expected and there is nothing to converge.
    if (!optionalNode.isPresent() && expectNodeNotInNodeRepo) {
        context.log(logger, LogLevel.INFO, "Node removed from node repo (as expected)");
        return;
    }

    final NodeSpec node = optionalNode.orElseThrow(() ->
            new IllegalStateException(String.format("Node '%s' missing from node repository", context.hostname())));
    expectNodeNotInNodeRepo = false;

    Optional<Container> container = getContainer();
    if (!node.equals(lastNode)) {
        // Spec changed: log the diff, refresh metrics config if the container is
        // running, and cache the new spec.
        logChangesToNodeSpec(lastNode, node);

        if (container.map(c -> c.state.isRunning()).orElse(false)) {
            storageMaintainer.writeMetricsConfig(context, node);
        }
        lastNode = node;
    }

    switch (node.getState()) {
        case ready:
        case reserved:
        case parked:
        case failed:
            // No container should run in these states.
            removeContainerIfNeededUpdateContainerState(node, container);
            updateNodeRepoWithCurrentAttributes(node);
            break;
        case active:
            storageMaintainer.handleCoreDumpsForContainer(context, node, container);

            // Clean up old files when disk utilization reaches 80% of the node's minimum available disk.
            storageMaintainer.getDiskUsageFor(context)
                    .map(diskUsage -> (double) diskUsage / BYTES_IN_GB / node.getMinDiskAvailableGb())
                    .filter(diskUtil -> diskUtil >= 0.8)
                    .ifPresent(diskUtil -> storageMaintainer.removeOldFilesFromNode(context));

            scheduleDownLoadIfNeeded(node);
            if (isDownloadingImage()) {
                // Cannot proceed until the wanted Docker image is available locally.
                context.log(logger, LogLevel.DEBUG, "Waiting for image to download " + imageBeingDownloaded.asString());
                return;
            }
            container = removeContainerIfNeededUpdateContainerState(node, container);
            if (! container.isPresent()) {
                // containerState brackets the start attempt; see ContainerState javadoc on the class.
                containerState = STARTING;
                startContainer(node);
                containerState = UNKNOWN;
                aclMaintainer.ifPresent(AclMaintainer::converge);
            }

            startServicesIfNeeded();
            resumeNodeIfNeeded(node);
            athenzCredentialsMaintainer.ifPresent(maintainer -> maintainer.converge(context));
            healthChecker.ifPresent(checker -> checker.verifyHealth(context));

            // Node repo is updated before orchestrator resume, so the node is never
            // resumed with stale attributes.
            updateNodeRepoWithCurrentAttributes(node);
            context.log(logger, "Call resume against Orchestrator");
            orchestrator.resume(context.hostname().value());
            break;
        case inactive:
            removeContainerIfNeededUpdateContainerState(node, container);
            updateNodeRepoWithCurrentAttributes(node);
            break;
        case provisioned:
            nodeRepository.setNodeState(context.hostname().value(), Node.State.dirty);
            break;
        case dirty:
            removeContainerIfNeededUpdateContainerState(node, container);
            context.log(logger, "State is " + node.getState() + ", will delete application storage and mark node as ready");
            athenzCredentialsMaintainer.ifPresent(maintainer -> maintainer.clearCredentials(context));
            storageMaintainer.archiveNodeStorage(context);
            updateNodeRepoWithCurrentAttributes(node);
            nodeRepository.setNodeState(context.hostname().value(), Node.State.ready);
            // The node may now disappear from the node repo; don't treat that as an error next tick.
            expectNodeNotInNodeRepo = true;
            break;
        default:
            throw new RuntimeException("UNKNOWN STATE " + node.getState().name());
    }
}
/**
 * Agent responsible for converging a single node (a Docker container and the services inside it)
 * toward the state wanted by the node repository.
 *
 * <p>Threading: a dedicated "tick" thread runs the converge loop; {@link #setFrozen} and
 * {@link #stop} are called from other threads and coordinate through {@code monitor}.
 * A separate single-threaded scheduler periodically restarts filebeat.
 */
class NodeAgentImpl implements NodeAgent {
    private static final long BYTES_IN_GB = 1_000_000_000L;

    private static final Logger logger = Logger.getLogger(NodeAgentImpl.class.getName());

    // Set once by stop(); the tick loop exits when it observes this.
    private final AtomicBoolean terminated = new AtomicBoolean(false);

    // Frozen-state handshake: wantFrozen is the requested value, isFrozen the applied one.
    // All three flags below are guarded by 'monitor'.
    private boolean isFrozen = true;
    private boolean wantFrozen = false;
    private boolean workToDoNow = true;
    // True only right after we asked the node repo to mark the node ready (dirty branch of
    // converge()); lets the next tick treat "node gone from node repo" as expected.
    private boolean expectNodeNotInNodeRepo = false;

    private final Object monitor = new Object();

    // Non-null while an image pull is in flight; cleared when the pull completes.
    private DockerImage imageBeingDownloaded = null;

    private final NodeAgentContext context;
    private final NodeRepository nodeRepository;
    private final Orchestrator orchestrator;
    private final DockerOperations dockerOperations;
    private final StorageMaintainer storageMaintainer;
    private final Clock clock;
    private final Duration timeBetweenEachConverge;
    private final Optional<AthenzCredentialsMaintainer> athenzCredentialsMaintainer;
    private final Optional<AclMaintainer> aclMaintainer;

    private int numberOfUnhandledException = 0;
    private Instant lastConverge;
    // NOTE(review): lastState is never read or written in this class as visible here —
    // looks like dead state; confirm against the rest of the file before removing.
    private Node.State lastState = null;

    private final Thread loopThread;
    private final Optional<HealthChecker> healthChecker;

    private final ScheduledExecutorService filebeatRestarter =
            Executors.newScheduledThreadPool(1, ThreadFactoryFactory.getDaemonThreadFactory("filebeatrestarter"));
    private Consumer<String> serviceRestarter;
    private Optional<Future<?>> currentFilebeatRestarter = Optional.empty();

    // Avoid redundant resume/start-services work on every tick; reset when the container is
    // stopped/suspended or restarted.
    private boolean hasResumedNode = false;
    private boolean hasStartedServices = true;

    /**
     * ABSENT means container is definitely absent - A container that was absent will not suddenly
     * appear without NodeAgent explicitly starting it.
     * STARTING state is set just before we attempt to start a container; if successful we move to
     * the next state. Otherwise we can't be certain. A container that was running a minute ago may
     * no longer be running without NodeAgent doing anything (container could have crashed).
     * Therefore we always have to ask docker daemon to get updated state of the container.
     */
    enum ContainerState {
        ABSENT,
        STARTING,
        UNKNOWN
    }

    private ContainerState containerState = UNKNOWN;

    // Last NodeSpec seen by converge(); also read by updateContainerNodeMetrics().
    private NodeSpec lastNode = null;
    private CpuUsageReporter lastCpuMetric = new CpuUsageReporter();

    public NodeAgentImpl(
            final NodeAgentContext context,
            final NodeRepository nodeRepository,
            final Orchestrator orchestrator,
            final DockerOperations dockerOperations,
            final StorageMaintainer storageMaintainer,
            final Clock clock,
            final Duration timeBetweenEachConverge,
            final Optional<AthenzCredentialsMaintainer> athenzCredentialsMaintainer,
            final Optional<AclMaintainer> aclMaintainer,
            final Optional<HealthChecker> healthChecker) {
        this.context = context;
        this.nodeRepository = nodeRepository;
        this.orchestrator = orchestrator;
        this.dockerOperations = dockerOperations;
        this.storageMaintainer = storageMaintainer;
        this.clock = clock;
        this.timeBetweenEachConverge = timeBetweenEachConverge;
        this.lastConverge = clock.instant();
        this.athenzCredentialsMaintainer = athenzCredentialsMaintainer;
        this.aclMaintainer = aclMaintainer;
        this.healthChecker = healthChecker;

        // The converge loop: tick() until stop() flips 'terminated'. Any throwable escaping
        // tick() kills the loop (but is counted and logged).
        this.loopThread = new Thread(() -> {
            try {
                while (!terminated.get()) tick();
            } catch (Throwable t) {
                numberOfUnhandledException++;
                context.log(logger, LogLevel.ERROR, "Unhandled throwable, ignoring", t);
            }
        });
        this.loopThread.setName("tick-" + context.hostname());
    }

    /**
     * Requests the agent to (un)freeze and returns whether the applied state already matches the
     * requested one, i.e. false means the change has been requested but not yet taken effect.
     */
    @Override
    public boolean setFrozen(boolean frozen) {
        synchronized (monitor) {
            if (wantFrozen != frozen) {
                wantFrozen = frozen;
                context.log(logger, LogLevel.DEBUG, wantFrozen ? "Freezing" : "Unfreezing");
                signalWorkToBeDone();
            }

            return isFrozen == frozen;
        }
    }

    /** Starts the converge loop thread and wires up the in-container service restarter. */
    @Override
    public void start() {
        context.log(logger, "Starting with interval " + timeBetweenEachConverge.toMillis() + " ms");
        loopThread.start();

        serviceRestarter = service -> {
            try {
                ProcessResult processResult = dockerOperations.executeCommandInContainerAsRoot(
                        context, "service", service, "restart");

                if (!processResult.isSuccess()) {
                    context.log(logger, LogLevel.ERROR, "Failed to restart service " + service + ": " + processResult);
                }
            } catch (Exception e) {
                context.log(logger, LogLevel.ERROR, "Failed to restart service " + service, e);
            }
        };
    }

    /**
     * Stops the agent: shuts down the filebeat scheduler, terminates the converge loop, and blocks
     * until both have fully finished. May only be called once.
     */
    @Override
    public void stop() {
        filebeatRestarter.shutdown();
        if (!terminated.compareAndSet(false, true)) {
            throw new RuntimeException("Can not re-stop a node agent.");
        }
        signalWorkToBeDone();

        // Loop (rather than join once) so an interrupt cannot make us return before both the
        // tick thread and the scheduler are actually done.
        do {
            try {
                loopThread.join();
                filebeatRestarter.awaitTermination(Long.MAX_VALUE, TimeUnit.NANOSECONDS);
            } catch (InterruptedException e) {
                context.log(logger, LogLevel.ERROR,
                        "Interrupted while waiting for converge thread and filebeatRestarter scheduler to shutdown");
            }
        } while (loopThread.isAlive() || !filebeatRestarter.isTerminated());

        healthChecker.ifPresent(HealthChecker::close);

        context.log(logger, "Stopped");
    }

    /** Starts services inside the container unless they have already been started. */
    void startServicesIfNeeded() {
        if (!hasStartedServices) {
            context.log(logger, "Starting services");
            dockerOperations.startServices(context);
            hasStartedServices = true;
        }
    }

    /**
     * Runs the node's optional resume command once per "suspension", and on first resume also
     * writes the metrics config and schedules the daily filebeat restart.
     */
    void resumeNodeIfNeeded(NodeSpec node) {
        if (!hasResumedNode) {
            if (!currentFilebeatRestarter.isPresent()) {
                storageMaintainer.writeMetricsConfig(context, node);
                currentFilebeatRestarter = Optional.of(filebeatRestarter.scheduleWithFixedDelay(
                        () -> serviceRestarter.accept("filebeat"), 1, 1, TimeUnit.DAYS));
            }
            context.log(logger, LogLevel.DEBUG, "Starting optional node program resume command");
            dockerOperations.resumeNode(context);
            hasResumedNode = true;
        }
    }

    /**
     * Publishes restart/reboot generations and the current Docker image to the node repo, but only
     * the attributes whose current value differs from the wanted one (and only if anything changed).
     */
    private void updateNodeRepoWithCurrentAttributes(final NodeSpec node) {
        final NodeAttributes currentNodeAttributes = new NodeAttributes();
        final NodeAttributes newNodeAttributes = new NodeAttributes();

        if (!Objects.equals(node.getCurrentRestartGeneration(), node.getWantedRestartGeneration())) {
            currentNodeAttributes.withRestartGeneration(node.getCurrentRestartGeneration());
            newNodeAttributes.withRestartGeneration(node.getWantedRestartGeneration());
        }

        if (!Objects.equals(node.getCurrentRebootGeneration(), node.getWantedRebootGeneration())) {
            currentNodeAttributes.withRebootGeneration(node.getCurrentRebootGeneration());
            newNodeAttributes.withRebootGeneration(node.getWantedRebootGeneration());
        }

        // Only claim the wanted image is running if we are past the STARTING window (UNKNOWN means
        // the container was started successfully at some point).
        Optional<DockerImage> actualDockerImage = node.getWantedDockerImage().filter(n -> containerState == UNKNOWN);
        if (!Objects.equals(node.getCurrentDockerImage(), actualDockerImage)) {
            currentNodeAttributes.withDockerImage(node.getCurrentDockerImage().orElse(new DockerImage("")));
            newNodeAttributes.withDockerImage(actualDockerImage.orElse(new DockerImage("")));
        }

        publishStateToNodeRepoIfChanged(currentNodeAttributes, newNodeAttributes);
    }

    /** Pushes the new attributes to the node repo iff they differ from the current ones. */
    private void publishStateToNodeRepoIfChanged(NodeAttributes currentAttributes, NodeAttributes newAttributes) {
        if (!currentAttributes.equals(newAttributes)) {
            context.log(logger, "Publishing new set of attributes to node repo: %s -> %s",
                    currentAttributes, newAttributes);
            nodeRepository.updateNodeAttributes(context.hostname().value(), newAttributes);
        }
    }

    /** Creates and starts the container, resetting per-container bookkeeping. */
    private void startContainer(NodeSpec node) {
        ContainerData containerData = createContainerData(context, node);
        dockerOperations.createContainer(context, node, containerData);
        dockerOperations.startContainer(context);
        lastCpuMetric = new CpuUsageReporter();

        hasStartedServices = true; // Services are started at container creation
        hasResumedNode = false;
        context.log(logger, "Container successfully started, new containerState is " + containerState);
    }

    /**
     * Removes the container if required (see {@link #shouldRemoveContainer}); otherwise restarts
     * services inside it if a restart was requested. Returns the container if it is kept,
     * empty if it was removed (or was absent).
     */
    private Optional<Container> removeContainerIfNeededUpdateContainerState(
            NodeSpec node, Optional<Container> existingContainer) {
        return existingContainer
                .flatMap(container -> removeContainerIfNeeded(node, container))
                .map(container -> {
                    shouldRestartServices(node).ifPresent(restartReason -> {
                        context.log(logger, "Will restart services: " + restartReason);
                        restartServices(node, container);
                    });
                    return container;
                });
    }

    /** Returns a human-readable reason if the wanted restart generation is ahead of the current one. */
    private Optional<String> shouldRestartServices(NodeSpec node) {
        if (!node.getWantedRestartGeneration().isPresent()) return Optional.empty();

        if (node.getCurrentRestartGeneration().get() < node.getWantedRestartGeneration().get()) {
            return Optional.of("Restart requested - wanted restart generation has been bumped: "
                    + node.getCurrentRestartGeneration().get() + " -> "
                    + node.getWantedRestartGeneration().get());
        }
        return Optional.empty();
    }

    /** Restarts Vespa services in the running container, suspending via the Orchestrator first. */
    private void restartServices(NodeSpec node, Container existingContainer) {
        if (existingContainer.state.isRunning() && node.getState() == Node.State.active) {
            context.log(logger, "Restarting services");
            // Since we are restarting the services we need to suspend the node.
            orchestratorSuspendNode();
            dockerOperations.restartVespa(context);
        }
    }

    /** Stops services in the container; a vanished container just flips our state to ABSENT. */
    @Override
    public void stopServices() {
        context.log(logger, "Stopping services");
        if (containerState == ABSENT) return;
        try {
            hasStartedServices = hasResumedNode = false;
            dockerOperations.stopServices(context);
        } catch (ContainerNotFoundException e) {
            containerState = ABSENT;
        }
    }

    /** Suspends services in the container; failures are logged and otherwise ignored (best effort). */
    @Override
    public void suspend() {
        context.log(logger, "Suspending services on node");
        if (containerState == ABSENT) return;
        try {
            hasResumedNode = false;
            dockerOperations.suspendNode(context);
        } catch (ContainerNotFoundException e) {
            containerState = ABSENT;
        } catch (RuntimeException e) {
            // It's bad to continue as-if nothing happened, but on the other hand if the node is
            // supposed to be removed anyway there is no point in failing hard here.
            context.log(logger, LogLevel.WARNING, "Failed trying to suspend container", e);
        }
    }

    /**
     * Returns a reason the existing container must be removed, or empty if it can be kept:
     * node being decommissioned, wrong image, not running, wrong resources, reboot wanted,
     * or a previous start attempt that never completed (state still STARTING).
     */
    private Optional<String> shouldRemoveContainer(NodeSpec node, Container existingContainer) {
        final Node.State nodeState = node.getState();
        if (nodeState == Node.State.dirty || nodeState == Node.State.provisioned) {
            return Optional.of("Node in state " + nodeState + ", container should no longer be running");
        }
        if (node.getWantedDockerImage().isPresent() &&
                !node.getWantedDockerImage().get().equals(existingContainer.image)) {
            return Optional.of("The node is supposed to run a new Docker image: "
                    + existingContainer.image.asString() + " -> " + node.getWantedDockerImage().get().asString());
        }
        if (!existingContainer.state.isRunning()) {
            return Optional.of("Container no longer running");
        }

        ContainerResources wantedContainerResources = ContainerResources.from(
                node.getMinCpuCores(), node.getMinMainMemoryAvailableGb());
        if (!wantedContainerResources.equals(existingContainer.resources)) {
            return Optional.of("Container should be running with different resource allocation, wanted: " +
                    wantedContainerResources + ", actual: " + existingContainer.resources);
        }

        if (node.getCurrentRebootGeneration() < node.getWantedRebootGeneration()) {
            return Optional.of(String.format("Container reboot wanted. Current: %d, Wanted: %d",
                    node.getCurrentRebootGeneration(), node.getWantedRebootGeneration()));
        }

        if (containerState == STARTING) return Optional.of("Container failed to start");
        return Optional.empty();
    }

    /**
     * If removal is needed, gracefully shuts down (orchestrator-suspend, suspend, stop services —
     * best effort), handles core dumps, removes the container, and returns empty.
     * Otherwise returns the container unchanged.
     */
    private Optional<Container> removeContainerIfNeeded(NodeSpec node, Container existingContainer) {
        Optional<String> removeReason = shouldRemoveContainer(node, existingContainer);
        if (removeReason.isPresent()) {
            context.log(logger, "Will remove container: " + removeReason.get());

            if (existingContainer.state.isRunning()) {
                if (node.getState() == Node.State.active) {
                    orchestratorSuspendNode();
                }

                try {
                    if (node.getState() != Node.State.dirty) {
                        suspend();
                    }
                    stopServices();
                } catch (Exception e) {
                    context.log(logger, LogLevel.WARNING, "Failed stopping services, ignoring", e);
                }
            }

            stopFilebeatSchedulerIfNeeded();
            storageMaintainer.handleCoreDumpsForContainer(context, node, Optional.of(existingContainer));
            dockerOperations.removeContainer(context, existingContainer);
            containerState = ABSENT;
            context.log(logger, "Container successfully removed, new containerState is " + containerState);
            return Optional.empty();
        }
        return Optional.of(existingContainer);
    }

    /** Kicks off an async pull of the wanted image if it differs from the current one. */
    private void scheduleDownLoadIfNeeded(NodeSpec node) {
        if (node.getCurrentDockerImage().equals(node.getWantedDockerImage())) return;

        if (dockerOperations.pullImageAsyncIfNeeded(node.getWantedDockerImage().get())) {
            imageBeingDownloaded = node.getWantedDockerImage().get();
        } else if (imageBeingDownloaded != null) { // Image was downloading, but now it's ready
            imageBeingDownloaded = null;
        }
    }

    /** Wakes the tick thread so the next converge runs immediately instead of after the interval. */
    private void signalWorkToBeDone() {
        synchronized (monitor) {
            if (!workToDoNow) {
                workToDoNow = true;
                context.log(logger, LogLevel.DEBUG, "Signaling work to be done");
                monitor.notifyAll();
            }
        }
    }

    /**
     * One iteration of the converge loop: sleep until the next converge is due (or until
     * signalled), apply any pending freeze request, then run converge() unless frozen.
     * All expected exception types from converge() are caught and handled here so a single
     * failed tick does not kill the loop thread.
     */
    void tick() {
        boolean isFrozenCopy;
        synchronized (monitor) {
            while (!workToDoNow) {
                long remainder = timeBetweenEachConverge
                        .minus(Duration.between(lastConverge, clock.instant()))
                        .toMillis();
                if (remainder > 0) {
                    try {
                        monitor.wait(remainder);
                    } catch (InterruptedException e) {
                        context.log(logger, LogLevel.ERROR, "Interrupted while sleeping before tick, ignoring");
                    }
                } else break;
            }
            lastConverge = clock.instant();
            workToDoNow = false;

            if (isFrozen != wantFrozen) {
                isFrozen = wantFrozen;
                context.log(logger, "Updated NodeAgent's frozen state, new value: isFrozen: " + isFrozen);
            }
            isFrozenCopy = isFrozen;
        }

        // NOTE(review): 'converged' is assigned but never read in this class as visible here.
        boolean converged = false;
        if (isFrozenCopy) {
            context.log(logger, LogLevel.DEBUG, "tick: isFrozen");
        } else {
            try {
                converge();
                converged = true;
            } catch (OrchestratorException e) {
                // Orchestrator denied suspend/resume — expected during operations, log without stack.
                context.log(logger, e.getMessage());
            } catch (ContainerNotFoundException e) {
                containerState = ABSENT;
                context.log(logger, LogLevel.WARNING,
                        "Container unexpectedly gone, resetting containerState to " + containerState);
            } catch (DockerException e) {
                numberOfUnhandledException++;
                context.log(logger, LogLevel.ERROR, "Caught a DockerException", e);
            } catch (Exception e) {
                numberOfUnhandledException++;
                context.log(logger, LogLevel.ERROR, "Unhandled exception, ignoring.", e);
            }
        }
    }

    /** Formats a possibly-null node state for logging. */
    private String stateDescription(Node.State state) {
        return state == null ? "[absent]" : state.toString();
    }

    /** Cancels the scheduled daily filebeat restart, if one is active. */
    private void stopFilebeatSchedulerIfNeeded() {
        if (currentFilebeatRestarter.isPresent()) {
            currentFilebeatRestarter.get().cancel(true);
            currentFilebeatRestarter = Optional.empty();
        }
    }

    /**
     * Samples container stats from the Docker daemon and pushes CPU/memory/disk/network metrics
     * for this node into the container's metrics receiver.
     * Unchecked casts reflect the untyped maps returned by the Docker stats API.
     */
    @SuppressWarnings("unchecked")
    public void updateContainerNodeMetrics() {
        final NodeSpec node = lastNode;
        if (node == null || containerState != UNKNOWN) return;

        Optional<ContainerStats> containerStats = dockerOperations.getContainerStats(context);
        if (!containerStats.isPresent()) return;

        Dimensions.Builder dimensionsBuilder = new Dimensions.Builder()
                .add("host", context.hostname().value())
                .add("role", SecretAgentCheckConfig.nodeTypeToRole(context.nodeType()))
                .add("state", node.getState().toString());
        node.getParentHostname().ifPresent(parent -> dimensionsBuilder.add("parentHostname", parent));
        node.getAllowedToBeDown().ifPresent(allowed ->
                dimensionsBuilder.add("orchestratorState", allowed ? "ALLOWED_TO_BE_DOWN" : "NO_REMARKS"));
        Dimensions dimensions = dimensionsBuilder.build();

        ContainerStats stats = containerStats.get();
        final String APP = MetricReceiverWrapper.APPLICATION_NODE;
        final int totalNumCpuCores = ((List<Number>) ((Map) stats.getCpuStats().get("cpu_usage")).get("percpu_usage")).size();
        final long cpuContainerKernelTime = ((Number) ((Map) stats.getCpuStats().get("cpu_usage")).get("usage_in_kernelmode")).longValue();
        final long cpuContainerTotalTime = ((Number) ((Map) stats.getCpuStats().get("cpu_usage")).get("total_usage")).longValue();
        final long cpuSystemTotalTime = ((Number) stats.getCpuStats().get("system_cpu_usage")).longValue();
        final long memoryTotalBytes = ((Number) stats.getMemoryStats().get("limit")).longValue();
        final long memoryTotalBytesUsage = ((Number) stats.getMemoryStats().get("usage")).longValue();
        final long memoryTotalBytesCache = ((Number) ((Map) stats.getMemoryStats().get("stats")).get("cache")).longValue();
        final long diskTotalBytes = (long) (node.getMinDiskAvailableGb() * BYTES_IN_GB);
        final Optional<Long> diskTotalBytesUsed = storageMaintainer.getDiskUsageFor(context);

        lastCpuMetric.updateCpuDeltas(cpuSystemTotalTime, cpuContainerTotalTime, cpuContainerKernelTime);

        // CPU usage is scaled to the share of host cores allocated to this node.
        final double allocatedCpuRatio = node.getMinCpuCores() / totalNumCpuCores;
        double cpuUsageRatioOfAllocated = lastCpuMetric.getCpuUsageRatio() / allocatedCpuRatio;
        double cpuKernelUsageRatioOfAllocated = lastCpuMetric.getCpuKernelUsageRatio() / allocatedCpuRatio;

        // Page-cache memory is reclaimable, so exclude it from "used".
        long memoryTotalBytesUsed = memoryTotalBytesUsage - memoryTotalBytesCache;
        double memoryUsageRatio = (double) memoryTotalBytesUsed / memoryTotalBytes;
        Optional<Double> diskUsageRatio = diskTotalBytesUsed.map(used -> (double) used / diskTotalBytes);

        List<DimensionMetrics> metrics = new ArrayList<>();
        DimensionMetrics.Builder systemMetricsBuilder = new DimensionMetrics.Builder(APP, dimensions)
                .withMetric("mem.limit", memoryTotalBytes)
                .withMetric("mem.used", memoryTotalBytesUsed)
                .withMetric("mem.util", 100 * memoryUsageRatio)
                .withMetric("cpu.util", 100 * cpuUsageRatioOfAllocated)
                .withMetric("cpu.sys.util", 100 * cpuKernelUsageRatioOfAllocated)
                .withMetric("disk.limit", diskTotalBytes);

        diskTotalBytesUsed.ifPresent(diskUsed -> systemMetricsBuilder.withMetric("disk.used", diskUsed));
        diskUsageRatio.ifPresent(diskRatio -> systemMetricsBuilder.withMetric("disk.util", 100 * diskRatio));
        metrics.add(systemMetricsBuilder.build());

        stats.getNetworks().forEach((interfaceName, interfaceStats) -> {
            Dimensions netDims = dimensionsBuilder.add("interface", interfaceName).build();
            Map<String, Number> infStats = (Map<String, Number>) interfaceStats;

            DimensionMetrics networkMetrics = new DimensionMetrics.Builder(APP, netDims)
                    .withMetric("net.in.bytes", infStats.get("rx_bytes").longValue())
                    .withMetric("net.in.errors", infStats.get("rx_errors").longValue())
                    .withMetric("net.in.dropped", infStats.get("rx_dropped").longValue())
                    .withMetric("net.out.bytes", infStats.get("tx_bytes").longValue())
                    .withMetric("net.out.errors", infStats.get("tx_errors").longValue())
                    .withMetric("net.out.dropped", infStats.get("tx_dropped").longValue())
                    .build();
            metrics.add(networkMetrics);
        });

        pushMetricsToContainer(metrics);
    }

    /** Sends the metrics into the container over vespa-rpc; timeouts/serialization errors are logged. */
    private void pushMetricsToContainer(List<DimensionMetrics> metrics) {
        StringBuilder params = new StringBuilder();
        try {
            for (DimensionMetrics dimensionMetrics : metrics) {
                params.append(dimensionMetrics.toSecretAgentReport());
            }
            String wrappedMetrics = "s:" + params.toString();

            // Push metrics to the metrics proxy in each container.
            String[] command = {"vespa-rpc-invoke", "-t", "2", "tcp/localhost:19091",
                    "setExtraMetrics", wrappedMetrics};
            dockerOperations.executeCommandInContainerAsRoot(context, 5L, command);
        } catch (DockerExecTimeoutException | JsonProcessingException e) {
            context.log(logger, LogLevel.WARNING, "Failed to push metrics to container", e);
        }
    }

    /** Asks the Docker daemon for the container; updates containerState to ABSENT if it is gone. */
    private Optional<Container> getContainer() {
        if (containerState == ABSENT) return Optional.empty();
        Optional<Container> container = dockerOperations.getContainer(context);
        if (! container.isPresent()) containerState = ABSENT;
        return container;
    }

    @Override
    public boolean isDownloadingImage() {
        return imageBeingDownloaded != null;
    }

    /** Returns the unhandled-exception count and resets it to zero. */
    @Override
    public int getAndResetNumberOfUnhandledExceptions() {
        int temp = numberOfUnhandledException;
        numberOfUnhandledException = 0;
        return temp;
    }

    /**
     * Tracks cumulative CPU counters between metric samples and exposes the deltas as ratios of
     * system CPU time.
     */
    class CpuUsageReporter {
        private long containerKernelUsage = 0;
        private long totalContainerUsage = 0;
        private long totalSystemUsage = 0;

        private long deltaContainerKernelUsage;
        private long deltaContainerUsage;
        private long deltaSystemUsage;

        /** Records new cumulative counters; the first sample produces a zero system delta. */
        private void updateCpuDeltas(long totalSystemUsage, long totalContainerUsage, long containerKernelUsage) {
            // Don't include the first tick in the data, as deltas will be missing
            deltaSystemUsage = this.totalSystemUsage == 0 ? 0 : (totalSystemUsage - this.totalSystemUsage);
            deltaContainerUsage = totalContainerUsage - this.totalContainerUsage;
            deltaContainerKernelUsage = containerKernelUsage - this.containerKernelUsage;

            this.totalSystemUsage = totalSystemUsage;
            this.totalContainerUsage = totalContainerUsage;
            this.containerKernelUsage = containerKernelUsage;
        }

        /**
         * Returns the CPU usage ratio for the docker container that this NodeAgent is managing
         * in the time between the last two times updateCpuDeltas() was called. This is calculated
         * by dividing the CPU time used by the container with the CPU time used by the entire system.
         */
        double getCpuUsageRatio() {
            return deltaSystemUsage == 0 ? Double.NaN : (double) deltaContainerUsage / deltaSystemUsage;
        }

        /** Same as {@link #getCpuUsageRatio()}, but for kernel-mode CPU time only. */
        double getCpuKernelUsageRatio() {
            return deltaSystemUsage == 0 ? Double.NaN : (double) deltaContainerKernelUsage / deltaSystemUsage;
        }
    }

    /** Asks the Orchestrator for permission to suspend this node; throws if denied. */
    private void orchestratorSuspendNode() {
        context.log(logger, "Ask Orchestrator for permission to suspend node");
        orchestrator.suspend(context.hostname().value());
    }

    /**
     * Hook for subclasses to provide files to be written into a new container; the default
     * implementation supports no files.
     */
    protected ContainerData createContainerData(NodeAgentContext context, NodeSpec node) {
        return (pathInContainer, data) -> {
            throw new UnsupportedOperationException("addFile not implemented");
        };
    }
}
class NodeAgentImpl implements NodeAgent { private static final long BYTES_IN_GB = 1_000_000_000L; private static final Logger logger = Logger.getLogger(NodeAgentImpl.class.getName()); private final AtomicBoolean terminated = new AtomicBoolean(false); private boolean isFrozen = true; private boolean wantFrozen = false; private boolean workToDoNow = true; private boolean expectNodeNotInNodeRepo = false; private final Object monitor = new Object(); private DockerImage imageBeingDownloaded = null; private final NodeAgentContext context; private final NodeRepository nodeRepository; private final Orchestrator orchestrator; private final DockerOperations dockerOperations; private final StorageMaintainer storageMaintainer; private final Clock clock; private final Duration timeBetweenEachConverge; private final Optional<AthenzCredentialsMaintainer> athenzCredentialsMaintainer; private final Optional<AclMaintainer> aclMaintainer; private int numberOfUnhandledException = 0; private Instant lastConverge; private final Thread loopThread; private final Optional<HealthChecker> healthChecker; private final ScheduledExecutorService filebeatRestarter = Executors.newScheduledThreadPool(1, ThreadFactoryFactory.getDaemonThreadFactory("filebeatrestarter")); private Consumer<String> serviceRestarter; private Optional<Future<?>> currentFilebeatRestarter = Optional.empty(); private boolean hasResumedNode = false; private boolean hasStartedServices = true; /** * ABSENT means container is definitely absent - A container that was absent will not suddenly appear without * NodeAgent explicitly starting it. * STARTING state is set just before we attempt to start a container, if successful we move to the next state. * Otherwise we can't be certain. A container that was running a minute ago may no longer be running without * NodeAgent doing anything (container could have crashed). Therefore we always have to ask docker daemon * to get updated state of the container. 
*/ enum ContainerState { ABSENT, STARTING, UNKNOWN } private ContainerState containerState = UNKNOWN; private NodeSpec lastNode = null; private CpuUsageReporter lastCpuMetric = new CpuUsageReporter(); public NodeAgentImpl( final NodeAgentContext context, final NodeRepository nodeRepository, final Orchestrator orchestrator, final DockerOperations dockerOperations, final StorageMaintainer storageMaintainer, final Clock clock, final Duration timeBetweenEachConverge, final Optional<AthenzCredentialsMaintainer> athenzCredentialsMaintainer, final Optional<AclMaintainer> aclMaintainer, final Optional<HealthChecker> healthChecker) { this.context = context; this.nodeRepository = nodeRepository; this.orchestrator = orchestrator; this.dockerOperations = dockerOperations; this.storageMaintainer = storageMaintainer; this.clock = clock; this.timeBetweenEachConverge = timeBetweenEachConverge; this.lastConverge = clock.instant(); this.athenzCredentialsMaintainer = athenzCredentialsMaintainer; this.aclMaintainer = aclMaintainer; this.healthChecker = healthChecker; this.loopThread = new Thread(() -> { try { while (!terminated.get()) tick(); } catch (Throwable t) { numberOfUnhandledException++; context.log(logger, LogLevel.ERROR, "Unhandled throwable, ignoring", t); } }); this.loopThread.setName("tick-" + context.hostname()); } @Override public boolean setFrozen(boolean frozen) { synchronized (monitor) { if (wantFrozen != frozen) { wantFrozen = frozen; context.log(logger, LogLevel.DEBUG, wantFrozen ? 
"Freezing" : "Unfreezing"); signalWorkToBeDone(); } return isFrozen == frozen; } } @Override public void start() { context.log(logger, "Starting with interval " + timeBetweenEachConverge.toMillis() + " ms"); loopThread.start(); serviceRestarter = service -> { try { ProcessResult processResult = dockerOperations.executeCommandInContainerAsRoot( context, "service", service, "restart"); if (!processResult.isSuccess()) { context.log(logger, LogLevel.ERROR, "Failed to restart service " + service + ": " + processResult); } } catch (Exception e) { context.log(logger, LogLevel.ERROR, "Failed to restart service " + service, e); } }; } @Override public void stop() { filebeatRestarter.shutdown(); if (!terminated.compareAndSet(false, true)) { throw new RuntimeException("Can not re-stop a node agent."); } signalWorkToBeDone(); do { try { loopThread.join(); filebeatRestarter.awaitTermination(Long.MAX_VALUE, TimeUnit.NANOSECONDS); } catch (InterruptedException e) { context.log(logger, LogLevel.ERROR, "Interrupted while waiting for converge thread and filebeatRestarter scheduler to shutdown"); } } while (loopThread.isAlive() || !filebeatRestarter.isTerminated()); context.log(logger, "Stopped"); } void startServicesIfNeeded() { if (!hasStartedServices) { context.log(logger, "Starting services"); dockerOperations.startServices(context); hasStartedServices = true; } } void resumeNodeIfNeeded(NodeSpec node) { if (!hasResumedNode) { if (!currentFilebeatRestarter.isPresent()) { storageMaintainer.writeMetricsConfig(context, node); currentFilebeatRestarter = Optional.of(filebeatRestarter.scheduleWithFixedDelay( () -> serviceRestarter.accept("filebeat"), 1, 1, TimeUnit.DAYS)); } context.log(logger, LogLevel.DEBUG, "Starting optional node program resume command"); dockerOperations.resumeNode(context); hasResumedNode = true; } } private void updateNodeRepoWithCurrentAttributes(final NodeSpec node) { final NodeAttributes currentNodeAttributes = new NodeAttributes(); final NodeAttributes 
newNodeAttributes = new NodeAttributes(); if (!Objects.equals(node.getCurrentRestartGeneration(), node.getWantedRestartGeneration())) { currentNodeAttributes.withRestartGeneration(node.getCurrentRestartGeneration()); newNodeAttributes.withRestartGeneration(node.getWantedRestartGeneration()); } if (!Objects.equals(node.getCurrentRebootGeneration(), node.getWantedRebootGeneration())) { currentNodeAttributes.withRebootGeneration(node.getCurrentRebootGeneration()); newNodeAttributes.withRebootGeneration(node.getWantedRebootGeneration()); } Optional<DockerImage> actualDockerImage = node.getWantedDockerImage().filter(n -> containerState == UNKNOWN); if (!Objects.equals(node.getCurrentDockerImage(), actualDockerImage)) { currentNodeAttributes.withDockerImage(node.getCurrentDockerImage().orElse(new DockerImage(""))); newNodeAttributes.withDockerImage(actualDockerImage.orElse(new DockerImage(""))); } publishStateToNodeRepoIfChanged(currentNodeAttributes, newNodeAttributes); } private void publishStateToNodeRepoIfChanged(NodeAttributes currentAttributes, NodeAttributes newAttributes) { if (!currentAttributes.equals(newAttributes)) { context.log(logger, "Publishing new set of attributes to node repo: %s -> %s", currentAttributes, newAttributes); nodeRepository.updateNodeAttributes(context.hostname().value(), newAttributes); } } private void startContainer(NodeSpec node) { ContainerData containerData = createContainerData(context, node); dockerOperations.createContainer(context, node, containerData); dockerOperations.startContainer(context); lastCpuMetric = new CpuUsageReporter(); hasStartedServices = true; hasResumedNode = false; context.log(logger, "Container successfully started, new containerState is " + containerState); } private Optional<Container> removeContainerIfNeededUpdateContainerState(NodeSpec node, Optional<Container> existingContainer) { return existingContainer .flatMap(container -> removeContainerIfNeeded(node, container)) .map(container -> { 
shouldRestartServices(node).ifPresent(restartReason -> { context.log(logger, "Will restart services: " + restartReason); restartServices(node, container); }); return container; }); } private Optional<String> shouldRestartServices(NodeSpec node) { if (!node.getWantedRestartGeneration().isPresent()) return Optional.empty(); if (node.getCurrentRestartGeneration().get() < node.getWantedRestartGeneration().get()) { return Optional.of("Restart requested - wanted restart generation has been bumped: " + node.getCurrentRestartGeneration().get() + " -> " + node.getWantedRestartGeneration().get()); } return Optional.empty(); } private void restartServices(NodeSpec node, Container existingContainer) { if (existingContainer.state.isRunning() && node.getState() == Node.State.active) { context.log(logger, "Restarting services"); orchestratorSuspendNode(); dockerOperations.restartVespa(context); } } @Override public void stopServices() { context.log(logger, "Stopping services"); if (containerState == ABSENT) return; try { hasStartedServices = hasResumedNode = false; dockerOperations.stopServices(context); } catch (ContainerNotFoundException e) { containerState = ABSENT; } } @Override public void suspend() { context.log(logger, "Suspending services on node"); if (containerState == ABSENT) return; try { hasResumedNode = false; dockerOperations.suspendNode(context); } catch (ContainerNotFoundException e) { containerState = ABSENT; } catch (RuntimeException e) { context.log(logger, LogLevel.WARNING, "Failed trying to suspend container", e); } } private Optional<String> shouldRemoveContainer(NodeSpec node, Container existingContainer) { final Node.State nodeState = node.getState(); if (nodeState == Node.State.dirty || nodeState == Node.State.provisioned) { return Optional.of("Node in state " + nodeState + ", container should no longer be running"); } if (node.getWantedDockerImage().isPresent() && !node.getWantedDockerImage().get().equals(existingContainer.image)) { return 
Optional.of("The node is supposed to run a new Docker image: " + existingContainer.image.asString() + " -> " + node.getWantedDockerImage().get().asString()); } if (!existingContainer.state.isRunning()) { return Optional.of("Container no longer running"); } ContainerResources wantedContainerResources = ContainerResources.from( node.getMinCpuCores(), node.getMinMainMemoryAvailableGb()); if (!wantedContainerResources.equals(existingContainer.resources)) { return Optional.of("Container should be running with different resource allocation, wanted: " + wantedContainerResources + ", actual: " + existingContainer.resources); } if (node.getCurrentRebootGeneration() < node.getWantedRebootGeneration()) { return Optional.of(String.format("Container reboot wanted. Current: %d, Wanted: %d", node.getCurrentRebootGeneration(), node.getWantedRebootGeneration())); } if (containerState == STARTING) return Optional.of("Container failed to start"); return Optional.empty(); } private Optional<Container> removeContainerIfNeeded(NodeSpec node, Container existingContainer) { Optional<String> removeReason = shouldRemoveContainer(node, existingContainer); if (removeReason.isPresent()) { context.log(logger, "Will remove container: " + removeReason.get()); if (existingContainer.state.isRunning()) { if (node.getState() == Node.State.active) { orchestratorSuspendNode(); } try { if (node.getState() != Node.State.dirty) { suspend(); } stopServices(); } catch (Exception e) { context.log(logger, LogLevel.WARNING, "Failed stopping services, ignoring", e); } } stopFilebeatSchedulerIfNeeded(); storageMaintainer.handleCoreDumpsForContainer(context, node, Optional.of(existingContainer)); dockerOperations.removeContainer(context, existingContainer); containerState = ABSENT; context.log(logger, "Container successfully removed, new containerState is " + containerState); return Optional.empty(); } return Optional.of(existingContainer); } private void scheduleDownLoadIfNeeded(NodeSpec node) { if 
(node.getCurrentDockerImage().equals(node.getWantedDockerImage())) return; if (dockerOperations.pullImageAsyncIfNeeded(node.getWantedDockerImage().get())) { imageBeingDownloaded = node.getWantedDockerImage().get(); } else if (imageBeingDownloaded != null) { imageBeingDownloaded = null; } } private void signalWorkToBeDone() { synchronized (monitor) { if (!workToDoNow) { workToDoNow = true; context.log(logger, LogLevel.DEBUG, "Signaling work to be done"); monitor.notifyAll(); } } } void tick() { boolean isFrozenCopy; synchronized (monitor) { while (!workToDoNow) { long remainder = timeBetweenEachConverge .minus(Duration.between(lastConverge, clock.instant())) .toMillis(); if (remainder > 0) { try { monitor.wait(remainder); } catch (InterruptedException e) { context.log(logger, LogLevel.ERROR, "Interrupted while sleeping before tick, ignoring"); } } else break; } lastConverge = clock.instant(); workToDoNow = false; if (isFrozen != wantFrozen) { isFrozen = wantFrozen; context.log(logger, "Updated NodeAgent's frozen state, new value: isFrozen: " + isFrozen); } isFrozenCopy = isFrozen; } boolean converged = false; if (isFrozenCopy) { context.log(logger, LogLevel.DEBUG, "tick: isFrozen"); } else { try { converge(); converged = true; } catch (OrchestratorException e) { context.log(logger, e.getMessage()); } catch (ContainerNotFoundException e) { containerState = ABSENT; context.log(logger, LogLevel.WARNING, "Container unexpectedly gone, resetting containerState to " + containerState); } catch (DockerException e) { numberOfUnhandledException++; context.log(logger, LogLevel.ERROR, "Caught a DockerException", e); } catch (Exception e) { numberOfUnhandledException++; context.log(logger, LogLevel.ERROR, "Unhandled exception, ignoring.", e); } } } private void logChangesToNodeSpec(NodeSpec lastNode, NodeSpec node) { StringBuilder builder = new StringBuilder(); appendIfDifferent(builder, "state", lastNode, node, NodeSpec::getState); if (builder.length() > 0) { context.log(logger, 
LogLevel.INFO, "Changes to node: " + builder.toString()); } } private static <T> String fieldDescription(T value) { return value == null ? "[absent]" : value.toString(); } private <T> void appendIfDifferent(StringBuilder builder, String name, NodeSpec oldNode, NodeSpec newNode, Function<NodeSpec, T> getter) { T oldValue = oldNode == null ? null : getter.apply(oldNode); T newValue = getter.apply(newNode); if (!Objects.equals(oldValue, newValue)) { if (builder.length() > 0) { builder.append(", "); } builder.append(name).append(" ").append(fieldDescription(oldValue)).append(" -> ").append(fieldDescription(newValue)); } } private void stopFilebeatSchedulerIfNeeded() { if (currentFilebeatRestarter.isPresent()) { currentFilebeatRestarter.get().cancel(true); currentFilebeatRestarter = Optional.empty(); } } @SuppressWarnings("unchecked") public void updateContainerNodeMetrics() { final NodeSpec node = lastNode; if (node == null || containerState != UNKNOWN) return; Optional<ContainerStats> containerStats = dockerOperations.getContainerStats(context); if (!containerStats.isPresent()) return; Dimensions.Builder dimensionsBuilder = new Dimensions.Builder() .add("host", context.hostname().value()) .add("role", SecretAgentCheckConfig.nodeTypeToRole(context.nodeType())) .add("state", node.getState().toString()); node.getParentHostname().ifPresent(parent -> dimensionsBuilder.add("parentHostname", parent)); node.getAllowedToBeDown().ifPresent(allowed -> dimensionsBuilder.add("orchestratorState", allowed ? 
"ALLOWED_TO_BE_DOWN" : "NO_REMARKS")); Dimensions dimensions = dimensionsBuilder.build(); ContainerStats stats = containerStats.get(); final String APP = MetricReceiverWrapper.APPLICATION_NODE; final int totalNumCpuCores = ((List<Number>) ((Map) stats.getCpuStats().get("cpu_usage")).get("percpu_usage")).size(); final long cpuContainerKernelTime = ((Number) ((Map) stats.getCpuStats().get("cpu_usage")).get("usage_in_kernelmode")).longValue(); final long cpuContainerTotalTime = ((Number) ((Map) stats.getCpuStats().get("cpu_usage")).get("total_usage")).longValue(); final long cpuSystemTotalTime = ((Number) stats.getCpuStats().get("system_cpu_usage")).longValue(); final long memoryTotalBytes = ((Number) stats.getMemoryStats().get("limit")).longValue(); final long memoryTotalBytesUsage = ((Number) stats.getMemoryStats().get("usage")).longValue(); final long memoryTotalBytesCache = ((Number) ((Map) stats.getMemoryStats().get("stats")).get("cache")).longValue(); final long diskTotalBytes = (long) (node.getMinDiskAvailableGb() * BYTES_IN_GB); final Optional<Long> diskTotalBytesUsed = storageMaintainer.getDiskUsageFor(context); lastCpuMetric.updateCpuDeltas(cpuSystemTotalTime, cpuContainerTotalTime, cpuContainerKernelTime); final double allocatedCpuRatio = node.getMinCpuCores() / totalNumCpuCores; double cpuUsageRatioOfAllocated = lastCpuMetric.getCpuUsageRatio() / allocatedCpuRatio; double cpuKernelUsageRatioOfAllocated = lastCpuMetric.getCpuKernelUsageRatio() / allocatedCpuRatio; long memoryTotalBytesUsed = memoryTotalBytesUsage - memoryTotalBytesCache; double memoryUsageRatio = (double) memoryTotalBytesUsed / memoryTotalBytes; Optional<Double> diskUsageRatio = diskTotalBytesUsed.map(used -> (double) used / diskTotalBytes); List<DimensionMetrics> metrics = new ArrayList<>(); DimensionMetrics.Builder systemMetricsBuilder = new DimensionMetrics.Builder(APP, dimensions) .withMetric("mem.limit", memoryTotalBytes) .withMetric("mem.used", memoryTotalBytesUsed) 
.withMetric("mem.util", 100 * memoryUsageRatio) .withMetric("cpu.util", 100 * cpuUsageRatioOfAllocated) .withMetric("cpu.sys.util", 100 * cpuKernelUsageRatioOfAllocated) .withMetric("disk.limit", diskTotalBytes); diskTotalBytesUsed.ifPresent(diskUsed -> systemMetricsBuilder.withMetric("disk.used", diskUsed)); diskUsageRatio.ifPresent(diskRatio -> systemMetricsBuilder.withMetric("disk.util", 100 * diskRatio)); metrics.add(systemMetricsBuilder.build()); stats.getNetworks().forEach((interfaceName, interfaceStats) -> { Dimensions netDims = dimensionsBuilder.add("interface", interfaceName).build(); Map<String, Number> infStats = (Map<String, Number>) interfaceStats; DimensionMetrics networkMetrics = new DimensionMetrics.Builder(APP, netDims) .withMetric("net.in.bytes", infStats.get("rx_bytes").longValue()) .withMetric("net.in.errors", infStats.get("rx_errors").longValue()) .withMetric("net.in.dropped", infStats.get("rx_dropped").longValue()) .withMetric("net.out.bytes", infStats.get("tx_bytes").longValue()) .withMetric("net.out.errors", infStats.get("tx_errors").longValue()) .withMetric("net.out.dropped", infStats.get("tx_dropped").longValue()) .build(); metrics.add(networkMetrics); }); pushMetricsToContainer(metrics); } private void pushMetricsToContainer(List<DimensionMetrics> metrics) { StringBuilder params = new StringBuilder(); try { for (DimensionMetrics dimensionMetrics : metrics) { params.append(dimensionMetrics.toSecretAgentReport()); } String wrappedMetrics = "s:" + params.toString(); String[] command = {"vespa-rpc-invoke", "-t", "2", "tcp/localhost:19091", "setExtraMetrics", wrappedMetrics}; dockerOperations.executeCommandInContainerAsRoot(context, 5L, command); } catch (DockerExecTimeoutException | JsonProcessingException e) { context.log(logger, LogLevel.WARNING, "Failed to push metrics to container", e); } } private Optional<Container> getContainer() { if (containerState == ABSENT) return Optional.empty(); Optional<Container> container = 
dockerOperations.getContainer(context); if (! container.isPresent()) containerState = ABSENT; return container; } @Override public boolean isDownloadingImage() { return imageBeingDownloaded != null; } @Override public int getAndResetNumberOfUnhandledExceptions() { int temp = numberOfUnhandledException; numberOfUnhandledException = 0; return temp; } class CpuUsageReporter { private long containerKernelUsage = 0; private long totalContainerUsage = 0; private long totalSystemUsage = 0; private long deltaContainerKernelUsage; private long deltaContainerUsage; private long deltaSystemUsage; private void updateCpuDeltas(long totalSystemUsage, long totalContainerUsage, long containerKernelUsage) { deltaSystemUsage = this.totalSystemUsage == 0 ? 0 : (totalSystemUsage - this.totalSystemUsage); deltaContainerUsage = totalContainerUsage - this.totalContainerUsage; deltaContainerKernelUsage = containerKernelUsage - this.containerKernelUsage; this.totalSystemUsage = totalSystemUsage; this.totalContainerUsage = totalContainerUsage; this.containerKernelUsage = containerKernelUsage; } /** * Returns the CPU usage ratio for the docker container that this NodeAgent is managing * in the time between the last two times updateCpuDeltas() was called. This is calculated * by dividing the CPU time used by the container with the CPU time used by the entire system. */ double getCpuUsageRatio() { return deltaSystemUsage == 0 ? Double.NaN : (double) deltaContainerUsage / deltaSystemUsage; } double getCpuKernelUsageRatio() { return deltaSystemUsage == 0 ? Double.NaN : (double) deltaContainerKernelUsage / deltaSystemUsage; } } private void orchestratorSuspendNode() { context.log(logger, "Ask Orchestrator for permission to suspend node"); orchestrator.suspend(context.hostname().value()); } protected ContainerData createContainerData(NodeAgentContext context, NodeSpec node) { return (pathInContainer, data) -> { throw new UnsupportedOperationException("addFile not implemented"); }; } }
This can be done a few lines below (478-486) without introducing a new variable
void converge() { final Optional<NodeSpec> optionalNode = nodeRepository.getOptionalNode(context.hostname().value()); Node.State newState = optionalNode.map(NodeSpec::getState).orElse(null); if (newState != lastState) { context.log(logger, LogLevel.INFO, "State changed: " + stateDescription(lastState) + " -> " + stateDescription(newState)); lastState = newState; } if (!optionalNode.isPresent() && expectNodeNotInNodeRepo) return; final NodeSpec node = optionalNode.orElseThrow(() -> new IllegalStateException(String.format("Node '%s' missing from node repository", context.hostname()))); expectNodeNotInNodeRepo = false; Optional<Container> container = getContainer(); if (!node.equals(lastNode)) { if (container.map(c -> c.state.isRunning()).orElse(false)) { storageMaintainer.writeMetricsConfig(context, node); } context.log(logger, LogLevel.DEBUG, "Loading new node spec: " + node.toString()); lastNode = node; } switch (node.getState()) { case ready: case reserved: case parked: case failed: removeContainerIfNeededUpdateContainerState(node, container); updateNodeRepoWithCurrentAttributes(node); break; case active: storageMaintainer.handleCoreDumpsForContainer(context, node, container); storageMaintainer.getDiskUsageFor(context) .map(diskUsage -> (double) diskUsage / BYTES_IN_GB / node.getMinDiskAvailableGb()) .filter(diskUtil -> diskUtil >= 0.8) .ifPresent(diskUtil -> storageMaintainer.removeOldFilesFromNode(context)); scheduleDownLoadIfNeeded(node); if (isDownloadingImage()) { context.log(logger, LogLevel.DEBUG, "Waiting for image to download " + imageBeingDownloaded.asString()); return; } container = removeContainerIfNeededUpdateContainerState(node, container); if (! 
container.isPresent()) { containerState = STARTING; startContainer(node); containerState = UNKNOWN; aclMaintainer.ifPresent(AclMaintainer::converge); } startServicesIfNeeded(); resumeNodeIfNeeded(node); athenzCredentialsMaintainer.ifPresent(maintainer -> maintainer.converge(context)); healthChecker.ifPresent(checker -> checker.verifyHealth(context)); updateNodeRepoWithCurrentAttributes(node); context.log(logger, "Call resume against Orchestrator"); orchestrator.resume(context.hostname().value()); break; case inactive: removeContainerIfNeededUpdateContainerState(node, container); updateNodeRepoWithCurrentAttributes(node); break; case provisioned: nodeRepository.setNodeState(context.hostname().value(), Node.State.dirty); break; case dirty: removeContainerIfNeededUpdateContainerState(node, container); context.log(logger, "State is " + node.getState() + ", will delete application storage and mark node as ready"); athenzCredentialsMaintainer.ifPresent(maintainer -> maintainer.clearCredentials(context)); storageMaintainer.archiveNodeStorage(context); updateNodeRepoWithCurrentAttributes(node); nodeRepository.setNodeState(context.hostname().value(), Node.State.ready); expectNodeNotInNodeRepo = true; break; default: throw new RuntimeException("UNKNOWN STATE " + node.getState().name()); } }
Node.State newState = optionalNode.map(NodeSpec::getState).orElse(null);
void converge() { final Optional<NodeSpec> optionalNode = nodeRepository.getOptionalNode(context.hostname().value()); if (!optionalNode.isPresent() && expectNodeNotInNodeRepo) { context.log(logger, LogLevel.INFO, "Node removed from node repo (as expected)"); return; } final NodeSpec node = optionalNode.orElseThrow(() -> new IllegalStateException(String.format("Node '%s' missing from node repository", context.hostname()))); expectNodeNotInNodeRepo = false; Optional<Container> container = getContainer(); if (!node.equals(lastNode)) { logChangesToNodeSpec(lastNode, node); if (container.map(c -> c.state.isRunning()).orElse(false)) { storageMaintainer.writeMetricsConfig(context, node); } lastNode = node; } switch (node.getState()) { case ready: case reserved: case parked: case failed: removeContainerIfNeededUpdateContainerState(node, container); updateNodeRepoWithCurrentAttributes(node); break; case active: storageMaintainer.handleCoreDumpsForContainer(context, node, container); storageMaintainer.getDiskUsageFor(context) .map(diskUsage -> (double) diskUsage / BYTES_IN_GB / node.getMinDiskAvailableGb()) .filter(diskUtil -> diskUtil >= 0.8) .ifPresent(diskUtil -> storageMaintainer.removeOldFilesFromNode(context)); scheduleDownLoadIfNeeded(node); if (isDownloadingImage()) { context.log(logger, LogLevel.DEBUG, "Waiting for image to download " + imageBeingDownloaded.asString()); return; } container = removeContainerIfNeededUpdateContainerState(node, container); if (! 
container.isPresent()) { containerState = STARTING; startContainer(node); containerState = UNKNOWN; aclMaintainer.ifPresent(AclMaintainer::converge); } startServicesIfNeeded(); resumeNodeIfNeeded(node); athenzCredentialsMaintainer.ifPresent(maintainer -> maintainer.converge(context)); healthChecker.ifPresent(checker -> checker.verifyHealth(context)); updateNodeRepoWithCurrentAttributes(node); context.log(logger, "Call resume against Orchestrator"); orchestrator.resume(context.hostname().value()); break; case inactive: removeContainerIfNeededUpdateContainerState(node, container); updateNodeRepoWithCurrentAttributes(node); break; case provisioned: nodeRepository.setNodeState(context.hostname().value(), Node.State.dirty); break; case dirty: removeContainerIfNeededUpdateContainerState(node, container); context.log(logger, "State is " + node.getState() + ", will delete application storage and mark node as ready"); athenzCredentialsMaintainer.ifPresent(maintainer -> maintainer.clearCredentials(context)); storageMaintainer.archiveNodeStorage(context); updateNodeRepoWithCurrentAttributes(node); nodeRepository.setNodeState(context.hostname().value(), Node.State.ready); expectNodeNotInNodeRepo = true; break; default: throw new RuntimeException("UNKNOWN STATE " + node.getState().name()); } }
class NodeAgentImpl implements NodeAgent { private static final long BYTES_IN_GB = 1_000_000_000L; private static final Logger logger = Logger.getLogger(NodeAgentImpl.class.getName()); private final AtomicBoolean terminated = new AtomicBoolean(false); private boolean isFrozen = true; private boolean wantFrozen = false; private boolean workToDoNow = true; private boolean expectNodeNotInNodeRepo = false; private final Object monitor = new Object(); private DockerImage imageBeingDownloaded = null; private final NodeAgentContext context; private final NodeRepository nodeRepository; private final Orchestrator orchestrator; private final DockerOperations dockerOperations; private final StorageMaintainer storageMaintainer; private final Clock clock; private final Duration timeBetweenEachConverge; private final Optional<AthenzCredentialsMaintainer> athenzCredentialsMaintainer; private final Optional<AclMaintainer> aclMaintainer; private int numberOfUnhandledException = 0; private Instant lastConverge; private Node.State lastState = null; private final Thread loopThread; private final Optional<HealthChecker> healthChecker; private final ScheduledExecutorService filebeatRestarter = Executors.newScheduledThreadPool(1, ThreadFactoryFactory.getDaemonThreadFactory("filebeatrestarter")); private Consumer<String> serviceRestarter; private Optional<Future<?>> currentFilebeatRestarter = Optional.empty(); private boolean hasResumedNode = false; private boolean hasStartedServices = true; /** * ABSENT means container is definitely absent - A container that was absent will not suddenly appear without * NodeAgent explicitly starting it. * STARTING state is set just before we attempt to start a container, if successful we move to the next state. * Otherwise we can't be certain. A container that was running a minute ago may no longer be running without * NodeAgent doing anything (container could have crashed). 
Therefore we always have to ask docker daemon * to get updated state of the container. */ enum ContainerState { ABSENT, STARTING, UNKNOWN } private ContainerState containerState = UNKNOWN; private NodeSpec lastNode = null; private CpuUsageReporter lastCpuMetric = new CpuUsageReporter(); public NodeAgentImpl( final NodeAgentContext context, final NodeRepository nodeRepository, final Orchestrator orchestrator, final DockerOperations dockerOperations, final StorageMaintainer storageMaintainer, final Clock clock, final Duration timeBetweenEachConverge, final Optional<AthenzCredentialsMaintainer> athenzCredentialsMaintainer, final Optional<AclMaintainer> aclMaintainer, final Optional<HealthChecker> healthChecker) { this.context = context; this.nodeRepository = nodeRepository; this.orchestrator = orchestrator; this.dockerOperations = dockerOperations; this.storageMaintainer = storageMaintainer; this.clock = clock; this.timeBetweenEachConverge = timeBetweenEachConverge; this.lastConverge = clock.instant(); this.athenzCredentialsMaintainer = athenzCredentialsMaintainer; this.aclMaintainer = aclMaintainer; this.healthChecker = healthChecker; this.loopThread = new Thread(() -> { try { while (!terminated.get()) tick(); } catch (Throwable t) { numberOfUnhandledException++; context.log(logger, LogLevel.ERROR, "Unhandled throwable, ignoring", t); } }); this.loopThread.setName("tick-" + context.hostname()); } @Override public boolean setFrozen(boolean frozen) { synchronized (monitor) { if (wantFrozen != frozen) { wantFrozen = frozen; context.log(logger, LogLevel.DEBUG, wantFrozen ? 
"Freezing" : "Unfreezing"); signalWorkToBeDone(); } return isFrozen == frozen; } } @Override public void start() { context.log(logger, "Starting with interval " + timeBetweenEachConverge.toMillis() + " ms"); loopThread.start(); serviceRestarter = service -> { try { ProcessResult processResult = dockerOperations.executeCommandInContainerAsRoot( context, "service", service, "restart"); if (!processResult.isSuccess()) { context.log(logger, LogLevel.ERROR, "Failed to restart service " + service + ": " + processResult); } } catch (Exception e) { context.log(logger, LogLevel.ERROR, "Failed to restart service " + service, e); } }; } @Override public void stop() { filebeatRestarter.shutdown(); if (!terminated.compareAndSet(false, true)) { throw new RuntimeException("Can not re-stop a node agent."); } signalWorkToBeDone(); do { try { loopThread.join(); filebeatRestarter.awaitTermination(Long.MAX_VALUE, TimeUnit.NANOSECONDS); } catch (InterruptedException e) { context.log(logger, LogLevel.ERROR, "Interrupted while waiting for converge thread and filebeatRestarter scheduler to shutdown"); } } while (loopThread.isAlive() || !filebeatRestarter.isTerminated()); healthChecker.ifPresent(HealthChecker::close); context.log(logger, "Stopped"); } void startServicesIfNeeded() { if (!hasStartedServices) { context.log(logger, "Starting services"); dockerOperations.startServices(context); hasStartedServices = true; } } void resumeNodeIfNeeded(NodeSpec node) { if (!hasResumedNode) { if (!currentFilebeatRestarter.isPresent()) { storageMaintainer.writeMetricsConfig(context, node); currentFilebeatRestarter = Optional.of(filebeatRestarter.scheduleWithFixedDelay( () -> serviceRestarter.accept("filebeat"), 1, 1, TimeUnit.DAYS)); } context.log(logger, LogLevel.DEBUG, "Starting optional node program resume command"); dockerOperations.resumeNode(context); hasResumedNode = true; } } private void updateNodeRepoWithCurrentAttributes(final NodeSpec node) { final NodeAttributes currentNodeAttributes = 
new NodeAttributes(); final NodeAttributes newNodeAttributes = new NodeAttributes(); if (!Objects.equals(node.getCurrentRestartGeneration(), node.getWantedRestartGeneration())) { currentNodeAttributes.withRestartGeneration(node.getCurrentRestartGeneration()); newNodeAttributes.withRestartGeneration(node.getWantedRestartGeneration()); } if (!Objects.equals(node.getCurrentRebootGeneration(), node.getWantedRebootGeneration())) { currentNodeAttributes.withRebootGeneration(node.getCurrentRebootGeneration()); newNodeAttributes.withRebootGeneration(node.getWantedRebootGeneration()); } Optional<DockerImage> actualDockerImage = node.getWantedDockerImage().filter(n -> containerState == UNKNOWN); if (!Objects.equals(node.getCurrentDockerImage(), actualDockerImage)) { currentNodeAttributes.withDockerImage(node.getCurrentDockerImage().orElse(new DockerImage(""))); newNodeAttributes.withDockerImage(actualDockerImage.orElse(new DockerImage(""))); } publishStateToNodeRepoIfChanged(currentNodeAttributes, newNodeAttributes); } private void publishStateToNodeRepoIfChanged(NodeAttributes currentAttributes, NodeAttributes newAttributes) { if (!currentAttributes.equals(newAttributes)) { context.log(logger, "Publishing new set of attributes to node repo: %s -> %s", currentAttributes, newAttributes); nodeRepository.updateNodeAttributes(context.hostname().value(), newAttributes); } } private void startContainer(NodeSpec node) { ContainerData containerData = createContainerData(context, node); dockerOperations.createContainer(context, node, containerData); dockerOperations.startContainer(context); lastCpuMetric = new CpuUsageReporter(); hasStartedServices = true; hasResumedNode = false; context.log(logger, "Container successfully started, new containerState is " + containerState); } private Optional<Container> removeContainerIfNeededUpdateContainerState(NodeSpec node, Optional<Container> existingContainer) { return existingContainer .flatMap(container -> removeContainerIfNeeded(node, 
container)) .map(container -> { shouldRestartServices(node).ifPresent(restartReason -> { context.log(logger, "Will restart services: " + restartReason); restartServices(node, container); }); return container; }); } private Optional<String> shouldRestartServices(NodeSpec node) { if (!node.getWantedRestartGeneration().isPresent()) return Optional.empty(); if (node.getCurrentRestartGeneration().get() < node.getWantedRestartGeneration().get()) { return Optional.of("Restart requested - wanted restart generation has been bumped: " + node.getCurrentRestartGeneration().get() + " -> " + node.getWantedRestartGeneration().get()); } return Optional.empty(); } private void restartServices(NodeSpec node, Container existingContainer) { if (existingContainer.state.isRunning() && node.getState() == Node.State.active) { context.log(logger, "Restarting services"); orchestratorSuspendNode(); dockerOperations.restartVespa(context); } } @Override public void stopServices() { context.log(logger, "Stopping services"); if (containerState == ABSENT) return; try { hasStartedServices = hasResumedNode = false; dockerOperations.stopServices(context); } catch (ContainerNotFoundException e) { containerState = ABSENT; } } @Override public void suspend() { context.log(logger, "Suspending services on node"); if (containerState == ABSENT) return; try { hasResumedNode = false; dockerOperations.suspendNode(context); } catch (ContainerNotFoundException e) { containerState = ABSENT; } catch (RuntimeException e) { context.log(logger, LogLevel.WARNING, "Failed trying to suspend container", e); } } private Optional<String> shouldRemoveContainer(NodeSpec node, Container existingContainer) { final Node.State nodeState = node.getState(); if (nodeState == Node.State.dirty || nodeState == Node.State.provisioned) { return Optional.of("Node in state " + nodeState + ", container should no longer be running"); } if (node.getWantedDockerImage().isPresent() && 
!node.getWantedDockerImage().get().equals(existingContainer.image)) { return Optional.of("The node is supposed to run a new Docker image: " + existingContainer.image.asString() + " -> " + node.getWantedDockerImage().get().asString()); } if (!existingContainer.state.isRunning()) { return Optional.of("Container no longer running"); } ContainerResources wantedContainerResources = ContainerResources.from( node.getMinCpuCores(), node.getMinMainMemoryAvailableGb()); if (!wantedContainerResources.equals(existingContainer.resources)) { return Optional.of("Container should be running with different resource allocation, wanted: " + wantedContainerResources + ", actual: " + existingContainer.resources); } if (node.getCurrentRebootGeneration() < node.getWantedRebootGeneration()) { return Optional.of(String.format("Container reboot wanted. Current: %d, Wanted: %d", node.getCurrentRebootGeneration(), node.getWantedRebootGeneration())); } if (containerState == STARTING) return Optional.of("Container failed to start"); return Optional.empty(); } private Optional<Container> removeContainerIfNeeded(NodeSpec node, Container existingContainer) { Optional<String> removeReason = shouldRemoveContainer(node, existingContainer); if (removeReason.isPresent()) { context.log(logger, "Will remove container: " + removeReason.get()); if (existingContainer.state.isRunning()) { if (node.getState() == Node.State.active) { orchestratorSuspendNode(); } try { if (node.getState() != Node.State.dirty) { suspend(); } stopServices(); } catch (Exception e) { context.log(logger, LogLevel.WARNING, "Failed stopping services, ignoring", e); } } stopFilebeatSchedulerIfNeeded(); storageMaintainer.handleCoreDumpsForContainer(context, node, Optional.of(existingContainer)); dockerOperations.removeContainer(context, existingContainer); containerState = ABSENT; context.log(logger, "Container successfully removed, new containerState is " + containerState); return Optional.empty(); } return 
Optional.of(existingContainer); } private void scheduleDownLoadIfNeeded(NodeSpec node) { if (node.getCurrentDockerImage().equals(node.getWantedDockerImage())) return; if (dockerOperations.pullImageAsyncIfNeeded(node.getWantedDockerImage().get())) { imageBeingDownloaded = node.getWantedDockerImage().get(); } else if (imageBeingDownloaded != null) { imageBeingDownloaded = null; } } private void signalWorkToBeDone() { synchronized (monitor) { if (!workToDoNow) { workToDoNow = true; context.log(logger, LogLevel.DEBUG, "Signaling work to be done"); monitor.notifyAll(); } } } void tick() { boolean isFrozenCopy; synchronized (monitor) { while (!workToDoNow) { long remainder = timeBetweenEachConverge .minus(Duration.between(lastConverge, clock.instant())) .toMillis(); if (remainder > 0) { try { monitor.wait(remainder); } catch (InterruptedException e) { context.log(logger, LogLevel.ERROR, "Interrupted while sleeping before tick, ignoring"); } } else break; } lastConverge = clock.instant(); workToDoNow = false; if (isFrozen != wantFrozen) { isFrozen = wantFrozen; context.log(logger, "Updated NodeAgent's frozen state, new value: isFrozen: " + isFrozen); } isFrozenCopy = isFrozen; } boolean converged = false; if (isFrozenCopy) { context.log(logger, LogLevel.DEBUG, "tick: isFrozen"); } else { try { converge(); converged = true; } catch (OrchestratorException e) { context.log(logger, e.getMessage()); } catch (ContainerNotFoundException e) { containerState = ABSENT; context.log(logger, LogLevel.WARNING, "Container unexpectedly gone, resetting containerState to " + containerState); } catch (DockerException e) { numberOfUnhandledException++; context.log(logger, LogLevel.ERROR, "Caught a DockerException", e); } catch (Exception e) { numberOfUnhandledException++; context.log(logger, LogLevel.ERROR, "Unhandled exception, ignoring.", e); } } } private String stateDescription(Node.State state) { return state == null ? 
"[absent]" : state.toString(); } private void stopFilebeatSchedulerIfNeeded() { if (currentFilebeatRestarter.isPresent()) { currentFilebeatRestarter.get().cancel(true); currentFilebeatRestarter = Optional.empty(); } } @SuppressWarnings("unchecked") public void updateContainerNodeMetrics() { final NodeSpec node = lastNode; if (node == null || containerState != UNKNOWN) return; Optional<ContainerStats> containerStats = dockerOperations.getContainerStats(context); if (!containerStats.isPresent()) return; Dimensions.Builder dimensionsBuilder = new Dimensions.Builder() .add("host", context.hostname().value()) .add("role", SecretAgentCheckConfig.nodeTypeToRole(context.nodeType())) .add("state", node.getState().toString()); node.getParentHostname().ifPresent(parent -> dimensionsBuilder.add("parentHostname", parent)); node.getAllowedToBeDown().ifPresent(allowed -> dimensionsBuilder.add("orchestratorState", allowed ? "ALLOWED_TO_BE_DOWN" : "NO_REMARKS")); Dimensions dimensions = dimensionsBuilder.build(); ContainerStats stats = containerStats.get(); final String APP = MetricReceiverWrapper.APPLICATION_NODE; final int totalNumCpuCores = ((List<Number>) ((Map) stats.getCpuStats().get("cpu_usage")).get("percpu_usage")).size(); final long cpuContainerKernelTime = ((Number) ((Map) stats.getCpuStats().get("cpu_usage")).get("usage_in_kernelmode")).longValue(); final long cpuContainerTotalTime = ((Number) ((Map) stats.getCpuStats().get("cpu_usage")).get("total_usage")).longValue(); final long cpuSystemTotalTime = ((Number) stats.getCpuStats().get("system_cpu_usage")).longValue(); final long memoryTotalBytes = ((Number) stats.getMemoryStats().get("limit")).longValue(); final long memoryTotalBytesUsage = ((Number) stats.getMemoryStats().get("usage")).longValue(); final long memoryTotalBytesCache = ((Number) ((Map) stats.getMemoryStats().get("stats")).get("cache")).longValue(); final long diskTotalBytes = (long) (node.getMinDiskAvailableGb() * BYTES_IN_GB); final Optional<Long> 
diskTotalBytesUsed = storageMaintainer.getDiskUsageFor(context); lastCpuMetric.updateCpuDeltas(cpuSystemTotalTime, cpuContainerTotalTime, cpuContainerKernelTime); final double allocatedCpuRatio = node.getMinCpuCores() / totalNumCpuCores; double cpuUsageRatioOfAllocated = lastCpuMetric.getCpuUsageRatio() / allocatedCpuRatio; double cpuKernelUsageRatioOfAllocated = lastCpuMetric.getCpuKernelUsageRatio() / allocatedCpuRatio; long memoryTotalBytesUsed = memoryTotalBytesUsage - memoryTotalBytesCache; double memoryUsageRatio = (double) memoryTotalBytesUsed / memoryTotalBytes; Optional<Double> diskUsageRatio = diskTotalBytesUsed.map(used -> (double) used / diskTotalBytes); List<DimensionMetrics> metrics = new ArrayList<>(); DimensionMetrics.Builder systemMetricsBuilder = new DimensionMetrics.Builder(APP, dimensions) .withMetric("mem.limit", memoryTotalBytes) .withMetric("mem.used", memoryTotalBytesUsed) .withMetric("mem.util", 100 * memoryUsageRatio) .withMetric("cpu.util", 100 * cpuUsageRatioOfAllocated) .withMetric("cpu.sys.util", 100 * cpuKernelUsageRatioOfAllocated) .withMetric("disk.limit", diskTotalBytes); diskTotalBytesUsed.ifPresent(diskUsed -> systemMetricsBuilder.withMetric("disk.used", diskUsed)); diskUsageRatio.ifPresent(diskRatio -> systemMetricsBuilder.withMetric("disk.util", 100 * diskRatio)); metrics.add(systemMetricsBuilder.build()); stats.getNetworks().forEach((interfaceName, interfaceStats) -> { Dimensions netDims = dimensionsBuilder.add("interface", interfaceName).build(); Map<String, Number> infStats = (Map<String, Number>) interfaceStats; DimensionMetrics networkMetrics = new DimensionMetrics.Builder(APP, netDims) .withMetric("net.in.bytes", infStats.get("rx_bytes").longValue()) .withMetric("net.in.errors", infStats.get("rx_errors").longValue()) .withMetric("net.in.dropped", infStats.get("rx_dropped").longValue()) .withMetric("net.out.bytes", infStats.get("tx_bytes").longValue()) .withMetric("net.out.errors", infStats.get("tx_errors").longValue()) 
.withMetric("net.out.dropped", infStats.get("tx_dropped").longValue()) .build(); metrics.add(networkMetrics); }); pushMetricsToContainer(metrics); } private void pushMetricsToContainer(List<DimensionMetrics> metrics) { StringBuilder params = new StringBuilder(); try { for (DimensionMetrics dimensionMetrics : metrics) { params.append(dimensionMetrics.toSecretAgentReport()); } String wrappedMetrics = "s:" + params.toString(); String[] command = {"vespa-rpc-invoke", "-t", "2", "tcp/localhost:19091", "setExtraMetrics", wrappedMetrics}; dockerOperations.executeCommandInContainerAsRoot(context, 5L, command); } catch (DockerExecTimeoutException | JsonProcessingException e) { context.log(logger, LogLevel.WARNING, "Failed to push metrics to container", e); } } private Optional<Container> getContainer() { if (containerState == ABSENT) return Optional.empty(); Optional<Container> container = dockerOperations.getContainer(context); if (! container.isPresent()) containerState = ABSENT; return container; } @Override public boolean isDownloadingImage() { return imageBeingDownloaded != null; } @Override public int getAndResetNumberOfUnhandledExceptions() { int temp = numberOfUnhandledException; numberOfUnhandledException = 0; return temp; } class CpuUsageReporter { private long containerKernelUsage = 0; private long totalContainerUsage = 0; private long totalSystemUsage = 0; private long deltaContainerKernelUsage; private long deltaContainerUsage; private long deltaSystemUsage; private void updateCpuDeltas(long totalSystemUsage, long totalContainerUsage, long containerKernelUsage) { deltaSystemUsage = this.totalSystemUsage == 0 ? 
0 : (totalSystemUsage - this.totalSystemUsage); deltaContainerUsage = totalContainerUsage - this.totalContainerUsage; deltaContainerKernelUsage = containerKernelUsage - this.containerKernelUsage; this.totalSystemUsage = totalSystemUsage; this.totalContainerUsage = totalContainerUsage; this.containerKernelUsage = containerKernelUsage; } /** * Returns the CPU usage ratio for the docker container that this NodeAgent is managing * in the time between the last two times updateCpuDeltas() was called. This is calculated * by dividing the CPU time used by the container with the CPU time used by the entire system. */ double getCpuUsageRatio() { return deltaSystemUsage == 0 ? Double.NaN : (double) deltaContainerUsage / deltaSystemUsage; } double getCpuKernelUsageRatio() { return deltaSystemUsage == 0 ? Double.NaN : (double) deltaContainerKernelUsage / deltaSystemUsage; } } private void orchestratorSuspendNode() { context.log(logger, "Ask Orchestrator for permission to suspend node"); orchestrator.suspend(context.hostname().value()); } protected ContainerData createContainerData(NodeAgentContext context, NodeSpec node) { return (pathInContainer, data) -> { throw new UnsupportedOperationException("addFile not implemented"); }; } }
class NodeAgentImpl implements NodeAgent { private static final long BYTES_IN_GB = 1_000_000_000L; private static final Logger logger = Logger.getLogger(NodeAgentImpl.class.getName()); private final AtomicBoolean terminated = new AtomicBoolean(false); private boolean isFrozen = true; private boolean wantFrozen = false; private boolean workToDoNow = true; private boolean expectNodeNotInNodeRepo = false; private final Object monitor = new Object(); private DockerImage imageBeingDownloaded = null; private final NodeAgentContext context; private final NodeRepository nodeRepository; private final Orchestrator orchestrator; private final DockerOperations dockerOperations; private final StorageMaintainer storageMaintainer; private final Clock clock; private final Duration timeBetweenEachConverge; private final Optional<AthenzCredentialsMaintainer> athenzCredentialsMaintainer; private final Optional<AclMaintainer> aclMaintainer; private int numberOfUnhandledException = 0; private Instant lastConverge; private final Thread loopThread; private final Optional<HealthChecker> healthChecker; private final ScheduledExecutorService filebeatRestarter = Executors.newScheduledThreadPool(1, ThreadFactoryFactory.getDaemonThreadFactory("filebeatrestarter")); private Consumer<String> serviceRestarter; private Optional<Future<?>> currentFilebeatRestarter = Optional.empty(); private boolean hasResumedNode = false; private boolean hasStartedServices = true; /** * ABSENT means container is definitely absent - A container that was absent will not suddenly appear without * NodeAgent explicitly starting it. * STARTING state is set just before we attempt to start a container, if successful we move to the next state. * Otherwise we can't be certain. A container that was running a minute ago may no longer be running without * NodeAgent doing anything (container could have crashed). Therefore we always have to ask docker daemon * to get updated state of the container. 
*/ enum ContainerState { ABSENT, STARTING, UNKNOWN } private ContainerState containerState = UNKNOWN; private NodeSpec lastNode = null; private CpuUsageReporter lastCpuMetric = new CpuUsageReporter(); public NodeAgentImpl( final NodeAgentContext context, final NodeRepository nodeRepository, final Orchestrator orchestrator, final DockerOperations dockerOperations, final StorageMaintainer storageMaintainer, final Clock clock, final Duration timeBetweenEachConverge, final Optional<AthenzCredentialsMaintainer> athenzCredentialsMaintainer, final Optional<AclMaintainer> aclMaintainer, final Optional<HealthChecker> healthChecker) { this.context = context; this.nodeRepository = nodeRepository; this.orchestrator = orchestrator; this.dockerOperations = dockerOperations; this.storageMaintainer = storageMaintainer; this.clock = clock; this.timeBetweenEachConverge = timeBetweenEachConverge; this.lastConverge = clock.instant(); this.athenzCredentialsMaintainer = athenzCredentialsMaintainer; this.aclMaintainer = aclMaintainer; this.healthChecker = healthChecker; this.loopThread = new Thread(() -> { try { while (!terminated.get()) tick(); } catch (Throwable t) { numberOfUnhandledException++; context.log(logger, LogLevel.ERROR, "Unhandled throwable, ignoring", t); } }); this.loopThread.setName("tick-" + context.hostname()); } @Override public boolean setFrozen(boolean frozen) { synchronized (monitor) { if (wantFrozen != frozen) { wantFrozen = frozen; context.log(logger, LogLevel.DEBUG, wantFrozen ? 
"Freezing" : "Unfreezing"); signalWorkToBeDone(); } return isFrozen == frozen; } } @Override public void start() { context.log(logger, "Starting with interval " + timeBetweenEachConverge.toMillis() + " ms"); loopThread.start(); serviceRestarter = service -> { try { ProcessResult processResult = dockerOperations.executeCommandInContainerAsRoot( context, "service", service, "restart"); if (!processResult.isSuccess()) { context.log(logger, LogLevel.ERROR, "Failed to restart service " + service + ": " + processResult); } } catch (Exception e) { context.log(logger, LogLevel.ERROR, "Failed to restart service " + service, e); } }; } @Override public void stop() { filebeatRestarter.shutdown(); if (!terminated.compareAndSet(false, true)) { throw new RuntimeException("Can not re-stop a node agent."); } signalWorkToBeDone(); do { try { loopThread.join(); filebeatRestarter.awaitTermination(Long.MAX_VALUE, TimeUnit.NANOSECONDS); } catch (InterruptedException e) { context.log(logger, LogLevel.ERROR, "Interrupted while waiting for converge thread and filebeatRestarter scheduler to shutdown"); } } while (loopThread.isAlive() || !filebeatRestarter.isTerminated()); context.log(logger, "Stopped"); } void startServicesIfNeeded() { if (!hasStartedServices) { context.log(logger, "Starting services"); dockerOperations.startServices(context); hasStartedServices = true; } } void resumeNodeIfNeeded(NodeSpec node) { if (!hasResumedNode) { if (!currentFilebeatRestarter.isPresent()) { storageMaintainer.writeMetricsConfig(context, node); currentFilebeatRestarter = Optional.of(filebeatRestarter.scheduleWithFixedDelay( () -> serviceRestarter.accept("filebeat"), 1, 1, TimeUnit.DAYS)); } context.log(logger, LogLevel.DEBUG, "Starting optional node program resume command"); dockerOperations.resumeNode(context); hasResumedNode = true; } } private void updateNodeRepoWithCurrentAttributes(final NodeSpec node) { final NodeAttributes currentNodeAttributes = new NodeAttributes(); final NodeAttributes 
newNodeAttributes = new NodeAttributes(); if (!Objects.equals(node.getCurrentRestartGeneration(), node.getWantedRestartGeneration())) { currentNodeAttributes.withRestartGeneration(node.getCurrentRestartGeneration()); newNodeAttributes.withRestartGeneration(node.getWantedRestartGeneration()); } if (!Objects.equals(node.getCurrentRebootGeneration(), node.getWantedRebootGeneration())) { currentNodeAttributes.withRebootGeneration(node.getCurrentRebootGeneration()); newNodeAttributes.withRebootGeneration(node.getWantedRebootGeneration()); } Optional<DockerImage> actualDockerImage = node.getWantedDockerImage().filter(n -> containerState == UNKNOWN); if (!Objects.equals(node.getCurrentDockerImage(), actualDockerImage)) { currentNodeAttributes.withDockerImage(node.getCurrentDockerImage().orElse(new DockerImage(""))); newNodeAttributes.withDockerImage(actualDockerImage.orElse(new DockerImage(""))); } publishStateToNodeRepoIfChanged(currentNodeAttributes, newNodeAttributes); } private void publishStateToNodeRepoIfChanged(NodeAttributes currentAttributes, NodeAttributes newAttributes) { if (!currentAttributes.equals(newAttributes)) { context.log(logger, "Publishing new set of attributes to node repo: %s -> %s", currentAttributes, newAttributes); nodeRepository.updateNodeAttributes(context.hostname().value(), newAttributes); } } private void startContainer(NodeSpec node) { ContainerData containerData = createContainerData(context, node); dockerOperations.createContainer(context, node, containerData); dockerOperations.startContainer(context); lastCpuMetric = new CpuUsageReporter(); hasStartedServices = true; hasResumedNode = false; context.log(logger, "Container successfully started, new containerState is " + containerState); } private Optional<Container> removeContainerIfNeededUpdateContainerState(NodeSpec node, Optional<Container> existingContainer) { return existingContainer .flatMap(container -> removeContainerIfNeeded(node, container)) .map(container -> { 
shouldRestartServices(node).ifPresent(restartReason -> { context.log(logger, "Will restart services: " + restartReason); restartServices(node, container); }); return container; }); } private Optional<String> shouldRestartServices(NodeSpec node) { if (!node.getWantedRestartGeneration().isPresent()) return Optional.empty(); if (node.getCurrentRestartGeneration().get() < node.getWantedRestartGeneration().get()) { return Optional.of("Restart requested - wanted restart generation has been bumped: " + node.getCurrentRestartGeneration().get() + " -> " + node.getWantedRestartGeneration().get()); } return Optional.empty(); } private void restartServices(NodeSpec node, Container existingContainer) { if (existingContainer.state.isRunning() && node.getState() == Node.State.active) { context.log(logger, "Restarting services"); orchestratorSuspendNode(); dockerOperations.restartVespa(context); } } @Override public void stopServices() { context.log(logger, "Stopping services"); if (containerState == ABSENT) return; try { hasStartedServices = hasResumedNode = false; dockerOperations.stopServices(context); } catch (ContainerNotFoundException e) { containerState = ABSENT; } } @Override public void suspend() { context.log(logger, "Suspending services on node"); if (containerState == ABSENT) return; try { hasResumedNode = false; dockerOperations.suspendNode(context); } catch (ContainerNotFoundException e) { containerState = ABSENT; } catch (RuntimeException e) { context.log(logger, LogLevel.WARNING, "Failed trying to suspend container", e); } } private Optional<String> shouldRemoveContainer(NodeSpec node, Container existingContainer) { final Node.State nodeState = node.getState(); if (nodeState == Node.State.dirty || nodeState == Node.State.provisioned) { return Optional.of("Node in state " + nodeState + ", container should no longer be running"); } if (node.getWantedDockerImage().isPresent() && !node.getWantedDockerImage().get().equals(existingContainer.image)) { return 
Optional.of("The node is supposed to run a new Docker image: " + existingContainer.image.asString() + " -> " + node.getWantedDockerImage().get().asString()); } if (!existingContainer.state.isRunning()) { return Optional.of("Container no longer running"); } ContainerResources wantedContainerResources = ContainerResources.from( node.getMinCpuCores(), node.getMinMainMemoryAvailableGb()); if (!wantedContainerResources.equals(existingContainer.resources)) { return Optional.of("Container should be running with different resource allocation, wanted: " + wantedContainerResources + ", actual: " + existingContainer.resources); } if (node.getCurrentRebootGeneration() < node.getWantedRebootGeneration()) { return Optional.of(String.format("Container reboot wanted. Current: %d, Wanted: %d", node.getCurrentRebootGeneration(), node.getWantedRebootGeneration())); } if (containerState == STARTING) return Optional.of("Container failed to start"); return Optional.empty(); } private Optional<Container> removeContainerIfNeeded(NodeSpec node, Container existingContainer) { Optional<String> removeReason = shouldRemoveContainer(node, existingContainer); if (removeReason.isPresent()) { context.log(logger, "Will remove container: " + removeReason.get()); if (existingContainer.state.isRunning()) { if (node.getState() == Node.State.active) { orchestratorSuspendNode(); } try { if (node.getState() != Node.State.dirty) { suspend(); } stopServices(); } catch (Exception e) { context.log(logger, LogLevel.WARNING, "Failed stopping services, ignoring", e); } } stopFilebeatSchedulerIfNeeded(); storageMaintainer.handleCoreDumpsForContainer(context, node, Optional.of(existingContainer)); dockerOperations.removeContainer(context, existingContainer); containerState = ABSENT; context.log(logger, "Container successfully removed, new containerState is " + containerState); return Optional.empty(); } return Optional.of(existingContainer); } private void scheduleDownLoadIfNeeded(NodeSpec node) { if 
(node.getCurrentDockerImage().equals(node.getWantedDockerImage())) return; if (dockerOperations.pullImageAsyncIfNeeded(node.getWantedDockerImage().get())) { imageBeingDownloaded = node.getWantedDockerImage().get(); } else if (imageBeingDownloaded != null) { imageBeingDownloaded = null; } } private void signalWorkToBeDone() { synchronized (monitor) { if (!workToDoNow) { workToDoNow = true; context.log(logger, LogLevel.DEBUG, "Signaling work to be done"); monitor.notifyAll(); } } } void tick() { boolean isFrozenCopy; synchronized (monitor) { while (!workToDoNow) { long remainder = timeBetweenEachConverge .minus(Duration.between(lastConverge, clock.instant())) .toMillis(); if (remainder > 0) { try { monitor.wait(remainder); } catch (InterruptedException e) { context.log(logger, LogLevel.ERROR, "Interrupted while sleeping before tick, ignoring"); } } else break; } lastConverge = clock.instant(); workToDoNow = false; if (isFrozen != wantFrozen) { isFrozen = wantFrozen; context.log(logger, "Updated NodeAgent's frozen state, new value: isFrozen: " + isFrozen); } isFrozenCopy = isFrozen; } boolean converged = false; if (isFrozenCopy) { context.log(logger, LogLevel.DEBUG, "tick: isFrozen"); } else { try { converge(); converged = true; } catch (OrchestratorException e) { context.log(logger, e.getMessage()); } catch (ContainerNotFoundException e) { containerState = ABSENT; context.log(logger, LogLevel.WARNING, "Container unexpectedly gone, resetting containerState to " + containerState); } catch (DockerException e) { numberOfUnhandledException++; context.log(logger, LogLevel.ERROR, "Caught a DockerException", e); } catch (Exception e) { numberOfUnhandledException++; context.log(logger, LogLevel.ERROR, "Unhandled exception, ignoring.", e); } } } private void logChangesToNodeSpec(NodeSpec lastNode, NodeSpec node) { StringBuilder builder = new StringBuilder(); appendIfDifferent(builder, "state", lastNode, node, NodeSpec::getState); if (builder.length() > 0) { context.log(logger, 
LogLevel.INFO, "Changes to node: " + builder.toString()); } } private static <T> String fieldDescription(T value) { return value == null ? "[absent]" : value.toString(); } private <T> void appendIfDifferent(StringBuilder builder, String name, NodeSpec oldNode, NodeSpec newNode, Function<NodeSpec, T> getter) { T oldValue = oldNode == null ? null : getter.apply(oldNode); T newValue = getter.apply(newNode); if (!Objects.equals(oldValue, newValue)) { if (builder.length() > 0) { builder.append(", "); } builder.append(name).append(" ").append(fieldDescription(oldValue)).append(" -> ").append(fieldDescription(newValue)); } } private void stopFilebeatSchedulerIfNeeded() { if (currentFilebeatRestarter.isPresent()) { currentFilebeatRestarter.get().cancel(true); currentFilebeatRestarter = Optional.empty(); } } @SuppressWarnings("unchecked") public void updateContainerNodeMetrics() { final NodeSpec node = lastNode; if (node == null || containerState != UNKNOWN) return; Optional<ContainerStats> containerStats = dockerOperations.getContainerStats(context); if (!containerStats.isPresent()) return; Dimensions.Builder dimensionsBuilder = new Dimensions.Builder() .add("host", context.hostname().value()) .add("role", SecretAgentCheckConfig.nodeTypeToRole(context.nodeType())) .add("state", node.getState().toString()); node.getParentHostname().ifPresent(parent -> dimensionsBuilder.add("parentHostname", parent)); node.getAllowedToBeDown().ifPresent(allowed -> dimensionsBuilder.add("orchestratorState", allowed ? 
"ALLOWED_TO_BE_DOWN" : "NO_REMARKS")); Dimensions dimensions = dimensionsBuilder.build(); ContainerStats stats = containerStats.get(); final String APP = MetricReceiverWrapper.APPLICATION_NODE; final int totalNumCpuCores = ((List<Number>) ((Map) stats.getCpuStats().get("cpu_usage")).get("percpu_usage")).size(); final long cpuContainerKernelTime = ((Number) ((Map) stats.getCpuStats().get("cpu_usage")).get("usage_in_kernelmode")).longValue(); final long cpuContainerTotalTime = ((Number) ((Map) stats.getCpuStats().get("cpu_usage")).get("total_usage")).longValue(); final long cpuSystemTotalTime = ((Number) stats.getCpuStats().get("system_cpu_usage")).longValue(); final long memoryTotalBytes = ((Number) stats.getMemoryStats().get("limit")).longValue(); final long memoryTotalBytesUsage = ((Number) stats.getMemoryStats().get("usage")).longValue(); final long memoryTotalBytesCache = ((Number) ((Map) stats.getMemoryStats().get("stats")).get("cache")).longValue(); final long diskTotalBytes = (long) (node.getMinDiskAvailableGb() * BYTES_IN_GB); final Optional<Long> diskTotalBytesUsed = storageMaintainer.getDiskUsageFor(context); lastCpuMetric.updateCpuDeltas(cpuSystemTotalTime, cpuContainerTotalTime, cpuContainerKernelTime); final double allocatedCpuRatio = node.getMinCpuCores() / totalNumCpuCores; double cpuUsageRatioOfAllocated = lastCpuMetric.getCpuUsageRatio() / allocatedCpuRatio; double cpuKernelUsageRatioOfAllocated = lastCpuMetric.getCpuKernelUsageRatio() / allocatedCpuRatio; long memoryTotalBytesUsed = memoryTotalBytesUsage - memoryTotalBytesCache; double memoryUsageRatio = (double) memoryTotalBytesUsed / memoryTotalBytes; Optional<Double> diskUsageRatio = diskTotalBytesUsed.map(used -> (double) used / diskTotalBytes); List<DimensionMetrics> metrics = new ArrayList<>(); DimensionMetrics.Builder systemMetricsBuilder = new DimensionMetrics.Builder(APP, dimensions) .withMetric("mem.limit", memoryTotalBytes) .withMetric("mem.used", memoryTotalBytesUsed) 
.withMetric("mem.util", 100 * memoryUsageRatio) .withMetric("cpu.util", 100 * cpuUsageRatioOfAllocated) .withMetric("cpu.sys.util", 100 * cpuKernelUsageRatioOfAllocated) .withMetric("disk.limit", diskTotalBytes); diskTotalBytesUsed.ifPresent(diskUsed -> systemMetricsBuilder.withMetric("disk.used", diskUsed)); diskUsageRatio.ifPresent(diskRatio -> systemMetricsBuilder.withMetric("disk.util", 100 * diskRatio)); metrics.add(systemMetricsBuilder.build()); stats.getNetworks().forEach((interfaceName, interfaceStats) -> { Dimensions netDims = dimensionsBuilder.add("interface", interfaceName).build(); Map<String, Number> infStats = (Map<String, Number>) interfaceStats; DimensionMetrics networkMetrics = new DimensionMetrics.Builder(APP, netDims) .withMetric("net.in.bytes", infStats.get("rx_bytes").longValue()) .withMetric("net.in.errors", infStats.get("rx_errors").longValue()) .withMetric("net.in.dropped", infStats.get("rx_dropped").longValue()) .withMetric("net.out.bytes", infStats.get("tx_bytes").longValue()) .withMetric("net.out.errors", infStats.get("tx_errors").longValue()) .withMetric("net.out.dropped", infStats.get("tx_dropped").longValue()) .build(); metrics.add(networkMetrics); }); pushMetricsToContainer(metrics); } private void pushMetricsToContainer(List<DimensionMetrics> metrics) { StringBuilder params = new StringBuilder(); try { for (DimensionMetrics dimensionMetrics : metrics) { params.append(dimensionMetrics.toSecretAgentReport()); } String wrappedMetrics = "s:" + params.toString(); String[] command = {"vespa-rpc-invoke", "-t", "2", "tcp/localhost:19091", "setExtraMetrics", wrappedMetrics}; dockerOperations.executeCommandInContainerAsRoot(context, 5L, command); } catch (DockerExecTimeoutException | JsonProcessingException e) { context.log(logger, LogLevel.WARNING, "Failed to push metrics to container", e); } } private Optional<Container> getContainer() { if (containerState == ABSENT) return Optional.empty(); Optional<Container> container = 
dockerOperations.getContainer(context); if (! container.isPresent()) containerState = ABSENT; return container; } @Override public boolean isDownloadingImage() { return imageBeingDownloaded != null; } @Override public int getAndResetNumberOfUnhandledExceptions() { int temp = numberOfUnhandledException; numberOfUnhandledException = 0; return temp; } class CpuUsageReporter { private long containerKernelUsage = 0; private long totalContainerUsage = 0; private long totalSystemUsage = 0; private long deltaContainerKernelUsage; private long deltaContainerUsage; private long deltaSystemUsage; private void updateCpuDeltas(long totalSystemUsage, long totalContainerUsage, long containerKernelUsage) { deltaSystemUsage = this.totalSystemUsage == 0 ? 0 : (totalSystemUsage - this.totalSystemUsage); deltaContainerUsage = totalContainerUsage - this.totalContainerUsage; deltaContainerKernelUsage = containerKernelUsage - this.containerKernelUsage; this.totalSystemUsage = totalSystemUsage; this.totalContainerUsage = totalContainerUsage; this.containerKernelUsage = containerKernelUsage; } /** * Returns the CPU usage ratio for the docker container that this NodeAgent is managing * in the time between the last two times updateCpuDeltas() was called. This is calculated * by dividing the CPU time used by the container with the CPU time used by the entire system. */ double getCpuUsageRatio() { return deltaSystemUsage == 0 ? Double.NaN : (double) deltaContainerUsage / deltaSystemUsage; } double getCpuKernelUsageRatio() { return deltaSystemUsage == 0 ? Double.NaN : (double) deltaContainerKernelUsage / deltaSystemUsage; } } private void orchestratorSuspendNode() { context.log(logger, "Ask Orchestrator for permission to suspend node"); orchestrator.suspend(context.hostname().value()); } protected ContainerData createContainerData(NodeAgentContext context, NodeSpec node) { return (pathInContainer, data) -> { throw new UnsupportedOperationException("addFile not implemented"); }; } }
It's not clear from this code, but the `HealthChecker` is shared, so it cannot be closed here.
public void stop() { filebeatRestarter.shutdown(); if (!terminated.compareAndSet(false, true)) { throw new RuntimeException("Can not re-stop a node agent."); } signalWorkToBeDone(); do { try { loopThread.join(); filebeatRestarter.awaitTermination(Long.MAX_VALUE, TimeUnit.NANOSECONDS); } catch (InterruptedException e) { context.log(logger, LogLevel.ERROR, "Interrupted while waiting for converge thread and filebeatRestarter scheduler to shutdown"); } } while (loopThread.isAlive() || !filebeatRestarter.isTerminated()); healthChecker.ifPresent(HealthChecker::close); context.log(logger, "Stopped"); }
healthChecker.ifPresent(HealthChecker::close);
public void stop() { filebeatRestarter.shutdown(); if (!terminated.compareAndSet(false, true)) { throw new RuntimeException("Can not re-stop a node agent."); } signalWorkToBeDone(); do { try { loopThread.join(); filebeatRestarter.awaitTermination(Long.MAX_VALUE, TimeUnit.NANOSECONDS); } catch (InterruptedException e) { context.log(logger, LogLevel.ERROR, "Interrupted while waiting for converge thread and filebeatRestarter scheduler to shutdown"); } } while (loopThread.isAlive() || !filebeatRestarter.isTerminated()); context.log(logger, "Stopped"); }
class NodeAgentImpl implements NodeAgent { private static final long BYTES_IN_GB = 1_000_000_000L; private static final Logger logger = Logger.getLogger(NodeAgentImpl.class.getName()); private final AtomicBoolean terminated = new AtomicBoolean(false); private boolean isFrozen = true; private boolean wantFrozen = false; private boolean workToDoNow = true; private boolean expectNodeNotInNodeRepo = false; private final Object monitor = new Object(); private DockerImage imageBeingDownloaded = null; private final NodeAgentContext context; private final NodeRepository nodeRepository; private final Orchestrator orchestrator; private final DockerOperations dockerOperations; private final StorageMaintainer storageMaintainer; private final Clock clock; private final Duration timeBetweenEachConverge; private final Optional<AthenzCredentialsMaintainer> athenzCredentialsMaintainer; private final Optional<AclMaintainer> aclMaintainer; private int numberOfUnhandledException = 0; private Instant lastConverge; private Node.State lastState = null; private final Thread loopThread; private final Optional<HealthChecker> healthChecker; private final ScheduledExecutorService filebeatRestarter = Executors.newScheduledThreadPool(1, ThreadFactoryFactory.getDaemonThreadFactory("filebeatrestarter")); private Consumer<String> serviceRestarter; private Optional<Future<?>> currentFilebeatRestarter = Optional.empty(); private boolean hasResumedNode = false; private boolean hasStartedServices = true; /** * ABSENT means container is definitely absent - A container that was absent will not suddenly appear without * NodeAgent explicitly starting it. * STARTING state is set just before we attempt to start a container, if successful we move to the next state. * Otherwise we can't be certain. A container that was running a minute ago may no longer be running without * NodeAgent doing anything (container could have crashed). 
Therefore we always have to ask docker daemon * to get updated state of the container. */ enum ContainerState { ABSENT, STARTING, UNKNOWN } private ContainerState containerState = UNKNOWN; private NodeSpec lastNode = null; private CpuUsageReporter lastCpuMetric = new CpuUsageReporter(); public NodeAgentImpl( final NodeAgentContext context, final NodeRepository nodeRepository, final Orchestrator orchestrator, final DockerOperations dockerOperations, final StorageMaintainer storageMaintainer, final Clock clock, final Duration timeBetweenEachConverge, final Optional<AthenzCredentialsMaintainer> athenzCredentialsMaintainer, final Optional<AclMaintainer> aclMaintainer, final Optional<HealthChecker> healthChecker) { this.context = context; this.nodeRepository = nodeRepository; this.orchestrator = orchestrator; this.dockerOperations = dockerOperations; this.storageMaintainer = storageMaintainer; this.clock = clock; this.timeBetweenEachConverge = timeBetweenEachConverge; this.lastConverge = clock.instant(); this.athenzCredentialsMaintainer = athenzCredentialsMaintainer; this.aclMaintainer = aclMaintainer; this.healthChecker = healthChecker; this.loopThread = new Thread(() -> { try { while (!terminated.get()) tick(); } catch (Throwable t) { numberOfUnhandledException++; context.log(logger, LogLevel.ERROR, "Unhandled throwable, ignoring", t); } }); this.loopThread.setName("tick-" + context.hostname()); } @Override public boolean setFrozen(boolean frozen) { synchronized (monitor) { if (wantFrozen != frozen) { wantFrozen = frozen; context.log(logger, LogLevel.DEBUG, wantFrozen ? 
"Freezing" : "Unfreezing"); signalWorkToBeDone(); } return isFrozen == frozen; } } @Override public void start() { context.log(logger, "Starting with interval " + timeBetweenEachConverge.toMillis() + " ms"); loopThread.start(); serviceRestarter = service -> { try { ProcessResult processResult = dockerOperations.executeCommandInContainerAsRoot( context, "service", service, "restart"); if (!processResult.isSuccess()) { context.log(logger, LogLevel.ERROR, "Failed to restart service " + service + ": " + processResult); } } catch (Exception e) { context.log(logger, LogLevel.ERROR, "Failed to restart service " + service, e); } }; } @Override void startServicesIfNeeded() { if (!hasStartedServices) { context.log(logger, "Starting services"); dockerOperations.startServices(context); hasStartedServices = true; } } void resumeNodeIfNeeded(NodeSpec node) { if (!hasResumedNode) { if (!currentFilebeatRestarter.isPresent()) { storageMaintainer.writeMetricsConfig(context, node); currentFilebeatRestarter = Optional.of(filebeatRestarter.scheduleWithFixedDelay( () -> serviceRestarter.accept("filebeat"), 1, 1, TimeUnit.DAYS)); } context.log(logger, LogLevel.DEBUG, "Starting optional node program resume command"); dockerOperations.resumeNode(context); hasResumedNode = true; } } private void updateNodeRepoWithCurrentAttributes(final NodeSpec node) { final NodeAttributes currentNodeAttributes = new NodeAttributes(); final NodeAttributes newNodeAttributes = new NodeAttributes(); if (!Objects.equals(node.getCurrentRestartGeneration(), node.getWantedRestartGeneration())) { currentNodeAttributes.withRestartGeneration(node.getCurrentRestartGeneration()); newNodeAttributes.withRestartGeneration(node.getWantedRestartGeneration()); } if (!Objects.equals(node.getCurrentRebootGeneration(), node.getWantedRebootGeneration())) { currentNodeAttributes.withRebootGeneration(node.getCurrentRebootGeneration()); newNodeAttributes.withRebootGeneration(node.getWantedRebootGeneration()); } 
Optional<DockerImage> actualDockerImage = node.getWantedDockerImage().filter(n -> containerState == UNKNOWN); if (!Objects.equals(node.getCurrentDockerImage(), actualDockerImage)) { currentNodeAttributes.withDockerImage(node.getCurrentDockerImage().orElse(new DockerImage(""))); newNodeAttributes.withDockerImage(actualDockerImage.orElse(new DockerImage(""))); } publishStateToNodeRepoIfChanged(currentNodeAttributes, newNodeAttributes); } private void publishStateToNodeRepoIfChanged(NodeAttributes currentAttributes, NodeAttributes newAttributes) { if (!currentAttributes.equals(newAttributes)) { context.log(logger, "Publishing new set of attributes to node repo: %s -> %s", currentAttributes, newAttributes); nodeRepository.updateNodeAttributes(context.hostname().value(), newAttributes); } } private void startContainer(NodeSpec node) { ContainerData containerData = createContainerData(context, node); dockerOperations.createContainer(context, node, containerData); dockerOperations.startContainer(context); lastCpuMetric = new CpuUsageReporter(); hasStartedServices = true; hasResumedNode = false; context.log(logger, "Container successfully started, new containerState is " + containerState); } private Optional<Container> removeContainerIfNeededUpdateContainerState(NodeSpec node, Optional<Container> existingContainer) { return existingContainer .flatMap(container -> removeContainerIfNeeded(node, container)) .map(container -> { shouldRestartServices(node).ifPresent(restartReason -> { context.log(logger, "Will restart services: " + restartReason); restartServices(node, container); }); return container; }); } private Optional<String> shouldRestartServices(NodeSpec node) { if (!node.getWantedRestartGeneration().isPresent()) return Optional.empty(); if (node.getCurrentRestartGeneration().get() < node.getWantedRestartGeneration().get()) { return Optional.of("Restart requested - wanted restart generation has been bumped: " + node.getCurrentRestartGeneration().get() + " -> " + 
node.getWantedRestartGeneration().get()); } return Optional.empty(); } private void restartServices(NodeSpec node, Container existingContainer) { if (existingContainer.state.isRunning() && node.getState() == Node.State.active) { context.log(logger, "Restarting services"); orchestratorSuspendNode(); dockerOperations.restartVespa(context); } } @Override public void stopServices() { context.log(logger, "Stopping services"); if (containerState == ABSENT) return; try { hasStartedServices = hasResumedNode = false; dockerOperations.stopServices(context); } catch (ContainerNotFoundException e) { containerState = ABSENT; } } @Override public void suspend() { context.log(logger, "Suspending services on node"); if (containerState == ABSENT) return; try { hasResumedNode = false; dockerOperations.suspendNode(context); } catch (ContainerNotFoundException e) { containerState = ABSENT; } catch (RuntimeException e) { context.log(logger, LogLevel.WARNING, "Failed trying to suspend container", e); } } private Optional<String> shouldRemoveContainer(NodeSpec node, Container existingContainer) { final Node.State nodeState = node.getState(); if (nodeState == Node.State.dirty || nodeState == Node.State.provisioned) { return Optional.of("Node in state " + nodeState + ", container should no longer be running"); } if (node.getWantedDockerImage().isPresent() && !node.getWantedDockerImage().get().equals(existingContainer.image)) { return Optional.of("The node is supposed to run a new Docker image: " + existingContainer.image.asString() + " -> " + node.getWantedDockerImage().get().asString()); } if (!existingContainer.state.isRunning()) { return Optional.of("Container no longer running"); } ContainerResources wantedContainerResources = ContainerResources.from( node.getMinCpuCores(), node.getMinMainMemoryAvailableGb()); if (!wantedContainerResources.equals(existingContainer.resources)) { return Optional.of("Container should be running with different resource allocation, wanted: " + 
// NOTE(review): this excerpt begins inside shouldRemoveContainer(..); the method's opening
// lines and the enclosing class header are above this chunk. The fragment below completes
// that method's "wrong resource allocation" message and its remaining checks.
                wantedContainerResources + ", actual: " + existingContainer.resources);
        }
        if (node.getCurrentRebootGeneration() < node.getWantedRebootGeneration()) {
            return Optional.of(String.format("Container reboot wanted. Current: %d, Wanted: %d",
                    node.getCurrentRebootGeneration(), node.getWantedRebootGeneration()));
        }
        // containerState still STARTING here means a previous start attempt never completed.
        if (containerState == STARTING) return Optional.of("Container failed to start");
        return Optional.empty();
    }

    /**
     * Removes the container if shouldRemoveContainer(..) supplies a reason to.
     * Order matters: orchestrator suspend (active nodes only), then suspend/stop services
     * (best effort), then core-dump handling, then the actual removal.
     *
     * @return the container if it was kept, empty if it was removed
     */
    private Optional<Container> removeContainerIfNeeded(NodeSpec node, Container existingContainer) {
        Optional<String> removeReason = shouldRemoveContainer(node, existingContainer);
        if (removeReason.isPresent()) {
            context.log(logger, "Will remove container: " + removeReason.get());
            if (existingContainer.state.isRunning()) {
                if (node.getState() == Node.State.active) {
                    orchestratorSuspendNode();
                }
                try {
                    if (node.getState() != Node.State.dirty) {
                        suspend();
                    }
                    stopServices();
                } catch (Exception e) {
                    // Best effort only: removal proceeds even if services refuse to stop.
                    context.log(logger, LogLevel.WARNING, "Failed stopping services, ignoring", e);
                }
            }
            stopFilebeatSchedulerIfNeeded();
            storageMaintainer.handleCoreDumpsForContainer(context, node, Optional.of(existingContainer));
            dockerOperations.removeContainer(context, existingContainer);
            containerState = ABSENT;
            context.log(logger, "Container successfully removed, new containerState is " + containerState);
            return Optional.empty();
        }
        return Optional.of(existingContainer);
    }

    /** Kicks off an async pull of the wanted image when it differs from the current one. */
    private void scheduleDownLoadIfNeeded(NodeSpec node) {
        if (node.getCurrentDockerImage().equals(node.getWantedDockerImage())) return;

        if (dockerOperations.pullImageAsyncIfNeeded(node.getWantedDockerImage().get())) {
            imageBeingDownloaded = node.getWantedDockerImage().get();
        } else if (imageBeingDownloaded != null) {
            // Pull no longer in progress; clear the marker so isDownloadingImage() turns false.
            imageBeingDownloaded = null;
        }
    }

    /** Wakes the tick thread so the next converge runs immediately instead of after the interval. */
    private void signalWorkToBeDone() {
        synchronized (monitor) {
            if (!workToDoNow) {
                workToDoNow = true;
                context.log(logger, LogLevel.DEBUG, "Signaling work to be done");
                monitor.notifyAll();
            }
        }
    }

    /**
     * One iteration of the agent loop: waits until timeBetweenEachConverge has elapsed since
     * the last converge (or until signalWorkToBeDone() fires), then runs converge() unless
     * frozen. Every exception type is caught so the loop thread never dies.
     */
    void tick() {
        boolean isFrozenCopy;
        synchronized (monitor) {
            while (!workToDoNow) {
                long remainder = timeBetweenEachConverge
                        .minus(Duration.between(lastConverge, clock.instant()))
                        .toMillis();
                if (remainder > 0) {
                    try {
                        monitor.wait(remainder); // also woken early by signalWorkToBeDone()
                    } catch (InterruptedException e) {
                        context.log(logger, LogLevel.ERROR, "Interrupted while sleeping before tick, ignoring");
                    }
                } else break;
            }
            lastConverge = clock.instant();
            workToDoNow = false;

            if (isFrozen != wantFrozen) {
                isFrozen = wantFrozen;
                context.log(logger, "Updated NodeAgent's frozen state, new value: isFrozen: " + isFrozen);
            }
            isFrozenCopy = isFrozen; // copy taken under the monitor, read outside it below
        }

        boolean converged = false; // NOTE(review): assigned but never read afterwards — dead local
        if (isFrozenCopy) {
            context.log(logger, LogLevel.DEBUG, "tick: isFrozen");
        } else {
            try {
                converge();
                converged = true;
            } catch (OrchestratorException e) {
                // Orchestrator denial is routine; log the message without a stack trace.
                context.log(logger, e.getMessage());
            } catch (ContainerNotFoundException e) {
                containerState = ABSENT;
                context.log(logger, LogLevel.WARNING, "Container unexpectedly gone, resetting containerState to " + containerState);
            } catch (DockerException e) {
                numberOfUnhandledException++;
                context.log(logger, LogLevel.ERROR, "Caught a DockerException", e);
            } catch (Exception e) {
                numberOfUnhandledException++;
                context.log(logger, LogLevel.ERROR, "Unhandled exception, ignoring.", e);
            }
        }
    }

    /** Human-readable node state; null (node absent from the node repo) renders as "[absent]". */
    private String stateDescription(Node.State state) {
        return state == null ? "[absent]" : state.toString();
    }

    /**
     * Fetches this host's node from the node repository and drives the container and its
     * services towards the wanted state, dispatching on the node's current state.
     * Statement order within each case is significant (suspend before stop, attribute
     * publishing before orchestrator resume, etc.).
     */
    void converge() {
        final Optional<NodeSpec> optionalNode = nodeRepository.getOptionalNode(context.hostname().value());

        Node.State newState = optionalNode.map(NodeSpec::getState).orElse(null);
        if (newState != lastState) {
            context.log(logger, LogLevel.INFO, "State changed: " + stateDescription(lastState) + " -> " + stateDescription(newState));
            lastState = newState;
        }

        // Absence is expected right after the dirty-case handoff below sets expectNodeNotInNodeRepo.
        if (!optionalNode.isPresent() && expectNodeNotInNodeRepo) return;

        final NodeSpec node = optionalNode.orElseThrow(() ->
                new IllegalStateException(String.format("Node '%s' missing from node repository", context.hostname())));
        expectNodeNotInNodeRepo = false;

        Optional<Container> container = getContainer();
        if (!node.equals(lastNode)) {
            if (container.map(c -> c.state.isRunning()).orElse(false)) {
                storageMaintainer.writeMetricsConfig(context, node);
            }
            context.log(logger, LogLevel.DEBUG, "Loading new node spec: " + node.toString());
            lastNode = node;
        }

        switch (node.getState()) {
            case ready:
            case reserved:
            case parked:
            case failed:
                removeContainerIfNeededUpdateContainerState(node, container);
                updateNodeRepoWithCurrentAttributes(node);
                break;
            case active:
                storageMaintainer.handleCoreDumpsForContainer(context, node, container);
                // Clean old files once disk utilization reaches 80% of the node's allocation.
                storageMaintainer.getDiskUsageFor(context)
                        .map(diskUsage -> (double) diskUsage / BYTES_IN_GB / node.getMinDiskAvailableGb())
                        .filter(diskUtil -> diskUtil >= 0.8)
                        .ifPresent(diskUtil -> storageMaintainer.removeOldFilesFromNode(context));

                scheduleDownLoadIfNeeded(node);
                if (isDownloadingImage()) {
                    // Bail out; a later tick continues once the image pull has finished.
                    context.log(logger, LogLevel.DEBUG, "Waiting for image to download " + imageBeingDownloaded.asString());
                    return;
                }

                container = removeContainerIfNeededUpdateContainerState(node, container);
                if (!container.isPresent()) {
                    containerState = STARTING;
                    startContainer(node);
                    containerState = UNKNOWN;
                    aclMaintainer.ifPresent(AclMaintainer::converge);
                }

                startServicesIfNeeded();
                resumeNodeIfNeeded(node);
                athenzCredentialsMaintainer.ifPresent(maintainer -> maintainer.converge(context));
                healthChecker.ifPresent(checker -> checker.verifyHealth(context));

                // Publish attributes before resuming so the node repo reflects reality first.
                updateNodeRepoWithCurrentAttributes(node);
                context.log(logger, "Call resume against Orchestrator");
                orchestrator.resume(context.hostname().value());
                break;
            case inactive:
                removeContainerIfNeededUpdateContainerState(node, container);
                updateNodeRepoWithCurrentAttributes(node);
                break;
            case provisioned:
                nodeRepository.setNodeState(context.hostname().value(), Node.State.dirty);
                break;
            case dirty:
                removeContainerIfNeededUpdateContainerState(node, container);
                context.log(logger, "State is " + node.getState() + ", will delete application storage and mark node as ready");
                athenzCredentialsMaintainer.ifPresent(maintainer -> maintainer.clearCredentials(context));
                storageMaintainer.archiveNodeStorage(context);
                updateNodeRepoWithCurrentAttributes(node);
                nodeRepository.setNodeState(context.hostname().value(), Node.State.ready);
                expectNodeNotInNodeRepo = true;
                break;
            default:
                throw new RuntimeException("UNKNOWN STATE " + node.getState().name());
        }
    }

    /** Cancels the periodic filebeat restart task, if one is scheduled. */
    private void stopFilebeatSchedulerIfNeeded() {
        if (currentFilebeatRestarter.isPresent()) {
            currentFilebeatRestarter.get().cancel(true);
            currentFilebeatRestarter = Optional.empty();
        }
    }

    /**
     * Samples docker container stats, derives CPU/memory/disk/network metrics and pushes
     * them into the container. No-op unless a container is (believed to be) running.
     */
    @SuppressWarnings("unchecked")
    public void updateContainerNodeMetrics() {
        final NodeSpec node = lastNode; // snapshot; lastNode is written by converge()
        if (node == null || containerState != UNKNOWN) return;

        Optional<ContainerStats> containerStats = dockerOperations.getContainerStats(context);
        if (!containerStats.isPresent()) return;

        Dimensions.Builder dimensionsBuilder = new Dimensions.Builder()
                .add("host", context.hostname().value())
                .add("role", SecretAgentCheckConfig.nodeTypeToRole(context.nodeType()))
                .add("state", node.getState().toString());
        node.getParentHostname().ifPresent(parent -> dimensionsBuilder.add("parentHostname", parent));
        node.getAllowedToBeDown().ifPresent(allowed ->
                dimensionsBuilder.add("orchestratorState", allowed ? "ALLOWED_TO_BE_DOWN" : "NO_REMARKS"));
        Dimensions dimensions = dimensionsBuilder.build();

        ContainerStats stats = containerStats.get();
        final String APP = MetricReceiverWrapper.APPLICATION_NODE;
        // Raw docker stats maps; keys follow the docker stats API ("cpu_usage", "percpu_usage", ...).
        final int totalNumCpuCores = ((List<Number>) ((Map) stats.getCpuStats().get("cpu_usage")).get("percpu_usage")).size();
        final long cpuContainerKernelTime = ((Number) ((Map) stats.getCpuStats().get("cpu_usage")).get("usage_in_kernelmode")).longValue();
        final long cpuContainerTotalTime = ((Number) ((Map) stats.getCpuStats().get("cpu_usage")).get("total_usage")).longValue();
        final long cpuSystemTotalTime = ((Number) stats.getCpuStats().get("system_cpu_usage")).longValue();
        final long memoryTotalBytes = ((Number) stats.getMemoryStats().get("limit")).longValue();
        final long memoryTotalBytesUsage = ((Number) stats.getMemoryStats().get("usage")).longValue();
        final long memoryTotalBytesCache = ((Number) ((Map) stats.getMemoryStats().get("stats")).get("cache")).longValue();
        final long diskTotalBytes = (long) (node.getMinDiskAvailableGb() * BYTES_IN_GB);
        final Optional<Long> diskTotalBytesUsed = storageMaintainer.getDiskUsageFor(context);

        lastCpuMetric.updateCpuDeltas(cpuSystemTotalTime, cpuContainerTotalTime, cpuContainerKernelTime);

        // Scale host-wide CPU ratios to the share of cores allocated to this node.
        final double allocatedCpuRatio = node.getMinCpuCores() / totalNumCpuCores;
        double cpuUsageRatioOfAllocated = lastCpuMetric.getCpuUsageRatio() / allocatedCpuRatio;
        double cpuKernelUsageRatioOfAllocated = lastCpuMetric.getCpuKernelUsageRatio() / allocatedCpuRatio;

        long memoryTotalBytesUsed = memoryTotalBytesUsage - memoryTotalBytesCache; // page cache excluded
        double memoryUsageRatio = (double) memoryTotalBytesUsed / memoryTotalBytes;
        Optional<Double> diskUsageRatio = diskTotalBytesUsed.map(used -> (double) used / diskTotalBytes);

        List<DimensionMetrics> metrics = new ArrayList<>();
        DimensionMetrics.Builder systemMetricsBuilder = new DimensionMetrics.Builder(APP, dimensions)
                .withMetric("mem.limit", memoryTotalBytes)
                .withMetric("mem.used", memoryTotalBytesUsed)
                .withMetric("mem.util", 100 * memoryUsageRatio)
                .withMetric("cpu.util", 100 * cpuUsageRatioOfAllocated)
                .withMetric("cpu.sys.util", 100 * cpuKernelUsageRatioOfAllocated)
                .withMetric("disk.limit", diskTotalBytes);
        diskTotalBytesUsed.ifPresent(diskUsed -> systemMetricsBuilder.withMetric("disk.used", diskUsed));
        diskUsageRatio.ifPresent(diskRatio -> systemMetricsBuilder.withMetric("disk.util", 100 * diskRatio));
        metrics.add(systemMetricsBuilder.build());

        // One metric set per network interface reported by docker.
        stats.getNetworks().forEach((interfaceName, interfaceStats) -> {
            Dimensions netDims = dimensionsBuilder.add("interface", interfaceName).build();
            Map<String, Number> infStats = (Map<String, Number>) interfaceStats;
            DimensionMetrics networkMetrics = new DimensionMetrics.Builder(APP, netDims)
                    .withMetric("net.in.bytes", infStats.get("rx_bytes").longValue())
                    .withMetric("net.in.errors", infStats.get("rx_errors").longValue())
                    .withMetric("net.in.dropped", infStats.get("rx_dropped").longValue())
                    .withMetric("net.out.bytes", infStats.get("tx_bytes").longValue())
                    .withMetric("net.out.errors", infStats.get("tx_errors").longValue())
                    .withMetric("net.out.dropped", infStats.get("tx_dropped").longValue())
                    .build();
            metrics.add(networkMetrics);
        });

        pushMetricsToContainer(metrics);
    }

    /** Sends the metrics to the process listening on localhost:19091 inside the container via vespa-rpc-invoke. */
    private void pushMetricsToContainer(List<DimensionMetrics> metrics) {
        StringBuilder params = new StringBuilder();
        try {
            for (DimensionMetrics dimensionMetrics : metrics) {
                params.append(dimensionMetrics.toSecretAgentReport());
            }
            String wrappedMetrics = "s:" + params.toString();
            String[] command = {"vespa-rpc-invoke", "-t", "2", "tcp/localhost:19091", "setExtraMetrics", wrappedMetrics};
            dockerOperations.executeCommandInContainerAsRoot(context, 5L, command);
        } catch (DockerExecTimeoutException | JsonProcessingException e) {
            context.log(logger, LogLevel.WARNING, "Failed to push metrics to container", e);
        }
    }

    /** Asks the docker daemon for the container, caching a definite absence in containerState. */
    private Optional<Container> getContainer() {
        if (containerState == ABSENT) return Optional.empty();
        Optional<Container> container = dockerOperations.getContainer(context);
        if (! container.isPresent()) containerState = ABSENT;
        return container;
    }

    @Override
    public boolean isDownloadingImage() {
        return imageBeingDownloaded != null;
    }

    @Override
    public int getAndResetNumberOfUnhandledExceptions() {
        int temp = numberOfUnhandledException;
        numberOfUnhandledException = 0;
        return temp;
    }

    /** Tracks cumulative CPU counters between samples and exposes per-interval usage ratios. */
    class CpuUsageReporter {
        private long containerKernelUsage = 0;
        private long totalContainerUsage = 0;
        private long totalSystemUsage = 0;

        private long deltaContainerKernelUsage;
        private long deltaContainerUsage;
        private long deltaSystemUsage;

        private void updateCpuDeltas(long totalSystemUsage, long totalContainerUsage, long containerKernelUsage) {
            // First sample yields deltaSystemUsage == 0, which the getters map to NaN.
            deltaSystemUsage = this.totalSystemUsage == 0 ? 0 : (totalSystemUsage - this.totalSystemUsage);
            deltaContainerUsage = totalContainerUsage - this.totalContainerUsage;
            deltaContainerKernelUsage = containerKernelUsage - this.containerKernelUsage;
            this.totalSystemUsage = totalSystemUsage;
            this.totalContainerUsage = totalContainerUsage;
            this.containerKernelUsage = containerKernelUsage;
        }

        /**
         * Returns the CPU usage ratio for the docker container that this NodeAgent is managing
         * in the time between the last two times updateCpuDeltas() was called. This is calculated
         * by dividing the CPU time used by the container with the CPU time used by the entire system.
         */
        double getCpuUsageRatio() {
            return deltaSystemUsage == 0 ? Double.NaN : (double) deltaContainerUsage / deltaSystemUsage;
        }

        /** Like getCpuUsageRatio(), but counting only the container's kernel-mode CPU time. */
        double getCpuKernelUsageRatio() {
            return deltaSystemUsage == 0 ? Double.NaN : (double) deltaContainerKernelUsage / deltaSystemUsage;
        }
    }

    /** Asks the Orchestrator for permission to suspend this node; throws if denied. */
    private void orchestratorSuspendNode() {
        context.log(logger, "Ask Orchestrator for permission to suspend node");
        orchestrator.suspend(context.hostname().value());
    }

    /**
     * Factory for the ContainerData handed to createContainer. This default implementation
     * supports no file additions; subclasses override it — presumably to seed container files.
     */
    protected ContainerData createContainerData(NodeAgentContext context, NodeSpec node) {
        return (pathInContainer, data) -> {
            throw new UnsupportedOperationException("addFile not implemented");
        };
    }
}
/**
 * Agent responsible for a single node: runs a dedicated tick thread that repeatedly
 * converges the node's docker container and services towards the state wanted by the
 * node repository, coordinating with the Orchestrator for suspend/resume.
 */
class NodeAgentImpl implements NodeAgent {
    private static final long BYTES_IN_GB = 1_000_000_000L;

    private static final Logger logger = Logger.getLogger(NodeAgentImpl.class.getName());

    private final AtomicBoolean terminated = new AtomicBoolean(false);

    // All of these are guarded by monitor; copied out before use off the tick thread.
    private boolean isFrozen = true;
    private boolean wantFrozen = false;
    private boolean workToDoNow = true;
    private boolean expectNodeNotInNodeRepo = false;

    private final Object monitor = new Object();

    private DockerImage imageBeingDownloaded = null; // non-null while an image pull is in flight

    private final NodeAgentContext context;
    private final NodeRepository nodeRepository;
    private final Orchestrator orchestrator;
    private final DockerOperations dockerOperations;
    private final StorageMaintainer storageMaintainer;
    private final Clock clock;
    private final Duration timeBetweenEachConverge;
    private final Optional<AthenzCredentialsMaintainer> athenzCredentialsMaintainer;
    private final Optional<AclMaintainer> aclMaintainer;

    private int numberOfUnhandledException = 0;
    private Instant lastConverge;

    private final Thread loopThread;
    private final Optional<HealthChecker> healthChecker;

    private final ScheduledExecutorService filebeatRestarter =
            Executors.newScheduledThreadPool(1, ThreadFactoryFactory.getDaemonThreadFactory("filebeatrestarter"));
    private Consumer<String> serviceRestarter;
    private Optional<Future<?>> currentFilebeatRestarter = Optional.empty();

    private boolean hasResumedNode = false;
    private boolean hasStartedServices = true;

    /**
     * ABSENT means container is definitely absent - A container that was absent will not suddenly appear without
     * NodeAgent explicitly starting it.
     * STARTING state is set just before we attempt to start a container, if successful we move to the next state.
     * Otherwise we can't be certain. A container that was running a minute ago may no longer be running without
     * NodeAgent doing anything (container could have crashed). Therefore we always have to ask docker daemon
     * to get updated state of the container.
     */
    enum ContainerState {
        ABSENT,
        STARTING,
        UNKNOWN
    }

    private ContainerState containerState = UNKNOWN;

    private NodeSpec lastNode = null;
    private CpuUsageReporter lastCpuMetric = new CpuUsageReporter();

    public NodeAgentImpl(
            final NodeAgentContext context,
            final NodeRepository nodeRepository,
            final Orchestrator orchestrator,
            final DockerOperations dockerOperations,
            final StorageMaintainer storageMaintainer,
            final Clock clock,
            final Duration timeBetweenEachConverge,
            final Optional<AthenzCredentialsMaintainer> athenzCredentialsMaintainer,
            final Optional<AclMaintainer> aclMaintainer,
            final Optional<HealthChecker> healthChecker) {
        this.context = context;
        this.nodeRepository = nodeRepository;
        this.orchestrator = orchestrator;
        this.dockerOperations = dockerOperations;
        this.storageMaintainer = storageMaintainer;
        this.clock = clock;
        this.timeBetweenEachConverge = timeBetweenEachConverge;
        this.lastConverge = clock.instant();
        this.athenzCredentialsMaintainer = athenzCredentialsMaintainer;
        this.aclMaintainer = aclMaintainer;
        this.healthChecker = healthChecker;

        // The loop thread is created here but only started by start().
        this.loopThread = new Thread(() -> {
            try {
                while (!terminated.get()) tick();
            } catch (Throwable t) {
                numberOfUnhandledException++;
                context.log(logger, LogLevel.ERROR, "Unhandled throwable, ignoring", t);
            }
        });
        this.loopThread.setName("tick-" + context.hostname());
    }

    /**
     * Requests the agent to (un)freeze; returns whether the agent has already reached
     * the requested frozen state (the transition itself happens on the tick thread).
     */
    @Override
    public boolean setFrozen(boolean frozen) {
        synchronized (monitor) {
            if (wantFrozen != frozen) {
                wantFrozen = frozen;
                context.log(logger, LogLevel.DEBUG, wantFrozen ? "Freezing" : "Unfreezing");
                signalWorkToBeDone();
            }
            return isFrozen == frozen;
        }
    }

    /** Starts the tick loop and wires up the per-service restart command used for filebeat. */
    @Override
    public void start() {
        context.log(logger, "Starting with interval " + timeBetweenEachConverge.toMillis() + " ms");
        loopThread.start();
        serviceRestarter = service -> {
            try {
                ProcessResult processResult = dockerOperations.executeCommandInContainerAsRoot(
                        context, "service", service, "restart");
                if (!processResult.isSuccess()) {
                    context.log(logger, LogLevel.ERROR, "Failed to restart service " + service + ": " + processResult);
                }
            } catch (Exception e) {
                context.log(logger, LogLevel.ERROR, "Failed to restart service " + service, e);
            }
        };
    }

    /** Starts services inside the container unless they are already believed started. */
    @Override
    void startServicesIfNeeded() {
        if (!hasStartedServices) {
            context.log(logger, "Starting services");
            dockerOperations.startServices(context);
            hasStartedServices = true;
        }
    }

    /**
     * Runs the optional node resume program once after a (re)start, and schedules the
     * daily filebeat restart the first time around.
     */
    void resumeNodeIfNeeded(NodeSpec node) {
        if (!hasResumedNode) {
            if (!currentFilebeatRestarter.isPresent()) {
                storageMaintainer.writeMetricsConfig(context, node);
                currentFilebeatRestarter = Optional.of(filebeatRestarter.scheduleWithFixedDelay(
                        () -> serviceRestarter.accept("filebeat"), 1, 1, TimeUnit.DAYS));
            }
            context.log(logger, LogLevel.DEBUG, "Starting optional node program resume command");
            dockerOperations.resumeNode(context);
            hasResumedNode = true;
        }
    }

    /**
     * Publishes restart/reboot generation and docker image attributes to the node repo,
     * but only for attributes whose current and wanted values differ.
     */
    private void updateNodeRepoWithCurrentAttributes(final NodeSpec node) {
        final NodeAttributes currentNodeAttributes = new NodeAttributes();
        final NodeAttributes newNodeAttributes = new NodeAttributes();

        if (!Objects.equals(node.getCurrentRestartGeneration(), node.getWantedRestartGeneration())) {
            currentNodeAttributes.withRestartGeneration(node.getCurrentRestartGeneration());
            newNodeAttributes.withRestartGeneration(node.getWantedRestartGeneration());
        }
        if (!Objects.equals(node.getCurrentRebootGeneration(), node.getWantedRebootGeneration())) {
            currentNodeAttributes.withRebootGeneration(node.getCurrentRebootGeneration());
            newNodeAttributes.withRebootGeneration(node.getWantedRebootGeneration());
        }

        // Only report the wanted image as current once the container state is settled (UNKNOWN).
        Optional<DockerImage> actualDockerImage = node.getWantedDockerImage().filter(n -> containerState == UNKNOWN);
        if (!Objects.equals(node.getCurrentDockerImage(), actualDockerImage)) {
            currentNodeAttributes.withDockerImage(node.getCurrentDockerImage().orElse(new DockerImage("")));
            newNodeAttributes.withDockerImage(actualDockerImage.orElse(new DockerImage("")));
        }

        publishStateToNodeRepoIfChanged(currentNodeAttributes, newNodeAttributes);
    }

    /** Writes the new attributes to the node repo only when they differ from the current ones. */
    private void publishStateToNodeRepoIfChanged(NodeAttributes currentAttributes, NodeAttributes newAttributes) {
        if (!currentAttributes.equals(newAttributes)) {
            context.log(logger, "Publishing new set of attributes to node repo: %s -> %s",
                    currentAttributes, newAttributes);
            nodeRepository.updateNodeAttributes(context.hostname().value(), newAttributes);
        }
    }

    /** Creates and starts the container, resetting CPU metrics and service/resume bookkeeping. */
    private void startContainer(NodeSpec node) {
        ContainerData containerData = createContainerData(context, node);
        dockerOperations.createContainer(context, node, containerData);
        dockerOperations.startContainer(context);
        lastCpuMetric = new CpuUsageReporter();

        hasStartedServices = true; // NOTE(review): services appear to start with the container — confirm
        hasResumedNode = false;
        context.log(logger, "Container successfully started, new containerState is " + containerState);
    }

    /**
     * Removes the container if warranted; if it is kept, restarts its services when the
     * restart generation has been bumped. Returns the container iff it still exists.
     */
    private Optional<Container> removeContainerIfNeededUpdateContainerState(NodeSpec node, Optional<Container> existingContainer) {
        return existingContainer
                .flatMap(container -> removeContainerIfNeeded(node, container))
                .map(container -> {
                    shouldRestartServices(node).ifPresent(restartReason -> {
                        context.log(logger, "Will restart services: " + restartReason);
                        restartServices(node, container);
                    });
                    return container;
                });
    }

    /** Returns a reason to restart services iff the wanted restart generation exceeds the current one. */
    private Optional<String> shouldRestartServices(NodeSpec node) {
        if (!node.getWantedRestartGeneration().isPresent()) return Optional.empty();

        if (node.getCurrentRestartGeneration().get() < node.getWantedRestartGeneration().get()) {
            return Optional.of("Restart requested - wanted restart generation has been bumped: "
                    + node.getCurrentRestartGeneration().get() + " -> " + node.getWantedRestartGeneration().get());
        }
        return Optional.empty();
    }

    /** Restarts vespa inside a running container of an active node, suspending via the Orchestrator first. */
    private void restartServices(NodeSpec node, Container existingContainer) {
        if (existingContainer.state.isRunning() && node.getState() == Node.State.active) {
            context.log(logger, "Restarting services");
            orchestratorSuspendNode();
            dockerOperations.restartVespa(context);
        }
    }

    /** Stops services in the container; a vanished container just flips containerState to ABSENT. */
    @Override
    public void stopServices() {
        context.log(logger, "Stopping services");
        if (containerState == ABSENT) return;
        try {
            hasStartedServices = hasResumedNode = false;
            dockerOperations.stopServices(context);
        } catch (ContainerNotFoundException e) {
            containerState = ABSENT;
        }
    }

    /** Suspends services in the container; failures are logged, not propagated. */
    @Override
    public void suspend() {
        context.log(logger, "Suspending services on node");
        if (containerState == ABSENT) return;
        try {
            hasResumedNode = false;
            dockerOperations.suspendNode(context);
        } catch (ContainerNotFoundException e) {
            containerState = ABSENT;
        } catch (RuntimeException e) {
            // Suspend is best effort; callers proceed (e.g. with removal) regardless.
            context.log(logger, LogLevel.WARNING, "Failed trying to suspend container", e);
        }
    }

    /** Returns a human-readable reason to remove the container, or empty to keep it. */
    private Optional<String> shouldRemoveContainer(NodeSpec node, Container existingContainer) {
        final Node.State nodeState = node.getState();
        if (nodeState == Node.State.dirty || nodeState == Node.State.provisioned) {
            return Optional.of("Node in state " + nodeState + ", container should no longer be running");
        }
        if (node.getWantedDockerImage().isPresent() && !node.getWantedDockerImage().get().equals(existingContainer.image)) {
            return Optional.of("The node is supposed to run a new Docker image: "
                    + existingContainer.image.asString() + " -> " + node.getWantedDockerImage().get().asString());
        }
        if (!existingContainer.state.isRunning()) {
            return Optional.of("Container no longer running");
        }

        ContainerResources wantedContainerResources = ContainerResources.from(
                node.getMinCpuCores(), node.getMinMainMemoryAvailableGb());
        if (!wantedContainerResources.equals(existingContainer.resources)) {
            return Optional.of("Container should be running with different resource allocation, wanted: "
                    + wantedContainerResources + ", actual: " + existingContainer.resources);
        }
        if (node.getCurrentRebootGeneration() < node.getWantedRebootGeneration()) {
            return Optional.of(String.format("Container reboot wanted. Current: %d, Wanted: %d",
                    node.getCurrentRebootGeneration(), node.getWantedRebootGeneration()));
        }
        // Still STARTING here means the previous start attempt did not complete.
        if (containerState == STARTING) return Optional.of("Container failed to start");
        return Optional.empty();
    }

    /**
     * Removes the container if shouldRemoveContainer(..) supplies a reason to.
     * Order matters: orchestrator suspend (active nodes only), then suspend/stop services
     * (best effort), then core-dump handling, then the actual removal.
     *
     * @return the container if it was kept, empty if it was removed
     */
    private Optional<Container> removeContainerIfNeeded(NodeSpec node, Container existingContainer) {
        Optional<String> removeReason = shouldRemoveContainer(node, existingContainer);
        if (removeReason.isPresent()) {
            context.log(logger, "Will remove container: " + removeReason.get());
            if (existingContainer.state.isRunning()) {
                if (node.getState() == Node.State.active) {
                    orchestratorSuspendNode();
                }
                try {
                    if (node.getState() != Node.State.dirty) {
                        suspend();
                    }
                    stopServices();
                } catch (Exception e) {
                    context.log(logger, LogLevel.WARNING, "Failed stopping services, ignoring", e);
                }
            }
            stopFilebeatSchedulerIfNeeded();
            storageMaintainer.handleCoreDumpsForContainer(context, node, Optional.of(existingContainer));
            dockerOperations.removeContainer(context, existingContainer);
            containerState = ABSENT;
            context.log(logger, "Container successfully removed, new containerState is " + containerState);
            return Optional.empty();
        }
        return Optional.of(existingContainer);
    }

    /** Kicks off an async pull of the wanted image when it differs from the current one. */
    private void scheduleDownLoadIfNeeded(NodeSpec node) {
        if (node.getCurrentDockerImage().equals(node.getWantedDockerImage())) return;

        if (dockerOperations.pullImageAsyncIfNeeded(node.getWantedDockerImage().get())) {
            imageBeingDownloaded = node.getWantedDockerImage().get();
        } else if (imageBeingDownloaded != null) {
            // Pull no longer in progress; clear the marker so isDownloadingImage() turns false.
            imageBeingDownloaded = null;
        }
    }

    /** Wakes the tick thread so the next converge runs immediately instead of after the interval. */
    private void signalWorkToBeDone() {
        synchronized (monitor) {
            if (!workToDoNow) {
                workToDoNow = true;
                context.log(logger, LogLevel.DEBUG, "Signaling work to be done");
                monitor.notifyAll();
            }
        }
    }

    /**
     * One iteration of the agent loop: waits until timeBetweenEachConverge has elapsed since
     * the last converge (or until signalWorkToBeDone() fires), then runs converge() unless
     * frozen. Every exception type is caught so the loop thread never dies.
     */
    void tick() {
        boolean isFrozenCopy;
        synchronized (monitor) {
            while (!workToDoNow) {
                long remainder = timeBetweenEachConverge
                        .minus(Duration.between(lastConverge, clock.instant()))
                        .toMillis();
                if (remainder > 0) {
                    try {
                        monitor.wait(remainder); // also woken early by signalWorkToBeDone()
                    } catch (InterruptedException e) {
                        context.log(logger, LogLevel.ERROR, "Interrupted while sleeping before tick, ignoring");
                    }
                } else break;
            }
            lastConverge = clock.instant();
            workToDoNow = false;

            if (isFrozen != wantFrozen) {
                isFrozen = wantFrozen;
                context.log(logger, "Updated NodeAgent's frozen state, new value: isFrozen: " + isFrozen);
            }
            isFrozenCopy = isFrozen; // copy taken under the monitor, read outside it below
        }

        boolean converged = false; // NOTE(review): assigned but never read afterwards — dead local
        if (isFrozenCopy) {
            context.log(logger, LogLevel.DEBUG, "tick: isFrozen");
        } else {
            try {
                converge();
                converged = true;
            } catch (OrchestratorException e) {
                // Orchestrator denial is routine; log the message without a stack trace.
                context.log(logger, e.getMessage());
            } catch (ContainerNotFoundException e) {
                containerState = ABSENT;
                context.log(logger, LogLevel.WARNING, "Container unexpectedly gone, resetting containerState to " + containerState);
            } catch (DockerException e) {
                numberOfUnhandledException++;
                context.log(logger, LogLevel.ERROR, "Caught a DockerException", e);
            } catch (Exception e) {
                numberOfUnhandledException++;
                context.log(logger, LogLevel.ERROR, "Unhandled exception, ignoring.", e);
            }
        }
    }

    /**
     * Fetches this host's node from the node repository and drives the container and its
     * services towards the wanted state, dispatching on the node's current state.
     * Statement order within each case is significant (suspend before stop, attribute
     * publishing before orchestrator resume, etc.).
     */
    void converge() {
        final Optional<NodeSpec> optionalNode = nodeRepository.getOptionalNode(context.hostname().value());

        // Absence is expected right after the dirty-case handoff below sets expectNodeNotInNodeRepo.
        if (!optionalNode.isPresent() && expectNodeNotInNodeRepo) {
            context.log(logger, LogLevel.INFO, "Node removed from node repo (as expected)");
            return;
        }

        final NodeSpec node = optionalNode.orElseThrow(() ->
                new IllegalStateException(String.format("Node '%s' missing from node repository", context.hostname())));
        expectNodeNotInNodeRepo = false;

        Optional<Container> container = getContainer();
        if (!node.equals(lastNode)) {
            logChangesToNodeSpec(lastNode, node);

            if (container.map(c -> c.state.isRunning()).orElse(false)) {
                storageMaintainer.writeMetricsConfig(context, node);
            }

            lastNode = node;
        }

        switch (node.getState()) {
            case ready:
            case reserved:
            case parked:
            case failed:
                removeContainerIfNeededUpdateContainerState(node, container);
                updateNodeRepoWithCurrentAttributes(node);
                break;
            case active:
                storageMaintainer.handleCoreDumpsForContainer(context, node, container);
                // Clean old files once disk utilization reaches 80% of the node's allocation.
                storageMaintainer.getDiskUsageFor(context)
                        .map(diskUsage -> (double) diskUsage / BYTES_IN_GB / node.getMinDiskAvailableGb())
                        .filter(diskUtil -> diskUtil >= 0.8)
                        .ifPresent(diskUtil -> storageMaintainer.removeOldFilesFromNode(context));

                scheduleDownLoadIfNeeded(node);
                if (isDownloadingImage()) {
                    // Bail out; a later tick continues once the image pull has finished.
                    context.log(logger, LogLevel.DEBUG, "Waiting for image to download " + imageBeingDownloaded.asString());
                    return;
                }

                container = removeContainerIfNeededUpdateContainerState(node, container);
                if (!container.isPresent()) {
                    containerState = STARTING;
                    startContainer(node);
                    containerState = UNKNOWN;
                    aclMaintainer.ifPresent(AclMaintainer::converge);
                }

                startServicesIfNeeded();
                resumeNodeIfNeeded(node);
                athenzCredentialsMaintainer.ifPresent(maintainer -> maintainer.converge(context));
                healthChecker.ifPresent(checker -> checker.verifyHealth(context));

                // Publish attributes before resuming so the node repo reflects reality first.
                updateNodeRepoWithCurrentAttributes(node);
                context.log(logger, "Call resume against Orchestrator");
                orchestrator.resume(context.hostname().value());
                break;
            case inactive:
                removeContainerIfNeededUpdateContainerState(node, container);
                updateNodeRepoWithCurrentAttributes(node);
                break;
            case provisioned:
                nodeRepository.setNodeState(context.hostname().value(), Node.State.dirty);
                break;
            case dirty:
                removeContainerIfNeededUpdateContainerState(node, container);
                context.log(logger, "State is " + node.getState() + ", will delete application storage and mark node as ready");
                athenzCredentialsMaintainer.ifPresent(maintainer -> maintainer.clearCredentials(context));
                storageMaintainer.archiveNodeStorage(context);
                updateNodeRepoWithCurrentAttributes(node);
                nodeRepository.setNodeState(context.hostname().value(), Node.State.ready);
                expectNodeNotInNodeRepo = true;
                break;
            default:
                throw new RuntimeException("UNKNOWN STATE " + node.getState().name());
        }
    }

    /** Logs a one-line summary of the node-spec fields that changed since the last spec. */
    private void logChangesToNodeSpec(NodeSpec lastNode, NodeSpec node) {
        StringBuilder builder = new StringBuilder();
        appendIfDifferent(builder, "state", lastNode, node, NodeSpec::getState);
        if (builder.length() > 0) {
            context.log(logger, LogLevel.INFO, "Changes to node: " + builder.toString());
        }
    }

    /** Renders null (field or whole node absent) as "[absent]". */
    private static <T> String fieldDescription(T value) {
        return value == null ? "[absent]" : value.toString();
    }

    /** Appends "name old -> new" to builder when the field differs between the two specs. */
    private <T> void appendIfDifferent(StringBuilder builder, String name, NodeSpec oldNode, NodeSpec newNode, Function<NodeSpec, T> getter) {
        T oldValue = oldNode == null ? null : getter.apply(oldNode);
        T newValue = getter.apply(newNode);
        if (!Objects.equals(oldValue, newValue)) {
            if (builder.length() > 0) {
                builder.append(", ");
            }
            builder.append(name).append(" ").append(fieldDescription(oldValue)).append(" -> ").append(fieldDescription(newValue));
        }
    }

    /** Cancels the periodic filebeat restart task, if one is scheduled. */
    private void stopFilebeatSchedulerIfNeeded() {
        if (currentFilebeatRestarter.isPresent()) {
            currentFilebeatRestarter.get().cancel(true);
            currentFilebeatRestarter = Optional.empty();
        }
    }

    /**
     * Samples docker container stats, derives CPU/memory/disk/network metrics and pushes
     * them into the container. No-op unless a container is (believed to be) running.
     */
    @SuppressWarnings("unchecked")
    public void updateContainerNodeMetrics() {
        final NodeSpec node = lastNode; // snapshot; lastNode is written by converge()
        if (node == null || containerState != UNKNOWN) return;

        Optional<ContainerStats> containerStats = dockerOperations.getContainerStats(context);
        if (!containerStats.isPresent()) return;

        Dimensions.Builder dimensionsBuilder = new Dimensions.Builder()
                .add("host", context.hostname().value())
                .add("role", SecretAgentCheckConfig.nodeTypeToRole(context.nodeType()))
                .add("state", node.getState().toString());
        node.getParentHostname().ifPresent(parent -> dimensionsBuilder.add("parentHostname", parent));
        node.getAllowedToBeDown().ifPresent(allowed ->
                dimensionsBuilder.add("orchestratorState", allowed ? "ALLOWED_TO_BE_DOWN" : "NO_REMARKS"));
        Dimensions dimensions = dimensionsBuilder.build();

        ContainerStats stats = containerStats.get();
        final String APP = MetricReceiverWrapper.APPLICATION_NODE;
        // Raw docker stats maps; keys follow the docker stats API ("cpu_usage", "percpu_usage", ...).
        final int totalNumCpuCores = ((List<Number>) ((Map) stats.getCpuStats().get("cpu_usage")).get("percpu_usage")).size();
        final long cpuContainerKernelTime = ((Number) ((Map) stats.getCpuStats().get("cpu_usage")).get("usage_in_kernelmode")).longValue();
        final long cpuContainerTotalTime = ((Number) ((Map) stats.getCpuStats().get("cpu_usage")).get("total_usage")).longValue();
        final long cpuSystemTotalTime = ((Number) stats.getCpuStats().get("system_cpu_usage")).longValue();
        final long memoryTotalBytes = ((Number) stats.getMemoryStats().get("limit")).longValue();
        final long memoryTotalBytesUsage = ((Number) stats.getMemoryStats().get("usage")).longValue();
        final long memoryTotalBytesCache = ((Number) ((Map) stats.getMemoryStats().get("stats")).get("cache")).longValue();
        final long diskTotalBytes = (long) (node.getMinDiskAvailableGb() * BYTES_IN_GB);
        final Optional<Long> diskTotalBytesUsed = storageMaintainer.getDiskUsageFor(context);

        lastCpuMetric.updateCpuDeltas(cpuSystemTotalTime, cpuContainerTotalTime, cpuContainerKernelTime);

        // Scale host-wide CPU ratios to the share of cores allocated to this node.
        final double allocatedCpuRatio = node.getMinCpuCores() / totalNumCpuCores;
        double cpuUsageRatioOfAllocated = lastCpuMetric.getCpuUsageRatio() / allocatedCpuRatio;
        double cpuKernelUsageRatioOfAllocated = lastCpuMetric.getCpuKernelUsageRatio() / allocatedCpuRatio;

        long memoryTotalBytesUsed = memoryTotalBytesUsage - memoryTotalBytesCache; // page cache excluded
        double memoryUsageRatio = (double) memoryTotalBytesUsed / memoryTotalBytes;
        Optional<Double> diskUsageRatio = diskTotalBytesUsed.map(used -> (double) used / diskTotalBytes);

        List<DimensionMetrics> metrics = new ArrayList<>();
        DimensionMetrics.Builder systemMetricsBuilder = new DimensionMetrics.Builder(APP, dimensions)
                .withMetric("mem.limit", memoryTotalBytes)
                .withMetric("mem.used", memoryTotalBytesUsed)
                .withMetric("mem.util", 100 * memoryUsageRatio)
                .withMetric("cpu.util", 100 * cpuUsageRatioOfAllocated)
                .withMetric("cpu.sys.util", 100 * cpuKernelUsageRatioOfAllocated)
                .withMetric("disk.limit", diskTotalBytes);
        diskTotalBytesUsed.ifPresent(diskUsed -> systemMetricsBuilder.withMetric("disk.used", diskUsed));
        diskUsageRatio.ifPresent(diskRatio -> systemMetricsBuilder.withMetric("disk.util", 100 * diskRatio));
        metrics.add(systemMetricsBuilder.build());

        // One metric set per network interface reported by docker.
        stats.getNetworks().forEach((interfaceName, interfaceStats) -> {
            Dimensions netDims = dimensionsBuilder.add("interface", interfaceName).build();
            Map<String, Number> infStats = (Map<String, Number>) interfaceStats;
            DimensionMetrics networkMetrics = new DimensionMetrics.Builder(APP, netDims)
                    .withMetric("net.in.bytes", infStats.get("rx_bytes").longValue())
                    .withMetric("net.in.errors", infStats.get("rx_errors").longValue())
                    .withMetric("net.in.dropped", infStats.get("rx_dropped").longValue())
                    .withMetric("net.out.bytes", infStats.get("tx_bytes").longValue())
                    .withMetric("net.out.errors", infStats.get("tx_errors").longValue())
                    .withMetric("net.out.dropped", infStats.get("tx_dropped").longValue())
                    .build();
            metrics.add(networkMetrics);
        });

        pushMetricsToContainer(metrics);
    }

    /** Sends the metrics to the process listening on localhost:19091 inside the container via vespa-rpc-invoke. */
    private void pushMetricsToContainer(List<DimensionMetrics> metrics) {
        StringBuilder params = new StringBuilder();
        try {
            for (DimensionMetrics dimensionMetrics : metrics) {
                params.append(dimensionMetrics.toSecretAgentReport());
            }
            String wrappedMetrics = "s:" + params.toString();
            String[] command = {"vespa-rpc-invoke", "-t", "2", "tcp/localhost:19091", "setExtraMetrics", wrappedMetrics};
            dockerOperations.executeCommandInContainerAsRoot(context, 5L, command);
        } catch (DockerExecTimeoutException | JsonProcessingException e) {
            context.log(logger, LogLevel.WARNING, "Failed to push metrics to container", e);
        }
    }

    /** Asks the docker daemon for the container, caching a definite absence in containerState. */
    private Optional<Container> getContainer() {
        if (containerState == ABSENT) return Optional.empty();
        Optional<Container> container = dockerOperations.getContainer(context);
        if (! container.isPresent()) containerState = ABSENT;
        return container;
    }

    @Override
    public boolean isDownloadingImage() {
        return imageBeingDownloaded != null;
    }

    @Override
    public int getAndResetNumberOfUnhandledExceptions() {
        int temp = numberOfUnhandledException;
        numberOfUnhandledException = 0;
        return temp;
    }

    /** Tracks cumulative CPU counters between samples and exposes per-interval usage ratios. */
    class CpuUsageReporter {
        private long containerKernelUsage = 0;
        private long totalContainerUsage = 0;
        private long totalSystemUsage = 0;

        private long deltaContainerKernelUsage;
        private long deltaContainerUsage;
        private long deltaSystemUsage;

        private void updateCpuDeltas(long totalSystemUsage, long totalContainerUsage, long containerKernelUsage) {
            // First sample yields deltaSystemUsage == 0, which the getters map to NaN.
            deltaSystemUsage = this.totalSystemUsage == 0 ? 0 : (totalSystemUsage - this.totalSystemUsage);
            deltaContainerUsage = totalContainerUsage - this.totalContainerUsage;
            deltaContainerKernelUsage = containerKernelUsage - this.containerKernelUsage;
            this.totalSystemUsage = totalSystemUsage;
            this.totalContainerUsage = totalContainerUsage;
            this.containerKernelUsage = containerKernelUsage;
        }

        /**
         * Returns the CPU usage ratio for the docker container that this NodeAgent is managing
         * in the time between the last two times updateCpuDeltas() was called. This is calculated
         * by dividing the CPU time used by the container with the CPU time used by the entire system.
         */
        double getCpuUsageRatio() {
            return deltaSystemUsage == 0 ? Double.NaN : (double) deltaContainerUsage / deltaSystemUsage;
        }

        /** Like getCpuUsageRatio(), but counting only the container's kernel-mode CPU time. */
        double getCpuKernelUsageRatio() {
            return deltaSystemUsage == 0 ? Double.NaN : (double) deltaContainerKernelUsage / deltaSystemUsage;
        }
    }

    /** Asks the Orchestrator for permission to suspend this node; throws if denied. */
    private void orchestratorSuspendNode() {
        context.log(logger, "Ask Orchestrator for permission to suspend node");
        orchestrator.suspend(context.hostname().value());
    }

    /**
     * Factory for the ContainerData handed to createContainer. This default implementation
     * supports no file additions; subclasses override it — presumably to seed container files.
     */
    protected ContainerData createContainerData(NodeAgentContext context, NodeSpec node) {
        return (pathInContainer, data) -> {
            throw new UnsupportedOperationException("addFile not implemented");
        };
    }
}
The next statement is a `return`.
// One convergence pass: reconcile this host's container with the wanted state held by
// the node repository. Invoked repeatedly from the tick loop; exceptions are handled by
// the caller (tick()).
void converge() {
    final Optional<NodeSpec> optionalNode = nodeRepository.getOptionalNode(context.hostname().value());

    // Log node-repo state transitions between passes. newState is null when the node is
    // missing from the repo; stateDescription() renders null as "[absent]".
    Node.State newState = optionalNode.map(NodeSpec::getState).orElse(null);
    if (newState != lastState) {
        context.log(logger, LogLevel.INFO, "State changed: " + stateDescription(lastState) + " -> " + stateDescription(newState));
        lastState = newState;
    }

    // After we move a node to ready (dirty case below), it is expected to disappear from
    // the node repo — in that case there is nothing to converge.
    if (!optionalNode.isPresent() && expectNodeNotInNodeRepo) return;

    final NodeSpec node = optionalNode.orElseThrow(() ->
            new IllegalStateException(String.format("Node '%s' missing from node repository", context.hostname())));
    expectNodeNotInNodeRepo = false;

    Optional<Container> container = getContainer();
    if (!node.equals(lastNode)) {
        // Spec changed since last pass: refresh metrics config for a running container.
        if (container.map(c -> c.state.isRunning()).orElse(false)) {
            storageMaintainer.writeMetricsConfig(context, node);
        }
        context.log(logger, LogLevel.DEBUG, "Loading new node spec: " + node.toString());
        lastNode = node;
    }

    switch (node.getState()) {
        // In these states the container should not run; just remove it (if needed) and
        // report our current attributes back to the repo.
        case ready:
        case reserved:
        case parked:
        case failed:
            removeContainerIfNeededUpdateContainerState(node, container);
            updateNodeRepoWithCurrentAttributes(node);
            break;
        case active:
            storageMaintainer.handleCoreDumpsForContainer(context, node, container);

            // Trigger cleanup when disk utilization reaches 80% of the allocation.
            storageMaintainer.getDiskUsageFor(context)
                    .map(diskUsage -> (double) diskUsage / BYTES_IN_GB / node.getMinDiskAvailableGb())
                    .filter(diskUtil -> diskUtil >= 0.8)
                    .ifPresent(diskUtil -> storageMaintainer.removeOldFilesFromNode(context));

            scheduleDownLoadIfNeeded(node);
            // Wait for the wanted image before (re)starting the container; retry next tick.
            if (isDownloadingImage()) {
                context.log(logger, LogLevel.DEBUG, "Waiting for image to download " + imageBeingDownloaded.asString());
                return;
            }

            container = removeContainerIfNeededUpdateContainerState(node, container);
            if (! container.isPresent()) {
                // STARTING marks the window where a failed start must force a re-create
                // (see shouldRemoveContainer); UNKNOWN is restored on success.
                containerState = STARTING;
                startContainer(node);
                containerState = UNKNOWN;
                aclMaintainer.ifPresent(AclMaintainer::converge);
            }

            startServicesIfNeeded();
            resumeNodeIfNeeded(node);
            athenzCredentialsMaintainer.ifPresent(maintainer -> maintainer.converge(context));
            healthChecker.ifPresent(checker -> checker.verifyHealth(context));

            // Report attributes before resuming so the orchestrator sees up-to-date state.
            updateNodeRepoWithCurrentAttributes(node);
            context.log(logger, "Call resume against Orchestrator");
            orchestrator.resume(context.hostname().value());
            break;
        case inactive:
            removeContainerIfNeededUpdateContainerState(node, container);
            updateNodeRepoWithCurrentAttributes(node);
            break;
        case provisioned:
            nodeRepository.setNodeState(context.hostname().value(), Node.State.dirty);
            break;
        case dirty:
            removeContainerIfNeededUpdateContainerState(node, container);
            context.log(logger, "State is " + node.getState() + ", will delete application storage and mark node as ready");
            athenzCredentialsMaintainer.ifPresent(maintainer -> maintainer.clearCredentials(context));
            storageMaintainer.archiveNodeStorage(context);
            updateNodeRepoWithCurrentAttributes(node);
            nodeRepository.setNodeState(context.hostname().value(), Node.State.ready);
            // The repo may now remove the node entirely; don't treat that as an error.
            expectNodeNotInNodeRepo = true;
            break;
        default:
            throw new RuntimeException("UNKNOWN STATE " + node.getState().name());
    }
}
Node.State newState = optionalNode.map(NodeSpec::getState).orElse(null);
// One convergence pass: reconcile this host's container with the wanted state held by
// the node repository. Invoked repeatedly from the tick loop; exceptions are handled by
// the caller (tick()).
void converge() {
    final Optional<NodeSpec> optionalNode = nodeRepository.getOptionalNode(context.hostname().value());

    // After we move a node to ready (dirty case below), it is expected to disappear from
    // the node repo — log and bail out instead of failing.
    if (!optionalNode.isPresent() && expectNodeNotInNodeRepo) {
        context.log(logger, LogLevel.INFO, "Node removed from node repo (as expected)");
        return;
    }

    final NodeSpec node = optionalNode.orElseThrow(() ->
            new IllegalStateException(String.format("Node '%s' missing from node repository", context.hostname())));
    expectNodeNotInNodeRepo = false;

    Optional<Container> container = getContainer();
    if (!node.equals(lastNode)) {
        // NOTE(review): logChangesToNodeSpec is not visible in this chunk — presumably it
        // logs the diff between the old and new spec; confirm against the full file.
        logChangesToNodeSpec(lastNode, node);
        // Spec changed since last pass: refresh metrics config for a running container.
        if (container.map(c -> c.state.isRunning()).orElse(false)) {
            storageMaintainer.writeMetricsConfig(context, node);
        }
        lastNode = node;
    }

    switch (node.getState()) {
        // In these states the container should not run; just remove it (if needed) and
        // report our current attributes back to the repo.
        case ready:
        case reserved:
        case parked:
        case failed:
            removeContainerIfNeededUpdateContainerState(node, container);
            updateNodeRepoWithCurrentAttributes(node);
            break;
        case active:
            storageMaintainer.handleCoreDumpsForContainer(context, node, container);

            // Trigger cleanup when disk utilization reaches 80% of the allocation.
            storageMaintainer.getDiskUsageFor(context)
                    .map(diskUsage -> (double) diskUsage / BYTES_IN_GB / node.getMinDiskAvailableGb())
                    .filter(diskUtil -> diskUtil >= 0.8)
                    .ifPresent(diskUtil -> storageMaintainer.removeOldFilesFromNode(context));

            scheduleDownLoadIfNeeded(node);
            // Wait for the wanted image before (re)starting the container; retry next tick.
            if (isDownloadingImage()) {
                context.log(logger, LogLevel.DEBUG, "Waiting for image to download " + imageBeingDownloaded.asString());
                return;
            }

            container = removeContainerIfNeededUpdateContainerState(node, container);
            if (! container.isPresent()) {
                // STARTING marks the window where a failed start must force a re-create
                // (see shouldRemoveContainer); UNKNOWN is restored on success.
                containerState = STARTING;
                startContainer(node);
                containerState = UNKNOWN;
                aclMaintainer.ifPresent(AclMaintainer::converge);
            }

            startServicesIfNeeded();
            resumeNodeIfNeeded(node);
            athenzCredentialsMaintainer.ifPresent(maintainer -> maintainer.converge(context));
            healthChecker.ifPresent(checker -> checker.verifyHealth(context));

            // Report attributes before resuming so the orchestrator sees up-to-date state.
            updateNodeRepoWithCurrentAttributes(node);
            context.log(logger, "Call resume against Orchestrator");
            orchestrator.resume(context.hostname().value());
            break;
        case inactive:
            removeContainerIfNeededUpdateContainerState(node, container);
            updateNodeRepoWithCurrentAttributes(node);
            break;
        case provisioned:
            nodeRepository.setNodeState(context.hostname().value(), Node.State.dirty);
            break;
        case dirty:
            removeContainerIfNeededUpdateContainerState(node, container);
            context.log(logger, "State is " + node.getState() + ", will delete application storage and mark node as ready");
            athenzCredentialsMaintainer.ifPresent(maintainer -> maintainer.clearCredentials(context));
            storageMaintainer.archiveNodeStorage(context);
            updateNodeRepoWithCurrentAttributes(node);
            nodeRepository.setNodeState(context.hostname().value(), Node.State.ready);
            // The repo may now remove the node entirely; don't treat that as an error.
            expectNodeNotInNodeRepo = true;
            break;
        default:
            throw new RuntimeException("UNKNOWN STATE " + node.getState().name());
    }
}
/**
 * Agent that converges a single node's Docker container towards the state wanted by the
 * node repository. Runs its own tick loop on a dedicated thread, and restarts filebeat
 * daily via a single-threaded scheduler.
 *
 * <p>Thread-safety: {@code isFrozen}, {@code wantFrozen} and {@code workToDoNow} are
 * guarded by {@code monitor}; other mutable fields are touched from the tick thread
 * (and, for the metrics path, from whoever calls {@link #updateContainerNodeMetrics()}).
 */
class NodeAgentImpl implements NodeAgent {
    private static final long BYTES_IN_GB = 1_000_000_000L;
    private static final Logger logger = Logger.getLogger(NodeAgentImpl.class.getName());

    private final AtomicBoolean terminated = new AtomicBoolean(false);

    // Starts frozen: no convergence happens until setFrozen(false) is called.
    private boolean isFrozen = true;
    private boolean wantFrozen = false;
    private boolean workToDoNow = true;
    // Set after we ask the repo to move the node to ready; a missing node is then expected.
    private boolean expectNodeNotInNodeRepo = false;
    // Guards the frozen/work flags and is used to sleep between ticks.
    private final Object monitor = new Object();

    // Non-null while an async image pull is in flight.
    private DockerImage imageBeingDownloaded = null;

    private final NodeAgentContext context;
    private final NodeRepository nodeRepository;
    private final Orchestrator orchestrator;
    private final DockerOperations dockerOperations;
    private final StorageMaintainer storageMaintainer;
    private final Clock clock;
    private final Duration timeBetweenEachConverge;
    private final Optional<AthenzCredentialsMaintainer> athenzCredentialsMaintainer;
    private final Optional<AclMaintainer> aclMaintainer;

    private int numberOfUnhandledException = 0;
    private Instant lastConverge;
    // Last node-repo state seen, for transition logging; null until first observed.
    private Node.State lastState = null;

    private final Thread loopThread;
    private final Optional<HealthChecker> healthChecker;

    // Single daemon thread used to restart filebeat once a day (see resumeNodeIfNeeded).
    private final ScheduledExecutorService filebeatRestarter =
            Executors.newScheduledThreadPool(1, ThreadFactoryFactory.getDaemonThreadFactory("filebeatrestarter"));
    private Consumer<String> serviceRestarter;
    private Optional<Future<?>> currentFilebeatRestarter = Optional.empty();

    private boolean hasResumedNode = false;
    private boolean hasStartedServices = true;

    /**
     * ABSENT means container is definitely absent - A container that was absent will not suddenly appear without
     * NodeAgent explicitly starting it.
     * STARTING state is set just before we attempt to start a container, if successful we move to the next state.
     * Otherwise we can't be certain. A container that was running a minute ago may no longer be running without
     * NodeAgent doing anything (container could have crashed). Therefore we always have to ask docker daemon
     * to get updated state of the container.
     */
    enum ContainerState {
        ABSENT,
        STARTING,
        UNKNOWN
    }

    private ContainerState containerState = UNKNOWN;

    // Last node spec seen from the repo; null until the first converge pass.
    private NodeSpec lastNode = null;
    private CpuUsageReporter lastCpuMetric = new CpuUsageReporter();

    public NodeAgentImpl(
            final NodeAgentContext context,
            final NodeRepository nodeRepository,
            final Orchestrator orchestrator,
            final DockerOperations dockerOperations,
            final StorageMaintainer storageMaintainer,
            final Clock clock,
            final Duration timeBetweenEachConverge,
            final Optional<AthenzCredentialsMaintainer> athenzCredentialsMaintainer,
            final Optional<AclMaintainer> aclMaintainer,
            final Optional<HealthChecker> healthChecker) {
        this.context = context;
        this.nodeRepository = nodeRepository;
        this.orchestrator = orchestrator;
        this.dockerOperations = dockerOperations;
        this.storageMaintainer = storageMaintainer;
        this.clock = clock;
        this.timeBetweenEachConverge = timeBetweenEachConverge;
        this.lastConverge = clock.instant();
        this.athenzCredentialsMaintainer = athenzCredentialsMaintainer;
        this.aclMaintainer = aclMaintainer;
        this.healthChecker = healthChecker;

        // Tick loop thread; a throwable escaping tick() ends the loop but is counted and logged.
        this.loopThread = new Thread(() -> {
            try {
                while (!terminated.get()) tick();
            } catch (Throwable t) {
                numberOfUnhandledException++;
                context.log(logger, LogLevel.ERROR, "Unhandled throwable, ignoring", t);
            }
        });
        this.loopThread.setName("tick-" + context.hostname());
    }

    /**
     * Requests freezing/unfreezing and wakes the tick loop.
     * @return true iff the agent has already reached the requested frozen state.
     */
    @Override
    public boolean setFrozen(boolean frozen) {
        synchronized (monitor) {
            if (wantFrozen != frozen) {
                wantFrozen = frozen;
                context.log(logger, LogLevel.DEBUG, wantFrozen ? "Freezing" : "Unfreezing");
                signalWorkToBeDone();
            }
            return isFrozen == frozen;
        }
    }

    /** Starts the tick loop and installs the in-container service restart callback. */
    @Override
    public void start() {
        context.log(logger, "Starting with interval " + timeBetweenEachConverge.toMillis() + " ms");
        loopThread.start();

        serviceRestarter = service -> {
            try {
                ProcessResult processResult = dockerOperations.executeCommandInContainerAsRoot(
                        context, "service", service, "restart");
                if (!processResult.isSuccess()) {
                    context.log(logger, LogLevel.ERROR,
                            "Failed to restart service " + service + ": " + processResult);
                }
            } catch (Exception e) {
                context.log(logger, LogLevel.ERROR, "Failed to restart service " + service, e);
            }
        };
    }

    /**
     * Stops the agent: shuts down the scheduler, terminates and joins the tick loop, and
     * closes the health checker. May only be called once.
     */
    @Override
    public void stop() {
        filebeatRestarter.shutdown();
        if (!terminated.compareAndSet(false, true)) {
            throw new RuntimeException("Can not re-stop a node agent.");
        }
        signalWorkToBeDone();
        // Retry join/awaitTermination until both have actually finished, even if interrupted.
        do {
            try {
                loopThread.join();
                filebeatRestarter.awaitTermination(Long.MAX_VALUE, TimeUnit.NANOSECONDS);
            } catch (InterruptedException e) {
                context.log(logger, LogLevel.ERROR,
                        "Interrupted while waiting for converge thread and filebeatRestarter scheduler to shutdown");
            }
        } while (loopThread.isAlive() || !filebeatRestarter.isTerminated());
        healthChecker.ifPresent(HealthChecker::close);
        context.log(logger, "Stopped");
    }

    /** Starts services inside the container unless already started since the last stop. */
    void startServicesIfNeeded() {
        if (!hasStartedServices) {
            context.log(logger, "Starting services");
            dockerOperations.startServices(context);
            hasStartedServices = true;
        }
    }

    /**
     * Resumes the node (optional node program) unless already resumed. Also schedules the
     * daily filebeat restart the first time through.
     */
    void resumeNodeIfNeeded(NodeSpec node) {
        if (!hasResumedNode) {
            if (!currentFilebeatRestarter.isPresent()) {
                storageMaintainer.writeMetricsConfig(context, node);
                currentFilebeatRestarter = Optional.of(filebeatRestarter.scheduleWithFixedDelay(
                        () -> serviceRestarter.accept("filebeat"), 1, 1, TimeUnit.DAYS));
            }
            context.log(logger, LogLevel.DEBUG, "Starting optional node program resume command");
            dockerOperations.resumeNode(context);
            hasResumedNode = true;
        }
    }

    /**
     * Computes which node attributes (restart/reboot generation, docker image) differ from
     * what the node repo has, and publishes the new values if anything changed.
     */
    private void updateNodeRepoWithCurrentAttributes(final NodeSpec node) {
        final NodeAttributes currentNodeAttributes = new NodeAttributes();
        final NodeAttributes newNodeAttributes = new NodeAttributes();

        if (!Objects.equals(node.getCurrentRestartGeneration(), node.getWantedRestartGeneration())) {
            currentNodeAttributes.withRestartGeneration(node.getCurrentRestartGeneration());
            newNodeAttributes.withRestartGeneration(node.getWantedRestartGeneration());
        }
        if (!Objects.equals(node.getCurrentRebootGeneration(), node.getWantedRebootGeneration())) {
            currentNodeAttributes.withRebootGeneration(node.getCurrentRebootGeneration());
            newNodeAttributes.withRebootGeneration(node.getWantedRebootGeneration());
        }
        // Only report the wanted image as current when the container state is trusted (UNKNOWN).
        Optional<DockerImage> actualDockerImage = node.getWantedDockerImage().filter(n -> containerState == UNKNOWN);
        if (!Objects.equals(node.getCurrentDockerImage(), actualDockerImage)) {
            currentNodeAttributes.withDockerImage(node.getCurrentDockerImage().orElse(new DockerImage("")));
            newNodeAttributes.withDockerImage(actualDockerImage.orElse(new DockerImage("")));
        }

        publishStateToNodeRepoIfChanged(currentNodeAttributes, newNodeAttributes);
    }

    /** Writes the attributes to the node repo only when they actually changed. */
    private void publishStateToNodeRepoIfChanged(NodeAttributes currentAttributes, NodeAttributes newAttributes) {
        if (!currentAttributes.equals(newAttributes)) {
            context.log(logger, "Publishing new set of attributes to node repo: %s -> %s",
                    currentAttributes, newAttributes);
            nodeRepository.updateNodeAttributes(context.hostname().value(), newAttributes);
        }
    }

    /** Creates and starts the container, resetting per-container bookkeeping. */
    private void startContainer(NodeSpec node) {
        ContainerData containerData = createContainerData(context, node);
        dockerOperations.createContainer(context, node, containerData);
        dockerOperations.startContainer(context);
        lastCpuMetric = new CpuUsageReporter();

        hasStartedServices = true; // Services are started at container creation
        hasResumedNode = false;
        context.log(logger, "Container successfully started, new containerState is " + containerState);
    }

    /**
     * Removes the container if shouldRemoveContainer says so; otherwise restarts services
     * inside it when the restart generation was bumped. Returns the (possibly removed)
     * container.
     */
    private Optional<Container> removeContainerIfNeededUpdateContainerState(
            NodeSpec node, Optional<Container> existingContainer) {
        return existingContainer
                .flatMap(container -> removeContainerIfNeeded(node, container))
                .map(container -> {
                    shouldRestartServices(node).ifPresent(restartReason -> {
                        context.log(logger, "Will restart services: " + restartReason);
                        restartServices(node, container);
                    });
                    return container;
                });
    }

    /** Returns a reason to restart services iff the wanted restart generation is ahead of current. */
    private Optional<String> shouldRestartServices(NodeSpec node) {
        if (!node.getWantedRestartGeneration().isPresent()) return Optional.empty();
        if (node.getCurrentRestartGeneration().get() < node.getWantedRestartGeneration().get()) {
            return Optional.of("Restart requested - wanted restart generation has been bumped: "
                    + node.getCurrentRestartGeneration().get() + " -> "
                    + node.getWantedRestartGeneration().get());
        }
        return Optional.empty();
    }

    /** Restarts vespa inside the container, suspending via the orchestrator first. */
    private void restartServices(NodeSpec node, Container existingContainer) {
        if (existingContainer.state.isRunning() && node.getState() == Node.State.active) {
            context.log(logger, "Restarting services");
            // Since we are restarting the services we need to suspend the node.
            orchestratorSuspendNode();
            dockerOperations.restartVespa(context);
        }
    }

    /** Stops services in the container; a vanished container just flips state to ABSENT. */
    @Override
    public void stopServices() {
        context.log(logger, "Stopping services");
        if (containerState == ABSENT) return;
        try {
            hasStartedServices = hasResumedNode = false;
            dockerOperations.stopServices(context);
        } catch (ContainerNotFoundException e) {
            containerState = ABSENT;
        }
    }

    /** Suspends the node inside the container; best-effort — failures are logged, not thrown. */
    @Override
    public void suspend() {
        context.log(logger, "Suspending services on node");
        if (containerState == ABSENT) return;
        try {
            hasResumedNode = false;
            dockerOperations.suspendNode(context);
        } catch (ContainerNotFoundException e) {
            containerState = ABSENT;
        } catch (RuntimeException e) {
            // It's bad to continue as-if nothing happened, but on the other hand if
            // suspension failed we would loop forever — log and carry on.
            context.log(logger, LogLevel.WARNING, "Failed trying to suspend container", e);
        }
    }

    /** Returns a human-readable reason to remove the container, or empty if it should stay. */
    private Optional<String> shouldRemoveContainer(NodeSpec node, Container existingContainer) {
        final Node.State nodeState = node.getState();
        if (nodeState == Node.State.dirty || nodeState == Node.State.provisioned) {
            return Optional.of("Node in state " + nodeState + ", container should no longer be running");
        }
        if (node.getWantedDockerImage().isPresent() &&
                !node.getWantedDockerImage().get().equals(existingContainer.image)) {
            return Optional.of("The node is supposed to run a new Docker image: "
                    + existingContainer.image.asString() + " -> " + node.getWantedDockerImage().get().asString());
        }
        if (!existingContainer.state.isRunning()) {
            return Optional.of("Container no longer running");
        }

        ContainerResources wantedContainerResources = ContainerResources.from(
                node.getMinCpuCores(), node.getMinMainMemoryAvailableGb());
        if (!wantedContainerResources.equals(existingContainer.resources)) {
            return Optional.of("Container should be running with different resource allocation, wanted: "
                    + wantedContainerResources + ", actual: " + existingContainer.resources);
        }

        if (node.getCurrentRebootGeneration() < node.getWantedRebootGeneration()) {
            return Optional.of(String.format("Container reboot wanted. Current: %d, Wanted: %d",
                    node.getCurrentRebootGeneration(), node.getWantedRebootGeneration()));
        }

        // A container in STARTING here means the previous start attempt did not complete.
        if (containerState == STARTING) return Optional.of("Container failed to start");
        return Optional.empty();
    }

    /**
     * Removes the container when needed (suspending/stopping services first for active
     * nodes) and returns empty; otherwise returns the container unchanged.
     */
    private Optional<Container> removeContainerIfNeeded(NodeSpec node, Container existingContainer) {
        Optional<String> removeReason = shouldRemoveContainer(node, existingContainer);
        if (removeReason.isPresent()) {
            context.log(logger, "Will remove container: " + removeReason.get());

            if (existingContainer.state.isRunning()) {
                if (node.getState() == Node.State.active) {
                    orchestratorSuspendNode();
                }

                try {
                    // Deliberately best-effort: a node going to dirty is torn down regardless.
                    if (node.getState() != Node.State.dirty) {
                        suspend();
                    }
                    stopServices();
                } catch (Exception e) {
                    context.log(logger, LogLevel.WARNING, "Failed stopping services, ignoring", e);
                }
            }

            stopFilebeatSchedulerIfNeeded();
            // Collect any core dumps before the container (and its filesystem view) goes away.
            storageMaintainer.handleCoreDumpsForContainer(context, node, Optional.of(existingContainer));
            dockerOperations.removeContainer(context, existingContainer);
            containerState = ABSENT;
            context.log(logger, "Container successfully removed, new containerState is " + containerState);
            return Optional.empty();
        }
        return Optional.of(existingContainer);
    }

    /** Kicks off an async pull of the wanted image when it differs from the current one. */
    private void scheduleDownLoadIfNeeded(NodeSpec node) {
        if (node.getCurrentDockerImage().equals(node.getWantedDockerImage())) return;

        if (dockerOperations.pullImageAsyncIfNeeded(node.getWantedDockerImage().get())) {
            imageBeingDownloaded = node.getWantedDockerImage().get();
        } else if (imageBeingDownloaded != null) { // Image was downloading, but now it's ready
            imageBeingDownloaded = null;
        }
    }

    /** Wakes the tick loop so the next converge happens immediately. */
    private void signalWorkToBeDone() {
        synchronized (monitor) {
            if (!workToDoNow) {
                workToDoNow = true;
                context.log(logger, LogLevel.DEBUG, "Signaling work to be done");
                monitor.notifyAll();
            }
        }
    }

    /**
     * One loop iteration: sleep until the converge interval has elapsed (or work is
     * signalled), sync the frozen flag, then run converge() unless frozen. Exceptions
     * from converge() are translated into state updates / counters here.
     */
    void tick() {
        boolean isFrozenCopy;
        synchronized (monitor) {
            while (!workToDoNow) {
                long remainder = timeBetweenEachConverge
                        .minus(Duration.between(lastConverge, clock.instant()))
                        .toMillis();
                if (remainder > 0) {
                    try {
                        monitor.wait(remainder);
                    } catch (InterruptedException e) {
                        context.log(logger, LogLevel.ERROR, "Interrupted while sleeping before tick, ignoring");
                    }
                } else break;
            }
            lastConverge = clock.instant();
            workToDoNow = false;

            if (isFrozen != wantFrozen) {
                isFrozen = wantFrozen;
                context.log(logger, "Updated NodeAgent's frozen state, new value: isFrozen: " + isFrozen);
            }
            isFrozenCopy = isFrozen;
        }

        // NOTE(review): 'converged' is assigned but never read afterwards.
        boolean converged = false;
        if (isFrozenCopy) {
            context.log(logger, LogLevel.DEBUG, "tick: isFrozen");
        } else {
            try {
                converge();
                converged = true;
            } catch (OrchestratorException e) {
                // Expected when the orchestrator denies suspend/resume; just log the message.
                context.log(logger, e.getMessage());
            } catch (ContainerNotFoundException e) {
                containerState = ABSENT;
                context.log(logger, LogLevel.WARNING,
                        "Container unexpectedly gone, resetting containerState to " + containerState);
            } catch (DockerException e) {
                numberOfUnhandledException++;
                context.log(logger, LogLevel.ERROR, "Caught a DockerException", e);
            } catch (Exception e) {
                numberOfUnhandledException++;
                context.log(logger, LogLevel.ERROR, "Unhandled exception, ignoring.", e);
            }
        }
    }

    /** Renders a possibly-null node state for logging. */
    private String stateDescription(Node.State state) {
        return state == null ? "[absent]" : state.toString();
    }

    /** Cancels the daily filebeat restart task, if scheduled. */
    private void stopFilebeatSchedulerIfNeeded() {
        if (currentFilebeatRestarter.isPresent()) {
            currentFilebeatRestarter.get().cancel(true);
            currentFilebeatRestarter = Optional.empty();
        }
    }

    /**
     * Samples docker container stats, computes cpu/memory/disk/network metrics relative
     * to the node's allocation, and pushes them into the container's metrics endpoint.
     * No-op when there is no known-good container.
     */
    @SuppressWarnings("unchecked")
    public void updateContainerNodeMetrics() {
        final NodeSpec node = lastNode;
        if (node == null || containerState != UNKNOWN) return;

        Optional<ContainerStats> containerStats = dockerOperations.getContainerStats(context);
        if (!containerStats.isPresent()) return;

        Dimensions.Builder dimensionsBuilder = new Dimensions.Builder()
                .add("host", context.hostname().value())
                .add("role", SecretAgentCheckConfig.nodeTypeToRole(context.nodeType()))
                .add("state", node.getState().toString());
        node.getParentHostname().ifPresent(parent -> dimensionsBuilder.add("parentHostname", parent));
        node.getAllowedToBeDown().ifPresent(allowed ->
                dimensionsBuilder.add("orchestratorState", allowed ? "ALLOWED_TO_BE_DOWN" : "NO_REMARKS"));
        Dimensions dimensions = dimensionsBuilder.build();

        ContainerStats stats = containerStats.get();
        final String APP = MetricReceiverWrapper.APPLICATION_NODE;
        // The raw stats are untyped maps mirroring the docker stats API; hence the casts.
        final int totalNumCpuCores = ((List<Number>) ((Map) stats.getCpuStats().get("cpu_usage")).get("percpu_usage")).size();
        final long cpuContainerKernelTime = ((Number) ((Map) stats.getCpuStats().get("cpu_usage")).get("usage_in_kernelmode")).longValue();
        final long cpuContainerTotalTime = ((Number) ((Map) stats.getCpuStats().get("cpu_usage")).get("total_usage")).longValue();
        final long cpuSystemTotalTime = ((Number) stats.getCpuStats().get("system_cpu_usage")).longValue();
        final long memoryTotalBytes = ((Number) stats.getMemoryStats().get("limit")).longValue();
        final long memoryTotalBytesUsage = ((Number) stats.getMemoryStats().get("usage")).longValue();
        final long memoryTotalBytesCache = ((Number) ((Map) stats.getMemoryStats().get("stats")).get("cache")).longValue();
        final long diskTotalBytes = (long) (node.getMinDiskAvailableGb() * BYTES_IN_GB);
        final Optional<Long> diskTotalBytesUsed = storageMaintainer.getDiskUsageFor(context);

        lastCpuMetric.updateCpuDeltas(cpuSystemTotalTime, cpuContainerTotalTime, cpuContainerKernelTime);

        // Ratios are relative to this node's cpu allocation, not the whole host.
        final double allocatedCpuRatio = node.getMinCpuCores() / totalNumCpuCores;
        double cpuUsageRatioOfAllocated = lastCpuMetric.getCpuUsageRatio() / allocatedCpuRatio;
        double cpuKernelUsageRatioOfAllocated = lastCpuMetric.getCpuKernelUsageRatio() / allocatedCpuRatio;

        long memoryTotalBytesUsed = memoryTotalBytesUsage - memoryTotalBytesCache;
        double memoryUsageRatio = (double) memoryTotalBytesUsed / memoryTotalBytes;
        Optional<Double> diskUsageRatio = diskTotalBytesUsed.map(used -> (double) used / diskTotalBytes);

        List<DimensionMetrics> metrics = new ArrayList<>();
        DimensionMetrics.Builder systemMetricsBuilder = new DimensionMetrics.Builder(APP, dimensions)
                .withMetric("mem.limit", memoryTotalBytes)
                .withMetric("mem.used", memoryTotalBytesUsed)
                .withMetric("mem.util", 100 * memoryUsageRatio)
                .withMetric("cpu.util", 100 * cpuUsageRatioOfAllocated)
                .withMetric("cpu.sys.util", 100 * cpuKernelUsageRatioOfAllocated)
                .withMetric("disk.limit", diskTotalBytes);

        diskTotalBytesUsed.ifPresent(diskUsed -> systemMetricsBuilder.withMetric("disk.used", diskUsed));
        diskUsageRatio.ifPresent(diskRatio -> systemMetricsBuilder.withMetric("disk.util", 100 * diskRatio));
        metrics.add(systemMetricsBuilder.build());

        stats.getNetworks().forEach((interfaceName, interfaceStats) -> {
            Dimensions netDims = dimensionsBuilder.add("interface", interfaceName).build();
            Map<String, Number> infStats = (Map<String, Number>) interfaceStats;

            DimensionMetrics networkMetrics = new DimensionMetrics.Builder(APP, netDims)
                    .withMetric("net.in.bytes", infStats.get("rx_bytes").longValue())
                    .withMetric("net.in.errors", infStats.get("rx_errors").longValue())
                    .withMetric("net.in.dropped", infStats.get("rx_dropped").longValue())
                    .withMetric("net.out.bytes", infStats.get("tx_bytes").longValue())
                    .withMetric("net.out.errors", infStats.get("tx_errors").longValue())
                    .withMetric("net.out.dropped", infStats.get("tx_dropped").longValue())
                    .build();
            metrics.add(networkMetrics);
        });

        pushMetricsToContainer(metrics);
    }

    /** Serializes the metrics as a secret-agent report and injects them via vespa-rpc-invoke. */
    private void pushMetricsToContainer(List<DimensionMetrics> metrics) {
        StringBuilder params = new StringBuilder();
        try {
            for (DimensionMetrics dimensionMetrics : metrics) {
                params.append(dimensionMetrics.toSecretAgentReport());
            }
            String wrappedMetrics = "s:" + params.toString();

            // Push metrics to the metrics proxy in each container.
            String[] command = {"vespa-rpc-invoke", "-t", "2", "tcp/localhost:19091",
                    "setExtraMetrics", wrappedMetrics};
            dockerOperations.executeCommandInContainerAsRoot(context, 5L, command);
        } catch (DockerExecTimeoutException | JsonProcessingException e) {
            context.log(logger, LogLevel.WARNING, "Failed to push metrics to container", e);
        }
    }

    /** Asks the docker daemon for the container, caching only definite absence. */
    private Optional<Container> getContainer() {
        if (containerState == ABSENT) return Optional.empty();
        Optional<Container> container = dockerOperations.getContainer(context);
        if (! container.isPresent()) containerState = ABSENT;
        return container;
    }

    @Override
    public boolean isDownloadingImage() {
        return imageBeingDownloaded != null;
    }

    /** Returns the exception count accumulated since the last call, and resets it. */
    @Override
    public int getAndResetNumberOfUnhandledExceptions() {
        int temp = numberOfUnhandledException;
        numberOfUnhandledException = 0;
        return temp;
    }

    /** Tracks cumulative cpu counters and exposes per-interval usage deltas as ratios. */
    class CpuUsageReporter {
        private long containerKernelUsage = 0;
        private long totalContainerUsage = 0;
        private long totalSystemUsage = 0;

        private long deltaContainerKernelUsage;
        private long deltaContainerUsage;
        private long deltaSystemUsage;

        private void updateCpuDeltas(long totalSystemUsage, long totalContainerUsage, long containerKernelUsage) {
            // First sample has no previous system reading; treat the system delta as 0.
            deltaSystemUsage = this.totalSystemUsage == 0 ? 0 : (totalSystemUsage - this.totalSystemUsage);
            deltaContainerUsage = totalContainerUsage - this.totalContainerUsage;
            deltaContainerKernelUsage = containerKernelUsage - this.containerKernelUsage;

            this.totalSystemUsage = totalSystemUsage;
            this.totalContainerUsage = totalContainerUsage;
            this.containerKernelUsage = containerKernelUsage;
        }

        /**
         * Returns the CPU usage ratio for the docker container that this NodeAgent is managing
         * in the time between the last two times updateCpuDeltas() was called. This is calculated
         * by dividing the CPU time used by the container with the CPU time used by the entire system.
         */
        double getCpuUsageRatio() {
            return deltaSystemUsage == 0 ? Double.NaN : (double) deltaContainerUsage / deltaSystemUsage;
        }

        double getCpuKernelUsageRatio() {
            return deltaSystemUsage == 0 ? Double.NaN : (double) deltaContainerKernelUsage / deltaSystemUsage;
        }
    }

    /** Asks the orchestrator for permission before suspending; throws if denied. */
    private void orchestratorSuspendNode() {
        context.log(logger, "Ask Orchestrator for permission to suspend node");
        orchestrator.suspend(context.hostname().value());
    }

    /** Hook for subclasses to provide container file content; default rejects any file. */
    protected ContainerData createContainerData(NodeAgentContext context, NodeSpec node) {
        return (pathInContainer, data) -> {
            throw new UnsupportedOperationException("addFile not implemented");
        };
    }
}
/**
 * Maintains a single node: a dedicated tick thread repeatedly converges the node's actual state
 * (docker container, services, metrics) towards the wanted state fetched from the node repository.
 *
 * NOTE(review): tick() calls converge(), which is not defined in this visible chunk — presumably
 * implemented elsewhere in this class; confirm against the full file.
 */
class NodeAgentImpl implements NodeAgent {
    private static final long BYTES_IN_GB = 1_000_000_000L;
    private static final Logger logger = Logger.getLogger(NodeAgentImpl.class.getName());

    // Flipped exactly once by stop(); the tick loop exits when this becomes true.
    private final AtomicBoolean terminated = new AtomicBoolean(false);

    // Frozen/work flags are read and written only inside synchronized (monitor) blocks.
    private boolean isFrozen = true;
    private boolean wantFrozen = false;
    private boolean workToDoNow = true;

    // When true, the node being absent from the node repository is expected (not an error).
    private boolean expectNodeNotInNodeRepo = false;
    private final Object monitor = new Object();

    // Non-null while an asynchronous docker image pull is in flight.
    private DockerImage imageBeingDownloaded = null;

    private final NodeAgentContext context;
    private final NodeRepository nodeRepository;
    private final Orchestrator orchestrator;
    private final DockerOperations dockerOperations;
    private final StorageMaintainer storageMaintainer;
    private final Clock clock;
    private final Duration timeBetweenEachConverge;
    private final Optional<AthenzCredentialsMaintainer> athenzCredentialsMaintainer;
    private final Optional<AclMaintainer> aclMaintainer;

    private int numberOfUnhandledException = 0;
    private Instant lastConverge;

    private final Thread loopThread;
    private final Optional<HealthChecker> healthChecker;

    // Single daemon thread used to periodically restart filebeat (scheduled in resumeNodeIfNeeded).
    private final ScheduledExecutorService filebeatRestarter =
            Executors.newScheduledThreadPool(1, ThreadFactoryFactory.getDaemonThreadFactory("filebeatrestarter"));
    private Consumer<String> serviceRestarter;
    private Optional<Future<?>> currentFilebeatRestarter = Optional.empty();

    private boolean hasResumedNode = false;
    private boolean hasStartedServices = true;

    /**
     * ABSENT means container is definitely absent - A container that was absent will not suddenly appear without
     * NodeAgent explicitly starting it.
     * STARTING state is set just before we attempt to start a container, if successful we move to the next state.
     * Otherwise we can't be certain. A container that was running a minute ago may no longer be running without
     * NodeAgent doing anything (container could have crashed). Therefore we always have to ask docker daemon
     * to get updated state of the container.
     */
    enum ContainerState {
        ABSENT,
        STARTING,
        UNKNOWN
    }

    private ContainerState containerState = UNKNOWN;

    private NodeSpec lastNode = null;
    private CpuUsageReporter lastCpuMetric = new CpuUsageReporter();

    /**
     * Wires up all collaborators and creates (but does not start) the tick thread.
     * The thread loops tick() until terminated; any escaping Throwable is counted and logged.
     */
    public NodeAgentImpl(
            final NodeAgentContext context,
            final NodeRepository nodeRepository,
            final Orchestrator orchestrator,
            final DockerOperations dockerOperations,
            final StorageMaintainer storageMaintainer,
            final Clock clock,
            final Duration timeBetweenEachConverge,
            final Optional<AthenzCredentialsMaintainer> athenzCredentialsMaintainer,
            final Optional<AclMaintainer> aclMaintainer,
            final Optional<HealthChecker> healthChecker) {
        this.context = context;
        this.nodeRepository = nodeRepository;
        this.orchestrator = orchestrator;
        this.dockerOperations = dockerOperations;
        this.storageMaintainer = storageMaintainer;
        this.clock = clock;
        this.timeBetweenEachConverge = timeBetweenEachConverge;
        this.lastConverge = clock.instant();
        this.athenzCredentialsMaintainer = athenzCredentialsMaintainer;
        this.aclMaintainer = aclMaintainer;
        this.healthChecker = healthChecker;
        this.loopThread = new Thread(() -> {
            try {
                while (!terminated.get()) tick();
            } catch (Throwable t) {
                // Last-ditch catch: the loop thread must not die silently.
                numberOfUnhandledException++;
                context.log(logger, LogLevel.ERROR, "Unhandled throwable, ignoring", t);
            }
        });
        this.loopThread.setName("tick-" + context.hostname());
    }

    /**
     * Requests the agent to (un)freeze and returns whether the agent has already reached
     * the requested frozen state (the flip itself happens on the tick thread).
     */
    @Override
    public boolean setFrozen(boolean frozen) {
        synchronized (monitor) {
            if (wantFrozen != frozen) {
                wantFrozen = frozen;
                context.log(logger, LogLevel.DEBUG, wantFrozen ? "Freezing" : "Unfreezing");
                signalWorkToBeDone();
            }
            return isFrozen == frozen;
        }
    }

    /** Starts the tick thread and installs the per-service restart callback used for filebeat. */
    @Override
    public void start() {
        context.log(logger, "Starting with interval " + timeBetweenEachConverge.toMillis() + " ms");
        loopThread.start();
        serviceRestarter = service -> {
            try {
                ProcessResult processResult = dockerOperations.executeCommandInContainerAsRoot(
                        context, "service", service, "restart");
                if (!processResult.isSuccess()) {
                    context.log(logger, LogLevel.ERROR,
                            "Failed to restart service " + service + ": " + processResult);
                }
            } catch (Exception e) {
                context.log(logger, LogLevel.ERROR, "Failed to restart service " + service, e);
            }
        };
    }

    /**
     * Permanently stops this agent: shuts down the filebeat scheduler, wakes the tick thread,
     * and blocks until both have terminated. May only be called once; a second call throws.
     * NOTE(review): this copy never closes healthChecker on stop — the variant elsewhere in this
     * file does; confirm which is current.
     */
    @Override
    public void stop() {
        filebeatRestarter.shutdown();
        // compareAndSet gives single-shot semantics for stop().
        if (!terminated.compareAndSet(false, true)) {
            throw new RuntimeException("Can not re-stop a node agent.");
        }
        signalWorkToBeDone();
        do {
            try {
                loopThread.join();
                filebeatRestarter.awaitTermination(Long.MAX_VALUE, TimeUnit.NANOSECONDS);
            } catch (InterruptedException e) {
                // Deliberately swallowed (no re-interrupt): the do/while retries until both the
                // tick thread and the scheduler are really down.
                context.log(logger, LogLevel.ERROR,
                        "Interrupted while waiting for converge thread and filebeatRestarter scheduler to shutdown");
            }
        } while (loopThread.isAlive() || !filebeatRestarter.isTerminated());
        context.log(logger, "Stopped");
    }

    /** Starts the container's services once per container lifecycle (idempotent via flag). */
    void startServicesIfNeeded() {
        if (!hasStartedServices) {
            context.log(logger, "Starting services");
            dockerOperations.startServices(context);
            hasStartedServices = true;
        }
    }

    /**
     * Runs the optional node resume program once per suspend/stop cycle; on the first resume it
     * also writes metrics config and schedules the daily filebeat restart.
     */
    void resumeNodeIfNeeded(NodeSpec node) {
        if (!hasResumedNode) {
            if (!currentFilebeatRestarter.isPresent()) {
                storageMaintainer.writeMetricsConfig(context, node);
                currentFilebeatRestarter = Optional.of(filebeatRestarter.scheduleWithFixedDelay(
                        () -> serviceRestarter.accept("filebeat"), 1, 1, TimeUnit.DAYS));
            }
            context.log(logger, LogLevel.DEBUG, "Starting optional node program resume command");
            dockerOperations.resumeNode(context);
            hasResumedNode = true;
        }
    }

    /**
     * Reports restart/reboot generations and the current docker image back to the node repository,
     * but only for attributes whose current value differs from the wanted value.
     */
    private void updateNodeRepoWithCurrentAttributes(final NodeSpec node) {
        final NodeAttributes currentNodeAttributes = new NodeAttributes();
        final NodeAttributes newNodeAttributes = new NodeAttributes();
        if (!Objects.equals(node.getCurrentRestartGeneration(), node.getWantedRestartGeneration())) {
            currentNodeAttributes.withRestartGeneration(node.getCurrentRestartGeneration());
            newNodeAttributes.withRestartGeneration(node.getWantedRestartGeneration());
        }
        if (!Objects.equals(node.getCurrentRebootGeneration(), node.getWantedRebootGeneration())) {
            currentNodeAttributes.withRebootGeneration(node.getCurrentRebootGeneration());
            newNodeAttributes.withRebootGeneration(node.getWantedRebootGeneration());
        }
        // Only claim the wanted image as current while the container state is UNKNOWN (i.e. running).
        Optional<DockerImage> actualDockerImage = node.getWantedDockerImage().filter(n -> containerState == UNKNOWN);
        if (!Objects.equals(node.getCurrentDockerImage(), actualDockerImage)) {
            currentNodeAttributes.withDockerImage(node.getCurrentDockerImage().orElse(new DockerImage("")));
            newNodeAttributes.withDockerImage(actualDockerImage.orElse(new DockerImage("")));
        }
        publishStateToNodeRepoIfChanged(currentNodeAttributes, newNodeAttributes);
    }

    /** Pushes the new attributes to the node repository only if they actually changed. */
    private void publishStateToNodeRepoIfChanged(NodeAttributes currentAttributes, NodeAttributes newAttributes) {
        if (!currentAttributes.equals(newAttributes)) {
            context.log(logger, "Publishing new set of attributes to node repo: %s -> %s",
                    currentAttributes, newAttributes);
            nodeRepository.updateNodeAttributes(context.hostname().value(), newAttributes);
        }
    }

    /** Creates and starts the container, resetting per-container bookkeeping. */
    private void startContainer(NodeSpec node) {
        ContainerData containerData = createContainerData(context, node);
        dockerOperations.createContainer(context, node, containerData);
        dockerOperations.startContainer(context);
        lastCpuMetric = new CpuUsageReporter();
        hasStartedServices = true; // Services are automatically started with the container.
        hasResumedNode = false;
        // NOTE(review): containerState is not updated in this method, so the "new containerState"
        // logged below is whatever it was before — presumably the caller transitions the state;
        // confirm against converge().
        context.log(logger, "Container successfully started, new containerState is " + containerState);
    }

    /**
     * Removes the container if shouldRemoveContainer says so; for a surviving container,
     * restarts its services when a restart has been requested.
     */
    private Optional<Container> removeContainerIfNeededUpdateContainerState(NodeSpec node, Optional<Container> existingContainer) {
        return existingContainer
                .flatMap(container -> removeContainerIfNeeded(node, container))
                .map(container -> {
                    shouldRestartServices(node).ifPresent(restartReason -> {
                        context.log(logger, "Will restart services: " + restartReason);
                        restartServices(node, container);
                    });
                    return container;
                });
    }

    /** Returns a reason to restart services when the wanted restart generation is ahead of the current one. */
    private Optional<String> shouldRestartServices(NodeSpec node) {
        if (!node.getWantedRestartGeneration().isPresent()) return Optional.empty();
        if (node.getCurrentRestartGeneration().get() < node.getWantedRestartGeneration().get()) {
            return Optional.of("Restart requested - wanted restart generation has been bumped: "
                    + node.getCurrentRestartGeneration().get() + " -> "
                    + node.getWantedRestartGeneration().get());
        }
        return Optional.empty();
    }

    /** Suspends via the orchestrator and restarts vespa inside a running, active container. */
    private void restartServices(NodeSpec node, Container existingContainer) {
        if (existingContainer.state.isRunning() && node.getState() == Node.State.active) {
            context.log(logger, "Restarting services");
            // Since we are restarting the services we need to suspend the node.
            orchestratorSuspendNode();
            dockerOperations.restartVespa(context);
        }
    }

    /** Stops services in the container; a vanished container downgrades state to ABSENT. */
    @Override
    public void stopServices() {
        context.log(logger, "Stopping services");
        if (containerState == ABSENT) return;
        try {
            hasStartedServices = hasResumedNode = false;
            dockerOperations.stopServices(context);
        } catch (ContainerNotFoundException e) {
            containerState = ABSENT;
        }
    }

    /**
     * Runs the node suspend program; a vanished container downgrades state to ABSENT,
     * other failures are logged and swallowed (best effort).
     */
    @Override
    public void suspend() {
        context.log(logger, "Suspending services on node");
        if (containerState == ABSENT) return;
        try {
            hasResumedNode = false;
            dockerOperations.suspendNode(context);
        } catch (ContainerNotFoundException e) {
            containerState = ABSENT;
        } catch (RuntimeException e) {
            // It's bad to continue as-if nothing happened, but on the other hand if we do not
            // proceed the node may be stuck here; log and continue (best effort).
            context.log(logger, LogLevel.WARNING, "Failed trying to suspend container", e);
        }
    }

    /**
     * Returns a human-readable reason to remove the container, or empty if it should be kept.
     * Checked in order: node state, wanted image change, container not running, resource
     * allocation change, reboot wanted, and a previously failed start (STARTING).
     */
    private Optional<String> shouldRemoveContainer(NodeSpec node, Container existingContainer) {
        final Node.State nodeState = node.getState();
        if (nodeState == Node.State.dirty || nodeState == Node.State.provisioned) {
            return Optional.of("Node in state " + nodeState + ", container should no longer be running");
        }
        if (node.getWantedDockerImage().isPresent() &&
                !node.getWantedDockerImage().get().equals(existingContainer.image)) {
            return Optional.of("The node is supposed to run a new Docker image: "
                    + existingContainer.image.asString() + " -> " + node.getWantedDockerImage().get().asString());
        }
        if (!existingContainer.state.isRunning()) {
            return Optional.of("Container no longer running");
        }
        ContainerResources wantedContainerResources = ContainerResources.from(
                node.getMinCpuCores(), node.getMinMainMemoryAvailableGb());
        if (!wantedContainerResources.equals(existingContainer.resources)) {
            return Optional.of("Container should be running with different resource allocation, wanted: "
                    + wantedContainerResources + ", actual: " + existingContainer.resources);
        }
        if (node.getCurrentRebootGeneration() < node.getWantedRebootGeneration()) {
            return Optional.of(String.format("Container reboot wanted. Current: %d, Wanted: %d",
                    node.getCurrentRebootGeneration(), node.getWantedRebootGeneration()));
        }
        // If we stayed in STARTING, the previous start attempt never completed.
        if (containerState == STARTING) return Optional.of("Container failed to start");
        return Optional.empty();
    }

    /**
     * Removes the container when warranted: suspends/stops services first (best effort),
     * harvests core dumps, deletes the container, and marks state ABSENT.
     * Returns the container if it was kept, otherwise empty.
     */
    private Optional<Container> removeContainerIfNeeded(NodeSpec node, Container existingContainer) {
        Optional<String> removeReason = shouldRemoveContainer(node, existingContainer);
        if (removeReason.isPresent()) {
            context.log(logger, "Will remove container: " + removeReason.get());
            if (existingContainer.state.isRunning()) {
                if (node.getState() == Node.State.active) {
                    orchestratorSuspendNode();
                }
                try {
                    if (node.getState() != Node.State.dirty) {
                        suspend();
                    }
                    stopServices();
                } catch (Exception e) {
                    context.log(logger, LogLevel.WARNING, "Failed stopping services, ignoring", e);
                }
            }
            stopFilebeatSchedulerIfNeeded();
            storageMaintainer.handleCoreDumpsForContainer(context, node, Optional.of(existingContainer));
            dockerOperations.removeContainer(context, existingContainer);
            containerState = ABSENT;
            context.log(logger, "Container successfully removed, new containerState is " + containerState);
            return Optional.empty();
        }
        return Optional.of(existingContainer);
    }

    /**
     * Kicks off (or clears bookkeeping for) an async pull of the wanted image when it differs
     * from the current one.
     * NOTE(review): getWantedDockerImage().get() is called without isPresent(); this relies on
     * wanted being present whenever it differs from current — confirm.
     */
    private void scheduleDownLoadIfNeeded(NodeSpec node) {
        if (node.getCurrentDockerImage().equals(node.getWantedDockerImage())) return;
        if (dockerOperations.pullImageAsyncIfNeeded(node.getWantedDockerImage().get())) {
            imageBeingDownloaded = node.getWantedDockerImage().get();
        } else if (imageBeingDownloaded != null) { // Image was downloading, but now it's ready
            imageBeingDownloaded = null;
        }
    }

    /** Wakes the tick thread so the next converge runs immediately. */
    private void signalWorkToBeDone() {
        synchronized (monitor) {
            if (!workToDoNow) {
                workToDoNow = true;
                context.log(logger, LogLevel.DEBUG, "Signaling work to be done");
                monitor.notifyAll();
            }
        }
    }

    /**
     * One iteration of the agent loop: waits out the remainder of the converge interval (or until
     * signaled), applies any frozen-state change, then runs converge() unless frozen.
     * Expected exception types are handled individually; everything else is counted and ignored.
     */
    void tick() {
        boolean isFrozenCopy;
        synchronized (monitor) {
            while (!workToDoNow) {
                long remainder = timeBetweenEachConverge
                        .minus(Duration.between(lastConverge, clock.instant()))
                        .toMillis();
                if (remainder > 0) {
                    try {
                        monitor.wait(remainder);
                    } catch (InterruptedException e) {
                        context.log(logger, LogLevel.ERROR, "Interrupted while sleeping before tick, ignoring");
                    }
                } else break;
            }
            lastConverge = clock.instant();
            workToDoNow = false;
            if (isFrozen != wantFrozen) {
                isFrozen = wantFrozen;
                context.log(logger, "Updated NodeAgent's frozen state, new value: isFrozen: " + isFrozen);
            }
            isFrozenCopy = isFrozen;
        }
        // NOTE(review): 'converged' is assigned but never read in this chunk — dead local or a
        // remnant of removed code; confirm.
        boolean converged = false;
        if (isFrozenCopy) {
            context.log(logger, LogLevel.DEBUG, "tick: isFrozen");
        } else {
            try {
                converge();
                converged = true;
            } catch (OrchestratorException e) {
                context.log(logger, e.getMessage());
            } catch (ContainerNotFoundException e) {
                containerState = ABSENT;
                context.log(logger, LogLevel.WARNING,
                        "Container unexpectedly gone, resetting containerState to " + containerState);
            } catch (DockerException e) {
                numberOfUnhandledException++;
                context.log(logger, LogLevel.ERROR, "Caught a DockerException", e);
            } catch (Exception e) {
                numberOfUnhandledException++;
                context.log(logger, LogLevel.ERROR, "Unhandled exception, ignoring.", e);
            }
        }
    }

    /** Logs a diff of interesting node-spec fields (currently only "state") between specs. */
    private void logChangesToNodeSpec(NodeSpec lastNode, NodeSpec node) {
        StringBuilder builder = new StringBuilder();
        appendIfDifferent(builder, "state", lastNode, node, NodeSpec::getState);
        if (builder.length() > 0) {
            context.log(logger, LogLevel.INFO, "Changes to node: " + builder.toString());
        }
    }

    /** Renders a possibly-null field value; null becomes "[absent]". */
    private static <T> String fieldDescription(T value) {
        return value == null ? "[absent]" : value.toString();
    }

    /** Appends "name old -> new" to builder when the extracted field differs between the two specs. */
    private <T> void appendIfDifferent(StringBuilder builder, String name,
                                       NodeSpec oldNode, NodeSpec newNode, Function<NodeSpec, T> getter) {
        T oldValue = oldNode == null ? null : getter.apply(oldNode);
        T newValue = getter.apply(newNode);
        if (!Objects.equals(oldValue, newValue)) {
            if (builder.length() > 0) {
                builder.append(", ");
            }
            builder.append(name).append(" ").append(fieldDescription(oldValue))
                    .append(" -> ").append(fieldDescription(newValue));
        }
    }

    /** Cancels the scheduled daily filebeat restart, if any. */
    private void stopFilebeatSchedulerIfNeeded() {
        if (currentFilebeatRestarter.isPresent()) {
            currentFilebeatRestarter.get().cancel(true);
            currentFilebeatRestarter = Optional.empty();
        }
    }

    /**
     * Collects cpu/memory/disk/network stats from the docker daemon, converts them into
     * dimension metrics, and pushes them into the container's metrics agent.
     * Unchecked casts reflect the untyped maps returned by the docker stats API.
     */
    @SuppressWarnings("unchecked")
    public void updateContainerNodeMetrics() {
        final NodeSpec node = lastNode; // Read once: tick thread may replace lastNode concurrently.
        if (node == null || containerState != UNKNOWN) return;
        Optional<ContainerStats> containerStats = dockerOperations.getContainerStats(context);
        if (!containerStats.isPresent()) return;
        Dimensions.Builder dimensionsBuilder = new Dimensions.Builder()
                .add("host", context.hostname().value())
                .add("role", SecretAgentCheckConfig.nodeTypeToRole(context.nodeType()))
                .add("state", node.getState().toString());
        node.getParentHostname().ifPresent(parent -> dimensionsBuilder.add("parentHostname", parent));
        node.getAllowedToBeDown().ifPresent(allowed ->
                dimensionsBuilder.add("orchestratorState", allowed ? "ALLOWED_TO_BE_DOWN" : "NO_REMARKS"));
        Dimensions dimensions = dimensionsBuilder.build();
        ContainerStats stats = containerStats.get();
        final String APP = MetricReceiverWrapper.APPLICATION_NODE;
        // CPU times below are cumulative counters; CpuUsageReporter turns them into deltas.
        final int totalNumCpuCores = ((List<Number>) ((Map) stats.getCpuStats().get("cpu_usage")).get("percpu_usage")).size();
        final long cpuContainerKernelTime = ((Number) ((Map) stats.getCpuStats().get("cpu_usage")).get("usage_in_kernelmode")).longValue();
        final long cpuContainerTotalTime = ((Number) ((Map) stats.getCpuStats().get("cpu_usage")).get("total_usage")).longValue();
        final long cpuSystemTotalTime = ((Number) stats.getCpuStats().get("system_cpu_usage")).longValue();
        final long memoryTotalBytes = ((Number) stats.getMemoryStats().get("limit")).longValue();
        final long memoryTotalBytesUsage = ((Number) stats.getMemoryStats().get("usage")).longValue();
        final long memoryTotalBytesCache = ((Number) ((Map) stats.getMemoryStats().get("stats")).get("cache")).longValue();
        final long diskTotalBytes = (long) (node.getMinDiskAvailableGb() * BYTES_IN_GB);
        final Optional<Long> diskTotalBytesUsed = storageMaintainer.getDiskUsageFor(context);
        lastCpuMetric.updateCpuDeltas(cpuSystemTotalTime, cpuContainerTotalTime, cpuContainerKernelTime);
        // Ratio of this container's allocated cores to the host's total cores.
        final double allocatedCpuRatio = node.getMinCpuCores() / totalNumCpuCores;
        double cpuUsageRatioOfAllocated = lastCpuMetric.getCpuUsageRatio() / allocatedCpuRatio;
        double cpuKernelUsageRatioOfAllocated = lastCpuMetric.getCpuKernelUsageRatio() / allocatedCpuRatio;
        // Page cache is excluded from "used" memory.
        long memoryTotalBytesUsed = memoryTotalBytesUsage - memoryTotalBytesCache;
        double memoryUsageRatio = (double) memoryTotalBytesUsed / memoryTotalBytes;
        Optional<Double> diskUsageRatio = diskTotalBytesUsed.map(used -> (double) used / diskTotalBytes);
        List<DimensionMetrics> metrics = new ArrayList<>();
        DimensionMetrics.Builder systemMetricsBuilder = new DimensionMetrics.Builder(APP, dimensions)
                .withMetric("mem.limit", memoryTotalBytes)
                .withMetric("mem.used", memoryTotalBytesUsed)
                .withMetric("mem.util", 100 * memoryUsageRatio)
                .withMetric("cpu.util", 100 * cpuUsageRatioOfAllocated)
                .withMetric("cpu.sys.util", 100 * cpuKernelUsageRatioOfAllocated)
                .withMetric("disk.limit", diskTotalBytes);
        diskTotalBytesUsed.ifPresent(diskUsed -> systemMetricsBuilder.withMetric("disk.used", diskUsed));
        diskUsageRatio.ifPresent(diskRatio -> systemMetricsBuilder.withMetric("disk.util", 100 * diskRatio));
        metrics.add(systemMetricsBuilder.build());
        // One metrics packet per network interface.
        stats.getNetworks().forEach((interfaceName, interfaceStats) -> {
            Dimensions netDims = dimensionsBuilder.add("interface", interfaceName).build();
            Map<String, Number> infStats = (Map<String, Number>) interfaceStats;
            DimensionMetrics networkMetrics = new DimensionMetrics.Builder(APP, netDims)
                    .withMetric("net.in.bytes", infStats.get("rx_bytes").longValue())
                    .withMetric("net.in.errors", infStats.get("rx_errors").longValue())
                    .withMetric("net.in.dropped", infStats.get("rx_dropped").longValue())
                    .withMetric("net.out.bytes", infStats.get("tx_bytes").longValue())
                    .withMetric("net.out.errors", infStats.get("tx_errors").longValue())
                    .withMetric("net.out.dropped", infStats.get("tx_dropped").longValue())
                    .build();
            metrics.add(networkMetrics);
        });
        pushMetricsToContainer(metrics);
    }

    /**
     * Serializes the metrics as secret-agent reports and injects them into the container's
     * metrics RPC endpoint on localhost:19091 (2s RPC timeout, 5s exec timeout).
     */
    private void pushMetricsToContainer(List<DimensionMetrics> metrics) {
        StringBuilder params = new StringBuilder();
        try {
            for (DimensionMetrics dimensionMetrics : metrics) {
                params.append(dimensionMetrics.toSecretAgentReport());
            }
            String wrappedMetrics = "s:" + params.toString();
            String[] command = {"vespa-rpc-invoke", "-t", "2", "tcp/localhost:19091",
                    "setExtraMetrics", wrappedMetrics};
            dockerOperations.executeCommandInContainerAsRoot(context, 5L, command);
        } catch (DockerExecTimeoutException | JsonProcessingException e) {
            context.log(logger, LogLevel.WARNING, "Failed to push metrics to container", e);
        }
    }

    /** Fetches the container from docker; a missing container downgrades state to ABSENT. */
    private Optional<Container> getContainer() {
        if (containerState == ABSENT) return Optional.empty();
        Optional<Container> container = dockerOperations.getContainer(context);
        if (! container.isPresent()) containerState = ABSENT;
        return container;
    }

    /** Returns whether an image pull is currently in flight. */
    @Override
    public boolean isDownloadingImage() {
        return imageBeingDownloaded != null;
    }

    /** Returns the unhandled-exception count and resets it to zero (read-and-clear). */
    @Override
    public int getAndResetNumberOfUnhandledExceptions() {
        int temp = numberOfUnhandledException;
        numberOfUnhandledException = 0;
        return temp;
    }

    /** Tracks cumulative CPU counters and exposes usage ratios over the last update interval. */
    class CpuUsageReporter {
        private long containerKernelUsage = 0;
        private long totalContainerUsage = 0;
        private long totalSystemUsage = 0;
        private long deltaContainerKernelUsage;
        private long deltaContainerUsage;
        private long deltaSystemUsage;

        /** Records new cumulative counters and computes deltas since the previous call. */
        private void updateCpuDeltas(long totalSystemUsage, long totalContainerUsage, long containerKernelUsage) {
            // First call (previous system usage 0) yields a zero system delta -> ratios are NaN.
            deltaSystemUsage = this.totalSystemUsage == 0 ? 0 : (totalSystemUsage - this.totalSystemUsage);
            deltaContainerUsage = totalContainerUsage - this.totalContainerUsage;
            deltaContainerKernelUsage = containerKernelUsage - this.containerKernelUsage;
            this.totalSystemUsage = totalSystemUsage;
            this.totalContainerUsage = totalContainerUsage;
            this.containerKernelUsage = containerKernelUsage;
        }

        /**
         * Returns the CPU usage ratio for the docker container that this NodeAgent is managing
         * in the time between the last two times updateCpuDeltas() was called. This is calculated
         * by dividing the CPU time used by the container with the CPU time used by the entire system.
         */
        double getCpuUsageRatio() {
            return deltaSystemUsage == 0 ? Double.NaN : (double) deltaContainerUsage / deltaSystemUsage;
        }

        /** Same as getCpuUsageRatio(), but counting only kernel-mode CPU time. */
        double getCpuKernelUsageRatio() {
            return deltaSystemUsage == 0 ? Double.NaN : (double) deltaContainerKernelUsage / deltaSystemUsage;
        }
    }

    /** Asks the orchestrator for permission to suspend this node (throws if denied). */
    private void orchestratorSuspendNode() {
        context.log(logger, "Ask Orchestrator for permission to suspend node");
        orchestrator.suspend(context.hostname().value());
    }

    /** Hook for subclasses to supply container file content; the default rejects all writes. */
    protected ContainerData createContainerData(NodeAgentContext context, NodeSpec node) {
        return (pathInContainer, data) -> {
            throw new UnsupportedOperationException("addFile not implemented");
        };
    }
}
Agreed — removed.
public void stop() { filebeatRestarter.shutdown(); if (!terminated.compareAndSet(false, true)) { throw new RuntimeException("Can not re-stop a node agent."); } signalWorkToBeDone(); do { try { loopThread.join(); filebeatRestarter.awaitTermination(Long.MAX_VALUE, TimeUnit.NANOSECONDS); } catch (InterruptedException e) { context.log(logger, LogLevel.ERROR, "Interrupted while waiting for converge thread and filebeatRestarter scheduler to shutdown"); } } while (loopThread.isAlive() || !filebeatRestarter.isTerminated()); healthChecker.ifPresent(HealthChecker::close); context.log(logger, "Stopped"); }
healthChecker.ifPresent(HealthChecker::close);
public void stop() { filebeatRestarter.shutdown(); if (!terminated.compareAndSet(false, true)) { throw new RuntimeException("Can not re-stop a node agent."); } signalWorkToBeDone(); do { try { loopThread.join(); filebeatRestarter.awaitTermination(Long.MAX_VALUE, TimeUnit.NANOSECONDS); } catch (InterruptedException e) { context.log(logger, LogLevel.ERROR, "Interrupted while waiting for converge thread and filebeatRestarter scheduler to shutdown"); } } while (loopThread.isAlive() || !filebeatRestarter.isTerminated()); context.log(logger, "Stopped"); }
class NodeAgentImpl implements NodeAgent { private static final long BYTES_IN_GB = 1_000_000_000L; private static final Logger logger = Logger.getLogger(NodeAgentImpl.class.getName()); private final AtomicBoolean terminated = new AtomicBoolean(false); private boolean isFrozen = true; private boolean wantFrozen = false; private boolean workToDoNow = true; private boolean expectNodeNotInNodeRepo = false; private final Object monitor = new Object(); private DockerImage imageBeingDownloaded = null; private final NodeAgentContext context; private final NodeRepository nodeRepository; private final Orchestrator orchestrator; private final DockerOperations dockerOperations; private final StorageMaintainer storageMaintainer; private final Clock clock; private final Duration timeBetweenEachConverge; private final Optional<AthenzCredentialsMaintainer> athenzCredentialsMaintainer; private final Optional<AclMaintainer> aclMaintainer; private int numberOfUnhandledException = 0; private Instant lastConverge; private Node.State lastState = null; private final Thread loopThread; private final Optional<HealthChecker> healthChecker; private final ScheduledExecutorService filebeatRestarter = Executors.newScheduledThreadPool(1, ThreadFactoryFactory.getDaemonThreadFactory("filebeatrestarter")); private Consumer<String> serviceRestarter; private Optional<Future<?>> currentFilebeatRestarter = Optional.empty(); private boolean hasResumedNode = false; private boolean hasStartedServices = true; /** * ABSENT means container is definitely absent - A container that was absent will not suddenly appear without * NodeAgent explicitly starting it. * STARTING state is set just before we attempt to start a container, if successful we move to the next state. * Otherwise we can't be certain. A container that was running a minute ago may no longer be running without * NodeAgent doing anything (container could have crashed). 
Therefore we always have to ask docker daemon * to get updated state of the container. */ enum ContainerState { ABSENT, STARTING, UNKNOWN } private ContainerState containerState = UNKNOWN; private NodeSpec lastNode = null; private CpuUsageReporter lastCpuMetric = new CpuUsageReporter(); public NodeAgentImpl( final NodeAgentContext context, final NodeRepository nodeRepository, final Orchestrator orchestrator, final DockerOperations dockerOperations, final StorageMaintainer storageMaintainer, final Clock clock, final Duration timeBetweenEachConverge, final Optional<AthenzCredentialsMaintainer> athenzCredentialsMaintainer, final Optional<AclMaintainer> aclMaintainer, final Optional<HealthChecker> healthChecker) { this.context = context; this.nodeRepository = nodeRepository; this.orchestrator = orchestrator; this.dockerOperations = dockerOperations; this.storageMaintainer = storageMaintainer; this.clock = clock; this.timeBetweenEachConverge = timeBetweenEachConverge; this.lastConverge = clock.instant(); this.athenzCredentialsMaintainer = athenzCredentialsMaintainer; this.aclMaintainer = aclMaintainer; this.healthChecker = healthChecker; this.loopThread = new Thread(() -> { try { while (!terminated.get()) tick(); } catch (Throwable t) { numberOfUnhandledException++; context.log(logger, LogLevel.ERROR, "Unhandled throwable, ignoring", t); } }); this.loopThread.setName("tick-" + context.hostname()); } @Override public boolean setFrozen(boolean frozen) { synchronized (monitor) { if (wantFrozen != frozen) { wantFrozen = frozen; context.log(logger, LogLevel.DEBUG, wantFrozen ? 
"Freezing" : "Unfreezing"); signalWorkToBeDone(); } return isFrozen == frozen; } } @Override public void start() { context.log(logger, "Starting with interval " + timeBetweenEachConverge.toMillis() + " ms"); loopThread.start(); serviceRestarter = service -> { try { ProcessResult processResult = dockerOperations.executeCommandInContainerAsRoot( context, "service", service, "restart"); if (!processResult.isSuccess()) { context.log(logger, LogLevel.ERROR, "Failed to restart service " + service + ": " + processResult); } } catch (Exception e) { context.log(logger, LogLevel.ERROR, "Failed to restart service " + service, e); } }; } @Override void startServicesIfNeeded() { if (!hasStartedServices) { context.log(logger, "Starting services"); dockerOperations.startServices(context); hasStartedServices = true; } } void resumeNodeIfNeeded(NodeSpec node) { if (!hasResumedNode) { if (!currentFilebeatRestarter.isPresent()) { storageMaintainer.writeMetricsConfig(context, node); currentFilebeatRestarter = Optional.of(filebeatRestarter.scheduleWithFixedDelay( () -> serviceRestarter.accept("filebeat"), 1, 1, TimeUnit.DAYS)); } context.log(logger, LogLevel.DEBUG, "Starting optional node program resume command"); dockerOperations.resumeNode(context); hasResumedNode = true; } } private void updateNodeRepoWithCurrentAttributes(final NodeSpec node) { final NodeAttributes currentNodeAttributes = new NodeAttributes(); final NodeAttributes newNodeAttributes = new NodeAttributes(); if (!Objects.equals(node.getCurrentRestartGeneration(), node.getWantedRestartGeneration())) { currentNodeAttributes.withRestartGeneration(node.getCurrentRestartGeneration()); newNodeAttributes.withRestartGeneration(node.getWantedRestartGeneration()); } if (!Objects.equals(node.getCurrentRebootGeneration(), node.getWantedRebootGeneration())) { currentNodeAttributes.withRebootGeneration(node.getCurrentRebootGeneration()); newNodeAttributes.withRebootGeneration(node.getWantedRebootGeneration()); } 
Optional<DockerImage> actualDockerImage = node.getWantedDockerImage().filter(n -> containerState == UNKNOWN); if (!Objects.equals(node.getCurrentDockerImage(), actualDockerImage)) { currentNodeAttributes.withDockerImage(node.getCurrentDockerImage().orElse(new DockerImage(""))); newNodeAttributes.withDockerImage(actualDockerImage.orElse(new DockerImage(""))); } publishStateToNodeRepoIfChanged(currentNodeAttributes, newNodeAttributes); } private void publishStateToNodeRepoIfChanged(NodeAttributes currentAttributes, NodeAttributes newAttributes) { if (!currentAttributes.equals(newAttributes)) { context.log(logger, "Publishing new set of attributes to node repo: %s -> %s", currentAttributes, newAttributes); nodeRepository.updateNodeAttributes(context.hostname().value(), newAttributes); } } private void startContainer(NodeSpec node) { ContainerData containerData = createContainerData(context, node); dockerOperations.createContainer(context, node, containerData); dockerOperations.startContainer(context); lastCpuMetric = new CpuUsageReporter(); hasStartedServices = true; hasResumedNode = false; context.log(logger, "Container successfully started, new containerState is " + containerState); } private Optional<Container> removeContainerIfNeededUpdateContainerState(NodeSpec node, Optional<Container> existingContainer) { return existingContainer .flatMap(container -> removeContainerIfNeeded(node, container)) .map(container -> { shouldRestartServices(node).ifPresent(restartReason -> { context.log(logger, "Will restart services: " + restartReason); restartServices(node, container); }); return container; }); } private Optional<String> shouldRestartServices(NodeSpec node) { if (!node.getWantedRestartGeneration().isPresent()) return Optional.empty(); if (node.getCurrentRestartGeneration().get() < node.getWantedRestartGeneration().get()) { return Optional.of("Restart requested - wanted restart generation has been bumped: " + node.getCurrentRestartGeneration().get() + " -> " + 
node.getWantedRestartGeneration().get()); } return Optional.empty(); } private void restartServices(NodeSpec node, Container existingContainer) { if (existingContainer.state.isRunning() && node.getState() == Node.State.active) { context.log(logger, "Restarting services"); orchestratorSuspendNode(); dockerOperations.restartVespa(context); } } @Override public void stopServices() { context.log(logger, "Stopping services"); if (containerState == ABSENT) return; try { hasStartedServices = hasResumedNode = false; dockerOperations.stopServices(context); } catch (ContainerNotFoundException e) { containerState = ABSENT; } } @Override public void suspend() { context.log(logger, "Suspending services on node"); if (containerState == ABSENT) return; try { hasResumedNode = false; dockerOperations.suspendNode(context); } catch (ContainerNotFoundException e) { containerState = ABSENT; } catch (RuntimeException e) { context.log(logger, LogLevel.WARNING, "Failed trying to suspend container", e); } } private Optional<String> shouldRemoveContainer(NodeSpec node, Container existingContainer) { final Node.State nodeState = node.getState(); if (nodeState == Node.State.dirty || nodeState == Node.State.provisioned) { return Optional.of("Node in state " + nodeState + ", container should no longer be running"); } if (node.getWantedDockerImage().isPresent() && !node.getWantedDockerImage().get().equals(existingContainer.image)) { return Optional.of("The node is supposed to run a new Docker image: " + existingContainer.image.asString() + " -> " + node.getWantedDockerImage().get().asString()); } if (!existingContainer.state.isRunning()) { return Optional.of("Container no longer running"); } ContainerResources wantedContainerResources = ContainerResources.from( node.getMinCpuCores(), node.getMinMainMemoryAvailableGb()); if (!wantedContainerResources.equals(existingContainer.resources)) { return Optional.of("Container should be running with different resource allocation, wanted: " + 
wantedContainerResources + ", actual: " + existingContainer.resources); } if (node.getCurrentRebootGeneration() < node.getWantedRebootGeneration()) { return Optional.of(String.format("Container reboot wanted. Current: %d, Wanted: %d", node.getCurrentRebootGeneration(), node.getWantedRebootGeneration())); } if (containerState == STARTING) return Optional.of("Container failed to start"); return Optional.empty(); } private Optional<Container> removeContainerIfNeeded(NodeSpec node, Container existingContainer) { Optional<String> removeReason = shouldRemoveContainer(node, existingContainer); if (removeReason.isPresent()) { context.log(logger, "Will remove container: " + removeReason.get()); if (existingContainer.state.isRunning()) { if (node.getState() == Node.State.active) { orchestratorSuspendNode(); } try { if (node.getState() != Node.State.dirty) { suspend(); } stopServices(); } catch (Exception e) { context.log(logger, LogLevel.WARNING, "Failed stopping services, ignoring", e); } } stopFilebeatSchedulerIfNeeded(); storageMaintainer.handleCoreDumpsForContainer(context, node, Optional.of(existingContainer)); dockerOperations.removeContainer(context, existingContainer); containerState = ABSENT; context.log(logger, "Container successfully removed, new containerState is " + containerState); return Optional.empty(); } return Optional.of(existingContainer); } private void scheduleDownLoadIfNeeded(NodeSpec node) { if (node.getCurrentDockerImage().equals(node.getWantedDockerImage())) return; if (dockerOperations.pullImageAsyncIfNeeded(node.getWantedDockerImage().get())) { imageBeingDownloaded = node.getWantedDockerImage().get(); } else if (imageBeingDownloaded != null) { imageBeingDownloaded = null; } } private void signalWorkToBeDone() { synchronized (monitor) { if (!workToDoNow) { workToDoNow = true; context.log(logger, LogLevel.DEBUG, "Signaling work to be done"); monitor.notifyAll(); } } } void tick() { boolean isFrozenCopy; synchronized (monitor) { while (!workToDoNow) 
{ long remainder = timeBetweenEachConverge .minus(Duration.between(lastConverge, clock.instant())) .toMillis(); if (remainder > 0) { try { monitor.wait(remainder); } catch (InterruptedException e) { context.log(logger, LogLevel.ERROR, "Interrupted while sleeping before tick, ignoring"); } } else break; } lastConverge = clock.instant(); workToDoNow = false; if (isFrozen != wantFrozen) { isFrozen = wantFrozen; context.log(logger, "Updated NodeAgent's frozen state, new value: isFrozen: " + isFrozen); } isFrozenCopy = isFrozen; } boolean converged = false; if (isFrozenCopy) { context.log(logger, LogLevel.DEBUG, "tick: isFrozen"); } else { try { converge(); converged = true; } catch (OrchestratorException e) { context.log(logger, e.getMessage()); } catch (ContainerNotFoundException e) { containerState = ABSENT; context.log(logger, LogLevel.WARNING, "Container unexpectedly gone, resetting containerState to " + containerState); } catch (DockerException e) { numberOfUnhandledException++; context.log(logger, LogLevel.ERROR, "Caught a DockerException", e); } catch (Exception e) { numberOfUnhandledException++; context.log(logger, LogLevel.ERROR, "Unhandled exception, ignoring.", e); } } } private String stateDescription(Node.State state) { return state == null ? 
"[absent]" : state.toString(); } void converge() { final Optional<NodeSpec> optionalNode = nodeRepository.getOptionalNode(context.hostname().value()); Node.State newState = optionalNode.map(NodeSpec::getState).orElse(null); if (newState != lastState) { context.log(logger, LogLevel.INFO, "State changed: " + stateDescription(lastState) + " -> " + stateDescription(newState)); lastState = newState; } if (!optionalNode.isPresent() && expectNodeNotInNodeRepo) return; final NodeSpec node = optionalNode.orElseThrow(() -> new IllegalStateException(String.format("Node '%s' missing from node repository", context.hostname()))); expectNodeNotInNodeRepo = false; Optional<Container> container = getContainer(); if (!node.equals(lastNode)) { if (container.map(c -> c.state.isRunning()).orElse(false)) { storageMaintainer.writeMetricsConfig(context, node); } context.log(logger, LogLevel.DEBUG, "Loading new node spec: " + node.toString()); lastNode = node; } switch (node.getState()) { case ready: case reserved: case parked: case failed: removeContainerIfNeededUpdateContainerState(node, container); updateNodeRepoWithCurrentAttributes(node); break; case active: storageMaintainer.handleCoreDumpsForContainer(context, node, container); storageMaintainer.getDiskUsageFor(context) .map(diskUsage -> (double) diskUsage / BYTES_IN_GB / node.getMinDiskAvailableGb()) .filter(diskUtil -> diskUtil >= 0.8) .ifPresent(diskUtil -> storageMaintainer.removeOldFilesFromNode(context)); scheduleDownLoadIfNeeded(node); if (isDownloadingImage()) { context.log(logger, LogLevel.DEBUG, "Waiting for image to download " + imageBeingDownloaded.asString()); return; } container = removeContainerIfNeededUpdateContainerState(node, container); if (! 
container.isPresent()) { containerState = STARTING; startContainer(node); containerState = UNKNOWN; aclMaintainer.ifPresent(AclMaintainer::converge); } startServicesIfNeeded(); resumeNodeIfNeeded(node); athenzCredentialsMaintainer.ifPresent(maintainer -> maintainer.converge(context)); healthChecker.ifPresent(checker -> checker.verifyHealth(context)); updateNodeRepoWithCurrentAttributes(node); context.log(logger, "Call resume against Orchestrator"); orchestrator.resume(context.hostname().value()); break; case inactive: removeContainerIfNeededUpdateContainerState(node, container); updateNodeRepoWithCurrentAttributes(node); break; case provisioned: nodeRepository.setNodeState(context.hostname().value(), Node.State.dirty); break; case dirty: removeContainerIfNeededUpdateContainerState(node, container); context.log(logger, "State is " + node.getState() + ", will delete application storage and mark node as ready"); athenzCredentialsMaintainer.ifPresent(maintainer -> maintainer.clearCredentials(context)); storageMaintainer.archiveNodeStorage(context); updateNodeRepoWithCurrentAttributes(node); nodeRepository.setNodeState(context.hostname().value(), Node.State.ready); expectNodeNotInNodeRepo = true; break; default: throw new RuntimeException("UNKNOWN STATE " + node.getState().name()); } } private void stopFilebeatSchedulerIfNeeded() { if (currentFilebeatRestarter.isPresent()) { currentFilebeatRestarter.get().cancel(true); currentFilebeatRestarter = Optional.empty(); } } @SuppressWarnings("unchecked") public void updateContainerNodeMetrics() { final NodeSpec node = lastNode; if (node == null || containerState != UNKNOWN) return; Optional<ContainerStats> containerStats = dockerOperations.getContainerStats(context); if (!containerStats.isPresent()) return; Dimensions.Builder dimensionsBuilder = new Dimensions.Builder() .add("host", context.hostname().value()) .add("role", SecretAgentCheckConfig.nodeTypeToRole(context.nodeType())) .add("state", node.getState().toString()); 
node.getParentHostname().ifPresent(parent -> dimensionsBuilder.add("parentHostname", parent)); node.getAllowedToBeDown().ifPresent(allowed -> dimensionsBuilder.add("orchestratorState", allowed ? "ALLOWED_TO_BE_DOWN" : "NO_REMARKS")); Dimensions dimensions = dimensionsBuilder.build(); ContainerStats stats = containerStats.get(); final String APP = MetricReceiverWrapper.APPLICATION_NODE; final int totalNumCpuCores = ((List<Number>) ((Map) stats.getCpuStats().get("cpu_usage")).get("percpu_usage")).size(); final long cpuContainerKernelTime = ((Number) ((Map) stats.getCpuStats().get("cpu_usage")).get("usage_in_kernelmode")).longValue(); final long cpuContainerTotalTime = ((Number) ((Map) stats.getCpuStats().get("cpu_usage")).get("total_usage")).longValue(); final long cpuSystemTotalTime = ((Number) stats.getCpuStats().get("system_cpu_usage")).longValue(); final long memoryTotalBytes = ((Number) stats.getMemoryStats().get("limit")).longValue(); final long memoryTotalBytesUsage = ((Number) stats.getMemoryStats().get("usage")).longValue(); final long memoryTotalBytesCache = ((Number) ((Map) stats.getMemoryStats().get("stats")).get("cache")).longValue(); final long diskTotalBytes = (long) (node.getMinDiskAvailableGb() * BYTES_IN_GB); final Optional<Long> diskTotalBytesUsed = storageMaintainer.getDiskUsageFor(context); lastCpuMetric.updateCpuDeltas(cpuSystemTotalTime, cpuContainerTotalTime, cpuContainerKernelTime); final double allocatedCpuRatio = node.getMinCpuCores() / totalNumCpuCores; double cpuUsageRatioOfAllocated = lastCpuMetric.getCpuUsageRatio() / allocatedCpuRatio; double cpuKernelUsageRatioOfAllocated = lastCpuMetric.getCpuKernelUsageRatio() / allocatedCpuRatio; long memoryTotalBytesUsed = memoryTotalBytesUsage - memoryTotalBytesCache; double memoryUsageRatio = (double) memoryTotalBytesUsed / memoryTotalBytes; Optional<Double> diskUsageRatio = diskTotalBytesUsed.map(used -> (double) used / diskTotalBytes); List<DimensionMetrics> metrics = new ArrayList<>(); 
DimensionMetrics.Builder systemMetricsBuilder = new DimensionMetrics.Builder(APP, dimensions) .withMetric("mem.limit", memoryTotalBytes) .withMetric("mem.used", memoryTotalBytesUsed) .withMetric("mem.util", 100 * memoryUsageRatio) .withMetric("cpu.util", 100 * cpuUsageRatioOfAllocated) .withMetric("cpu.sys.util", 100 * cpuKernelUsageRatioOfAllocated) .withMetric("disk.limit", diskTotalBytes); diskTotalBytesUsed.ifPresent(diskUsed -> systemMetricsBuilder.withMetric("disk.used", diskUsed)); diskUsageRatio.ifPresent(diskRatio -> systemMetricsBuilder.withMetric("disk.util", 100 * diskRatio)); metrics.add(systemMetricsBuilder.build()); stats.getNetworks().forEach((interfaceName, interfaceStats) -> { Dimensions netDims = dimensionsBuilder.add("interface", interfaceName).build(); Map<String, Number> infStats = (Map<String, Number>) interfaceStats; DimensionMetrics networkMetrics = new DimensionMetrics.Builder(APP, netDims) .withMetric("net.in.bytes", infStats.get("rx_bytes").longValue()) .withMetric("net.in.errors", infStats.get("rx_errors").longValue()) .withMetric("net.in.dropped", infStats.get("rx_dropped").longValue()) .withMetric("net.out.bytes", infStats.get("tx_bytes").longValue()) .withMetric("net.out.errors", infStats.get("tx_errors").longValue()) .withMetric("net.out.dropped", infStats.get("tx_dropped").longValue()) .build(); metrics.add(networkMetrics); }); pushMetricsToContainer(metrics); } private void pushMetricsToContainer(List<DimensionMetrics> metrics) { StringBuilder params = new StringBuilder(); try { for (DimensionMetrics dimensionMetrics : metrics) { params.append(dimensionMetrics.toSecretAgentReport()); } String wrappedMetrics = "s:" + params.toString(); String[] command = {"vespa-rpc-invoke", "-t", "2", "tcp/localhost:19091", "setExtraMetrics", wrappedMetrics}; dockerOperations.executeCommandInContainerAsRoot(context, 5L, command); } catch (DockerExecTimeoutException | JsonProcessingException e) { context.log(logger, LogLevel.WARNING, "Failed to 
push metrics to container", e); } } private Optional<Container> getContainer() { if (containerState == ABSENT) return Optional.empty(); Optional<Container> container = dockerOperations.getContainer(context); if (! container.isPresent()) containerState = ABSENT; return container; } @Override public boolean isDownloadingImage() { return imageBeingDownloaded != null; } @Override public int getAndResetNumberOfUnhandledExceptions() { int temp = numberOfUnhandledException; numberOfUnhandledException = 0; return temp; } class CpuUsageReporter { private long containerKernelUsage = 0; private long totalContainerUsage = 0; private long totalSystemUsage = 0; private long deltaContainerKernelUsage; private long deltaContainerUsage; private long deltaSystemUsage; private void updateCpuDeltas(long totalSystemUsage, long totalContainerUsage, long containerKernelUsage) { deltaSystemUsage = this.totalSystemUsage == 0 ? 0 : (totalSystemUsage - this.totalSystemUsage); deltaContainerUsage = totalContainerUsage - this.totalContainerUsage; deltaContainerKernelUsage = containerKernelUsage - this.containerKernelUsage; this.totalSystemUsage = totalSystemUsage; this.totalContainerUsage = totalContainerUsage; this.containerKernelUsage = containerKernelUsage; } /** * Returns the CPU usage ratio for the docker container that this NodeAgent is managing * in the time between the last two times updateCpuDeltas() was called. This is calculated * by dividing the CPU time used by the container with the CPU time used by the entire system. */ double getCpuUsageRatio() { return deltaSystemUsage == 0 ? Double.NaN : (double) deltaContainerUsage / deltaSystemUsage; } double getCpuKernelUsageRatio() { return deltaSystemUsage == 0 ? 
Double.NaN : (double) deltaContainerKernelUsage / deltaSystemUsage; } } private void orchestratorSuspendNode() { context.log(logger, "Ask Orchestrator for permission to suspend node"); orchestrator.suspend(context.hostname().value()); } protected ContainerData createContainerData(NodeAgentContext context, NodeSpec node) { return (pathInContainer, data) -> { throw new UnsupportedOperationException("addFile not implemented"); }; } }
class NodeAgentImpl implements NodeAgent { private static final long BYTES_IN_GB = 1_000_000_000L; private static final Logger logger = Logger.getLogger(NodeAgentImpl.class.getName()); private final AtomicBoolean terminated = new AtomicBoolean(false); private boolean isFrozen = true; private boolean wantFrozen = false; private boolean workToDoNow = true; private boolean expectNodeNotInNodeRepo = false; private final Object monitor = new Object(); private DockerImage imageBeingDownloaded = null; private final NodeAgentContext context; private final NodeRepository nodeRepository; private final Orchestrator orchestrator; private final DockerOperations dockerOperations; private final StorageMaintainer storageMaintainer; private final Clock clock; private final Duration timeBetweenEachConverge; private final Optional<AthenzCredentialsMaintainer> athenzCredentialsMaintainer; private final Optional<AclMaintainer> aclMaintainer; private int numberOfUnhandledException = 0; private Instant lastConverge; private final Thread loopThread; private final Optional<HealthChecker> healthChecker; private final ScheduledExecutorService filebeatRestarter = Executors.newScheduledThreadPool(1, ThreadFactoryFactory.getDaemonThreadFactory("filebeatrestarter")); private Consumer<String> serviceRestarter; private Optional<Future<?>> currentFilebeatRestarter = Optional.empty(); private boolean hasResumedNode = false; private boolean hasStartedServices = true; /** * ABSENT means container is definitely absent - A container that was absent will not suddenly appear without * NodeAgent explicitly starting it. * STARTING state is set just before we attempt to start a container, if successful we move to the next state. * Otherwise we can't be certain. A container that was running a minute ago may no longer be running without * NodeAgent doing anything (container could have crashed). Therefore we always have to ask docker daemon * to get updated state of the container. 
*/ enum ContainerState { ABSENT, STARTING, UNKNOWN } private ContainerState containerState = UNKNOWN; private NodeSpec lastNode = null; private CpuUsageReporter lastCpuMetric = new CpuUsageReporter(); public NodeAgentImpl( final NodeAgentContext context, final NodeRepository nodeRepository, final Orchestrator orchestrator, final DockerOperations dockerOperations, final StorageMaintainer storageMaintainer, final Clock clock, final Duration timeBetweenEachConverge, final Optional<AthenzCredentialsMaintainer> athenzCredentialsMaintainer, final Optional<AclMaintainer> aclMaintainer, final Optional<HealthChecker> healthChecker) { this.context = context; this.nodeRepository = nodeRepository; this.orchestrator = orchestrator; this.dockerOperations = dockerOperations; this.storageMaintainer = storageMaintainer; this.clock = clock; this.timeBetweenEachConverge = timeBetweenEachConverge; this.lastConverge = clock.instant(); this.athenzCredentialsMaintainer = athenzCredentialsMaintainer; this.aclMaintainer = aclMaintainer; this.healthChecker = healthChecker; this.loopThread = new Thread(() -> { try { while (!terminated.get()) tick(); } catch (Throwable t) { numberOfUnhandledException++; context.log(logger, LogLevel.ERROR, "Unhandled throwable, ignoring", t); } }); this.loopThread.setName("tick-" + context.hostname()); } @Override public boolean setFrozen(boolean frozen) { synchronized (monitor) { if (wantFrozen != frozen) { wantFrozen = frozen; context.log(logger, LogLevel.DEBUG, wantFrozen ? 
"Freezing" : "Unfreezing"); signalWorkToBeDone(); } return isFrozen == frozen; } } @Override public void start() { context.log(logger, "Starting with interval " + timeBetweenEachConverge.toMillis() + " ms"); loopThread.start(); serviceRestarter = service -> { try { ProcessResult processResult = dockerOperations.executeCommandInContainerAsRoot( context, "service", service, "restart"); if (!processResult.isSuccess()) { context.log(logger, LogLevel.ERROR, "Failed to restart service " + service + ": " + processResult); } } catch (Exception e) { context.log(logger, LogLevel.ERROR, "Failed to restart service " + service, e); } }; } @Override void startServicesIfNeeded() { if (!hasStartedServices) { context.log(logger, "Starting services"); dockerOperations.startServices(context); hasStartedServices = true; } } void resumeNodeIfNeeded(NodeSpec node) { if (!hasResumedNode) { if (!currentFilebeatRestarter.isPresent()) { storageMaintainer.writeMetricsConfig(context, node); currentFilebeatRestarter = Optional.of(filebeatRestarter.scheduleWithFixedDelay( () -> serviceRestarter.accept("filebeat"), 1, 1, TimeUnit.DAYS)); } context.log(logger, LogLevel.DEBUG, "Starting optional node program resume command"); dockerOperations.resumeNode(context); hasResumedNode = true; } } private void updateNodeRepoWithCurrentAttributes(final NodeSpec node) { final NodeAttributes currentNodeAttributes = new NodeAttributes(); final NodeAttributes newNodeAttributes = new NodeAttributes(); if (!Objects.equals(node.getCurrentRestartGeneration(), node.getWantedRestartGeneration())) { currentNodeAttributes.withRestartGeneration(node.getCurrentRestartGeneration()); newNodeAttributes.withRestartGeneration(node.getWantedRestartGeneration()); } if (!Objects.equals(node.getCurrentRebootGeneration(), node.getWantedRebootGeneration())) { currentNodeAttributes.withRebootGeneration(node.getCurrentRebootGeneration()); newNodeAttributes.withRebootGeneration(node.getWantedRebootGeneration()); } 
Optional<DockerImage> actualDockerImage = node.getWantedDockerImage().filter(n -> containerState == UNKNOWN); if (!Objects.equals(node.getCurrentDockerImage(), actualDockerImage)) { currentNodeAttributes.withDockerImage(node.getCurrentDockerImage().orElse(new DockerImage(""))); newNodeAttributes.withDockerImage(actualDockerImage.orElse(new DockerImage(""))); } publishStateToNodeRepoIfChanged(currentNodeAttributes, newNodeAttributes); } private void publishStateToNodeRepoIfChanged(NodeAttributes currentAttributes, NodeAttributes newAttributes) { if (!currentAttributes.equals(newAttributes)) { context.log(logger, "Publishing new set of attributes to node repo: %s -> %s", currentAttributes, newAttributes); nodeRepository.updateNodeAttributes(context.hostname().value(), newAttributes); } } private void startContainer(NodeSpec node) { ContainerData containerData = createContainerData(context, node); dockerOperations.createContainer(context, node, containerData); dockerOperations.startContainer(context); lastCpuMetric = new CpuUsageReporter(); hasStartedServices = true; hasResumedNode = false; context.log(logger, "Container successfully started, new containerState is " + containerState); } private Optional<Container> removeContainerIfNeededUpdateContainerState(NodeSpec node, Optional<Container> existingContainer) { return existingContainer .flatMap(container -> removeContainerIfNeeded(node, container)) .map(container -> { shouldRestartServices(node).ifPresent(restartReason -> { context.log(logger, "Will restart services: " + restartReason); restartServices(node, container); }); return container; }); } private Optional<String> shouldRestartServices(NodeSpec node) { if (!node.getWantedRestartGeneration().isPresent()) return Optional.empty(); if (node.getCurrentRestartGeneration().get() < node.getWantedRestartGeneration().get()) { return Optional.of("Restart requested - wanted restart generation has been bumped: " + node.getCurrentRestartGeneration().get() + " -> " + 
node.getWantedRestartGeneration().get()); } return Optional.empty(); } private void restartServices(NodeSpec node, Container existingContainer) { if (existingContainer.state.isRunning() && node.getState() == Node.State.active) { context.log(logger, "Restarting services"); orchestratorSuspendNode(); dockerOperations.restartVespa(context); } } @Override public void stopServices() { context.log(logger, "Stopping services"); if (containerState == ABSENT) return; try { hasStartedServices = hasResumedNode = false; dockerOperations.stopServices(context); } catch (ContainerNotFoundException e) { containerState = ABSENT; } } @Override public void suspend() { context.log(logger, "Suspending services on node"); if (containerState == ABSENT) return; try { hasResumedNode = false; dockerOperations.suspendNode(context); } catch (ContainerNotFoundException e) { containerState = ABSENT; } catch (RuntimeException e) { context.log(logger, LogLevel.WARNING, "Failed trying to suspend container", e); } } private Optional<String> shouldRemoveContainer(NodeSpec node, Container existingContainer) { final Node.State nodeState = node.getState(); if (nodeState == Node.State.dirty || nodeState == Node.State.provisioned) { return Optional.of("Node in state " + nodeState + ", container should no longer be running"); } if (node.getWantedDockerImage().isPresent() && !node.getWantedDockerImage().get().equals(existingContainer.image)) { return Optional.of("The node is supposed to run a new Docker image: " + existingContainer.image.asString() + " -> " + node.getWantedDockerImage().get().asString()); } if (!existingContainer.state.isRunning()) { return Optional.of("Container no longer running"); } ContainerResources wantedContainerResources = ContainerResources.from( node.getMinCpuCores(), node.getMinMainMemoryAvailableGb()); if (!wantedContainerResources.equals(existingContainer.resources)) { return Optional.of("Container should be running with different resource allocation, wanted: " + 
wantedContainerResources + ", actual: " + existingContainer.resources); } if (node.getCurrentRebootGeneration() < node.getWantedRebootGeneration()) { return Optional.of(String.format("Container reboot wanted. Current: %d, Wanted: %d", node.getCurrentRebootGeneration(), node.getWantedRebootGeneration())); } if (containerState == STARTING) return Optional.of("Container failed to start"); return Optional.empty(); } private Optional<Container> removeContainerIfNeeded(NodeSpec node, Container existingContainer) { Optional<String> removeReason = shouldRemoveContainer(node, existingContainer); if (removeReason.isPresent()) { context.log(logger, "Will remove container: " + removeReason.get()); if (existingContainer.state.isRunning()) { if (node.getState() == Node.State.active) { orchestratorSuspendNode(); } try { if (node.getState() != Node.State.dirty) { suspend(); } stopServices(); } catch (Exception e) { context.log(logger, LogLevel.WARNING, "Failed stopping services, ignoring", e); } } stopFilebeatSchedulerIfNeeded(); storageMaintainer.handleCoreDumpsForContainer(context, node, Optional.of(existingContainer)); dockerOperations.removeContainer(context, existingContainer); containerState = ABSENT; context.log(logger, "Container successfully removed, new containerState is " + containerState); return Optional.empty(); } return Optional.of(existingContainer); } private void scheduleDownLoadIfNeeded(NodeSpec node) { if (node.getCurrentDockerImage().equals(node.getWantedDockerImage())) return; if (dockerOperations.pullImageAsyncIfNeeded(node.getWantedDockerImage().get())) { imageBeingDownloaded = node.getWantedDockerImage().get(); } else if (imageBeingDownloaded != null) { imageBeingDownloaded = null; } } private void signalWorkToBeDone() { synchronized (monitor) { if (!workToDoNow) { workToDoNow = true; context.log(logger, LogLevel.DEBUG, "Signaling work to be done"); monitor.notifyAll(); } } } void tick() { boolean isFrozenCopy; synchronized (monitor) { while (!workToDoNow) 
{ long remainder = timeBetweenEachConverge .minus(Duration.between(lastConverge, clock.instant())) .toMillis(); if (remainder > 0) { try { monitor.wait(remainder); } catch (InterruptedException e) { context.log(logger, LogLevel.ERROR, "Interrupted while sleeping before tick, ignoring"); } } else break; } lastConverge = clock.instant(); workToDoNow = false; if (isFrozen != wantFrozen) { isFrozen = wantFrozen; context.log(logger, "Updated NodeAgent's frozen state, new value: isFrozen: " + isFrozen); } isFrozenCopy = isFrozen; } boolean converged = false; if (isFrozenCopy) { context.log(logger, LogLevel.DEBUG, "tick: isFrozen"); } else { try { converge(); converged = true; } catch (OrchestratorException e) { context.log(logger, e.getMessage()); } catch (ContainerNotFoundException e) { containerState = ABSENT; context.log(logger, LogLevel.WARNING, "Container unexpectedly gone, resetting containerState to " + containerState); } catch (DockerException e) { numberOfUnhandledException++; context.log(logger, LogLevel.ERROR, "Caught a DockerException", e); } catch (Exception e) { numberOfUnhandledException++; context.log(logger, LogLevel.ERROR, "Unhandled exception, ignoring.", e); } } } void converge() { final Optional<NodeSpec> optionalNode = nodeRepository.getOptionalNode(context.hostname().value()); if (!optionalNode.isPresent() && expectNodeNotInNodeRepo) { context.log(logger, LogLevel.INFO, "Node removed from node repo (as expected)"); return; } final NodeSpec node = optionalNode.orElseThrow(() -> new IllegalStateException(String.format("Node '%s' missing from node repository", context.hostname()))); expectNodeNotInNodeRepo = false; Optional<Container> container = getContainer(); if (!node.equals(lastNode)) { logChangesToNodeSpec(lastNode, node); if (container.map(c -> c.state.isRunning()).orElse(false)) { storageMaintainer.writeMetricsConfig(context, node); } lastNode = node; } switch (node.getState()) { case ready: case reserved: case parked: case failed: 
removeContainerIfNeededUpdateContainerState(node, container); updateNodeRepoWithCurrentAttributes(node); break; case active: storageMaintainer.handleCoreDumpsForContainer(context, node, container); storageMaintainer.getDiskUsageFor(context) .map(diskUsage -> (double) diskUsage / BYTES_IN_GB / node.getMinDiskAvailableGb()) .filter(diskUtil -> diskUtil >= 0.8) .ifPresent(diskUtil -> storageMaintainer.removeOldFilesFromNode(context)); scheduleDownLoadIfNeeded(node); if (isDownloadingImage()) { context.log(logger, LogLevel.DEBUG, "Waiting for image to download " + imageBeingDownloaded.asString()); return; } container = removeContainerIfNeededUpdateContainerState(node, container); if (! container.isPresent()) { containerState = STARTING; startContainer(node); containerState = UNKNOWN; aclMaintainer.ifPresent(AclMaintainer::converge); } startServicesIfNeeded(); resumeNodeIfNeeded(node); athenzCredentialsMaintainer.ifPresent(maintainer -> maintainer.converge(context)); healthChecker.ifPresent(checker -> checker.verifyHealth(context)); updateNodeRepoWithCurrentAttributes(node); context.log(logger, "Call resume against Orchestrator"); orchestrator.resume(context.hostname().value()); break; case inactive: removeContainerIfNeededUpdateContainerState(node, container); updateNodeRepoWithCurrentAttributes(node); break; case provisioned: nodeRepository.setNodeState(context.hostname().value(), Node.State.dirty); break; case dirty: removeContainerIfNeededUpdateContainerState(node, container); context.log(logger, "State is " + node.getState() + ", will delete application storage and mark node as ready"); athenzCredentialsMaintainer.ifPresent(maintainer -> maintainer.clearCredentials(context)); storageMaintainer.archiveNodeStorage(context); updateNodeRepoWithCurrentAttributes(node); nodeRepository.setNodeState(context.hostname().value(), Node.State.ready); expectNodeNotInNodeRepo = true; break; default: throw new RuntimeException("UNKNOWN STATE " + node.getState().name()); } } 
private void logChangesToNodeSpec(NodeSpec lastNode, NodeSpec node) { StringBuilder builder = new StringBuilder(); appendIfDifferent(builder, "state", lastNode, node, NodeSpec::getState); if (builder.length() > 0) { context.log(logger, LogLevel.INFO, "Changes to node: " + builder.toString()); } } private static <T> String fieldDescription(T value) { return value == null ? "[absent]" : value.toString(); } private <T> void appendIfDifferent(StringBuilder builder, String name, NodeSpec oldNode, NodeSpec newNode, Function<NodeSpec, T> getter) { T oldValue = oldNode == null ? null : getter.apply(oldNode); T newValue = getter.apply(newNode); if (!Objects.equals(oldValue, newValue)) { if (builder.length() > 0) { builder.append(", "); } builder.append(name).append(" ").append(fieldDescription(oldValue)).append(" -> ").append(fieldDescription(newValue)); } } private void stopFilebeatSchedulerIfNeeded() { if (currentFilebeatRestarter.isPresent()) { currentFilebeatRestarter.get().cancel(true); currentFilebeatRestarter = Optional.empty(); } } @SuppressWarnings("unchecked") public void updateContainerNodeMetrics() { final NodeSpec node = lastNode; if (node == null || containerState != UNKNOWN) return; Optional<ContainerStats> containerStats = dockerOperations.getContainerStats(context); if (!containerStats.isPresent()) return; Dimensions.Builder dimensionsBuilder = new Dimensions.Builder() .add("host", context.hostname().value()) .add("role", SecretAgentCheckConfig.nodeTypeToRole(context.nodeType())) .add("state", node.getState().toString()); node.getParentHostname().ifPresent(parent -> dimensionsBuilder.add("parentHostname", parent)); node.getAllowedToBeDown().ifPresent(allowed -> dimensionsBuilder.add("orchestratorState", allowed ? 
"ALLOWED_TO_BE_DOWN" : "NO_REMARKS")); Dimensions dimensions = dimensionsBuilder.build(); ContainerStats stats = containerStats.get(); final String APP = MetricReceiverWrapper.APPLICATION_NODE; final int totalNumCpuCores = ((List<Number>) ((Map) stats.getCpuStats().get("cpu_usage")).get("percpu_usage")).size(); final long cpuContainerKernelTime = ((Number) ((Map) stats.getCpuStats().get("cpu_usage")).get("usage_in_kernelmode")).longValue(); final long cpuContainerTotalTime = ((Number) ((Map) stats.getCpuStats().get("cpu_usage")).get("total_usage")).longValue(); final long cpuSystemTotalTime = ((Number) stats.getCpuStats().get("system_cpu_usage")).longValue(); final long memoryTotalBytes = ((Number) stats.getMemoryStats().get("limit")).longValue(); final long memoryTotalBytesUsage = ((Number) stats.getMemoryStats().get("usage")).longValue(); final long memoryTotalBytesCache = ((Number) ((Map) stats.getMemoryStats().get("stats")).get("cache")).longValue(); final long diskTotalBytes = (long) (node.getMinDiskAvailableGb() * BYTES_IN_GB); final Optional<Long> diskTotalBytesUsed = storageMaintainer.getDiskUsageFor(context); lastCpuMetric.updateCpuDeltas(cpuSystemTotalTime, cpuContainerTotalTime, cpuContainerKernelTime); final double allocatedCpuRatio = node.getMinCpuCores() / totalNumCpuCores; double cpuUsageRatioOfAllocated = lastCpuMetric.getCpuUsageRatio() / allocatedCpuRatio; double cpuKernelUsageRatioOfAllocated = lastCpuMetric.getCpuKernelUsageRatio() / allocatedCpuRatio; long memoryTotalBytesUsed = memoryTotalBytesUsage - memoryTotalBytesCache; double memoryUsageRatio = (double) memoryTotalBytesUsed / memoryTotalBytes; Optional<Double> diskUsageRatio = diskTotalBytesUsed.map(used -> (double) used / diskTotalBytes); List<DimensionMetrics> metrics = new ArrayList<>(); DimensionMetrics.Builder systemMetricsBuilder = new DimensionMetrics.Builder(APP, dimensions) .withMetric("mem.limit", memoryTotalBytes) .withMetric("mem.used", memoryTotalBytesUsed) 
.withMetric("mem.util", 100 * memoryUsageRatio) .withMetric("cpu.util", 100 * cpuUsageRatioOfAllocated) .withMetric("cpu.sys.util", 100 * cpuKernelUsageRatioOfAllocated) .withMetric("disk.limit", diskTotalBytes); diskTotalBytesUsed.ifPresent(diskUsed -> systemMetricsBuilder.withMetric("disk.used", diskUsed)); diskUsageRatio.ifPresent(diskRatio -> systemMetricsBuilder.withMetric("disk.util", 100 * diskRatio)); metrics.add(systemMetricsBuilder.build()); stats.getNetworks().forEach((interfaceName, interfaceStats) -> { Dimensions netDims = dimensionsBuilder.add("interface", interfaceName).build(); Map<String, Number> infStats = (Map<String, Number>) interfaceStats; DimensionMetrics networkMetrics = new DimensionMetrics.Builder(APP, netDims) .withMetric("net.in.bytes", infStats.get("rx_bytes").longValue()) .withMetric("net.in.errors", infStats.get("rx_errors").longValue()) .withMetric("net.in.dropped", infStats.get("rx_dropped").longValue()) .withMetric("net.out.bytes", infStats.get("tx_bytes").longValue()) .withMetric("net.out.errors", infStats.get("tx_errors").longValue()) .withMetric("net.out.dropped", infStats.get("tx_dropped").longValue()) .build(); metrics.add(networkMetrics); }); pushMetricsToContainer(metrics); } private void pushMetricsToContainer(List<DimensionMetrics> metrics) { StringBuilder params = new StringBuilder(); try { for (DimensionMetrics dimensionMetrics : metrics) { params.append(dimensionMetrics.toSecretAgentReport()); } String wrappedMetrics = "s:" + params.toString(); String[] command = {"vespa-rpc-invoke", "-t", "2", "tcp/localhost:19091", "setExtraMetrics", wrappedMetrics}; dockerOperations.executeCommandInContainerAsRoot(context, 5L, command); } catch (DockerExecTimeoutException | JsonProcessingException e) { context.log(logger, LogLevel.WARNING, "Failed to push metrics to container", e); } } private Optional<Container> getContainer() { if (containerState == ABSENT) return Optional.empty(); Optional<Container> container = 
dockerOperations.getContainer(context); if (! container.isPresent()) containerState = ABSENT; return container; } @Override public boolean isDownloadingImage() { return imageBeingDownloaded != null; } @Override public int getAndResetNumberOfUnhandledExceptions() { int temp = numberOfUnhandledException; numberOfUnhandledException = 0; return temp; } class CpuUsageReporter { private long containerKernelUsage = 0; private long totalContainerUsage = 0; private long totalSystemUsage = 0; private long deltaContainerKernelUsage; private long deltaContainerUsage; private long deltaSystemUsage; private void updateCpuDeltas(long totalSystemUsage, long totalContainerUsage, long containerKernelUsage) { deltaSystemUsage = this.totalSystemUsage == 0 ? 0 : (totalSystemUsage - this.totalSystemUsage); deltaContainerUsage = totalContainerUsage - this.totalContainerUsage; deltaContainerKernelUsage = containerKernelUsage - this.containerKernelUsage; this.totalSystemUsage = totalSystemUsage; this.totalContainerUsage = totalContainerUsage; this.containerKernelUsage = containerKernelUsage; } /** * Returns the CPU usage ratio for the docker container that this NodeAgent is managing * in the time between the last two times updateCpuDeltas() was called. This is calculated * by dividing the CPU time used by the container with the CPU time used by the entire system. */ double getCpuUsageRatio() { return deltaSystemUsage == 0 ? Double.NaN : (double) deltaContainerUsage / deltaSystemUsage; } double getCpuKernelUsageRatio() { return deltaSystemUsage == 0 ? Double.NaN : (double) deltaContainerKernelUsage / deltaSystemUsage; } } private void orchestratorSuspendNode() { context.log(logger, "Ask Orchestrator for permission to suspend node"); orchestrator.suspend(context.hostname().value()); } protected ContainerData createContainerData(NodeAgentContext context, NodeSpec node) { return (pathInContainer, data) -> { throw new UnsupportedOperationException("addFile not implemented"); }; } }
This is fine, as it means the node has been removed from the node repo and will shortly be stopped (which is logged).
/**
 * Single reconciliation pass: fetches the wanted node spec from the node repository and drives
 * the local container towards it, dispatching on the node's state.
 * Side effects (in order, depending on state): container removal/start, service start/resume,
 * credential maintenance, node-repo attribute updates and Orchestrator resume calls.
 */
void converge() {
    final Optional<NodeSpec> optionalNode = nodeRepository.getOptionalNode(context.hostname().value());
    // Track and log state transitions, including to/from "absent" (node no longer in the repo).
    Node.State newState = optionalNode.map(NodeSpec::getState).orElse(null);
    if (newState != lastState) {
        context.log(logger, LogLevel.INFO, "State changed: " + stateDescription(lastState) + " -> " + stateDescription(newState));
        lastState = newState;
    }
    // Absence is expected right after we marked the node ready (see the dirty branch below); nothing to do.
    if (!optionalNode.isPresent() && expectNodeNotInNodeRepo) return;
    // Unexpected absence is an error: this agent should only exist for nodes known to the repo.
    final NodeSpec node = optionalNode.orElseThrow(() ->
            new IllegalStateException(String.format("Node '%s' missing from node repository", context.hostname())));
    expectNodeNotInNodeRepo = false;
    Optional<Container> container = getContainer();
    if (!node.equals(lastNode)) {
        // Spec changed: refresh the metrics config inside the container while it is still running.
        if (container.map(c -> c.state.isRunning()).orElse(false)) {
            storageMaintainer.writeMetricsConfig(context, node);
        }
        context.log(logger, LogLevel.DEBUG, "Loading new node spec: " + node.toString());
        lastNode = node;
    }
    switch (node.getState()) {
        // States where no container should run: remove it (if needed) and report attributes back.
        case ready:
        case reserved:
        case parked:
        case failed:
            removeContainerIfNeededUpdateContainerState(node, container);
            updateNodeRepoWithCurrentAttributes(node);
            break;
        case active:
            storageMaintainer.handleCoreDumpsForContainer(context, node, container);
            // Trigger cleanup when disk utilization reaches 80% of the node's allocation.
            storageMaintainer.getDiskUsageFor(context)
                    .map(diskUsage -> (double) diskUsage / BYTES_IN_GB / node.getMinDiskAvailableGb())
                    .filter(diskUtil -> diskUtil >= 0.8)
                    .ifPresent(diskUtil -> storageMaintainer.removeOldFilesFromNode(context));
            scheduleDownLoadIfNeeded(node);
            // Don't touch the container until the wanted image has finished downloading.
            if (isDownloadingImage()) {
                context.log(logger, LogLevel.DEBUG, "Waiting for image to download " + imageBeingDownloaded.asString());
                return;
            }
            container = removeContainerIfNeededUpdateContainerState(node, container);
            if (!container.isPresent()) {
                // STARTING marks the window where a failed start leaves us in an uncertain state.
                containerState = STARTING;
                startContainer(node);
                containerState = UNKNOWN;
                aclMaintainer.ifPresent(AclMaintainer::converge);
            }
            startServicesIfNeeded();
            resumeNodeIfNeeded(node);
            athenzCredentialsMaintainer.ifPresent(maintainer -> maintainer.converge(context));
            healthChecker.ifPresent(checker -> checker.verifyHealth(context));
            // Report attributes before resuming, so the Orchestrator sees an up-to-date node.
            updateNodeRepoWithCurrentAttributes(node);
            context.log(logger, "Call resume against Orchestrator");
            orchestrator.resume(context.hostname().value());
            break;
        case inactive:
            removeContainerIfNeededUpdateContainerState(node, container);
            updateNodeRepoWithCurrentAttributes(node);
            break;
        case provisioned:
            nodeRepository.setNodeState(context.hostname().value(), Node.State.dirty);
            break;
        case dirty:
            removeContainerIfNeededUpdateContainerState(node, container);
            context.log(logger, "State is " + node.getState() + ", will delete application storage and mark node as ready");
            athenzCredentialsMaintainer.ifPresent(maintainer -> maintainer.clearCredentials(context));
            storageMaintainer.archiveNodeStorage(context);
            updateNodeRepoWithCurrentAttributes(node);
            nodeRepository.setNodeState(context.hostname().value(), Node.State.ready);
            // The node may now disappear from the repo; the next pass should not treat that as an error.
            expectNodeNotInNodeRepo = true;
            break;
        default:
            throw new RuntimeException("UNKNOWN STATE " + node.getState().name());
    }
}
// Derive the node's state from the node-repo spec; null when the node is absent from the repo.
Node.State newState = optionalNode.map(NodeSpec::getState).orElse(null);
/**
 * Single reconciliation pass: fetches the wanted node spec from the node repository and drives
 * the local container towards it, dispatching on the node's state.
 * Side effects (in order, depending on state): container removal/start, service start/resume,
 * credential maintenance, node-repo attribute updates and Orchestrator resume calls.
 */
void converge() {
    final Optional<NodeSpec> optionalNode = nodeRepository.getOptionalNode(context.hostname().value());
    // Absence is expected right after we marked the node ready (see the dirty branch below).
    if (!optionalNode.isPresent() && expectNodeNotInNodeRepo) {
        context.log(logger, LogLevel.INFO, "Node removed from node repo (as expected)");
        return;
    }
    // Unexpected absence is an error: this agent should only exist for nodes known to the repo.
    final NodeSpec node = optionalNode.orElseThrow(() ->
            new IllegalStateException(String.format("Node '%s' missing from node repository", context.hostname())));
    expectNodeNotInNodeRepo = false;
    Optional<Container> container = getContainer();
    if (!node.equals(lastNode)) {
        logChangesToNodeSpec(lastNode, node);
        // Spec changed: refresh the metrics config inside the container while it is still running.
        if (container.map(c -> c.state.isRunning()).orElse(false)) {
            storageMaintainer.writeMetricsConfig(context, node);
        }
        lastNode = node;
    }
    switch (node.getState()) {
        // States where no container should run: remove it (if needed) and report attributes back.
        case ready:
        case reserved:
        case parked:
        case failed:
            removeContainerIfNeededUpdateContainerState(node, container);
            updateNodeRepoWithCurrentAttributes(node);
            break;
        case active:
            storageMaintainer.handleCoreDumpsForContainer(context, node, container);
            // Trigger cleanup when disk utilization reaches 80% of the node's allocation.
            storageMaintainer.getDiskUsageFor(context)
                    .map(diskUsage -> (double) diskUsage / BYTES_IN_GB / node.getMinDiskAvailableGb())
                    .filter(diskUtil -> diskUtil >= 0.8)
                    .ifPresent(diskUtil -> storageMaintainer.removeOldFilesFromNode(context));
            scheduleDownLoadIfNeeded(node);
            // Don't touch the container until the wanted image has finished downloading.
            if (isDownloadingImage()) {
                context.log(logger, LogLevel.DEBUG, "Waiting for image to download " + imageBeingDownloaded.asString());
                return;
            }
            container = removeContainerIfNeededUpdateContainerState(node, container);
            if (!container.isPresent()) {
                // STARTING marks the window where a failed start leaves us in an uncertain state.
                containerState = STARTING;
                startContainer(node);
                containerState = UNKNOWN;
                aclMaintainer.ifPresent(AclMaintainer::converge);
            }
            startServicesIfNeeded();
            resumeNodeIfNeeded(node);
            athenzCredentialsMaintainer.ifPresent(maintainer -> maintainer.converge(context));
            healthChecker.ifPresent(checker -> checker.verifyHealth(context));
            // Report attributes before resuming, so the Orchestrator sees an up-to-date node.
            updateNodeRepoWithCurrentAttributes(node);
            context.log(logger, "Call resume against Orchestrator");
            orchestrator.resume(context.hostname().value());
            break;
        case inactive:
            removeContainerIfNeededUpdateContainerState(node, container);
            updateNodeRepoWithCurrentAttributes(node);
            break;
        case provisioned:
            nodeRepository.setNodeState(context.hostname().value(), Node.State.dirty);
            break;
        case dirty:
            removeContainerIfNeededUpdateContainerState(node, container);
            context.log(logger, "State is " + node.getState() + ", will delete application storage and mark node as ready");
            athenzCredentialsMaintainer.ifPresent(maintainer -> maintainer.clearCredentials(context));
            storageMaintainer.archiveNodeStorage(context);
            updateNodeRepoWithCurrentAttributes(node);
            nodeRepository.setNodeState(context.hostname().value(), Node.State.ready);
            // The node may now disappear from the repo; the next pass should not treat that as an error.
            expectNodeNotInNodeRepo = true;
            break;
        default:
            throw new RuntimeException("UNKNOWN STATE " + node.getState().name());
    }
}
/**
 * Agent that keeps a single Docker container converged towards the node spec held by the node
 * repository. A dedicated loop thread repeatedly calls {@link #tick()}; work can also be triggered
 * eagerly via the shared monitor. Mutable flags guarded by {@code monitor} are noted below.
 */
class NodeAgentImpl implements NodeAgent {
    private static final long BYTES_IN_GB = 1_000_000_000L;
    private static final Logger logger = Logger.getLogger(NodeAgentImpl.class.getName());

    // Set once in stop(); the loop thread exits when it observes true.
    private final AtomicBoolean terminated = new AtomicBoolean(false);

    // isFrozen/wantFrozen/workToDoNow are read and written under 'monitor'.
    private boolean isFrozen = true;
    private boolean wantFrozen = false;
    private boolean workToDoNow = true;
    // True after we mark the node ready: its disappearance from the node repo is then expected.
    private boolean expectNodeNotInNodeRepo = false;

    private final Object monitor = new Object();

    // Non-null while an image pull is in flight (see scheduleDownLoadIfNeeded).
    private DockerImage imageBeingDownloaded = null;

    private final NodeAgentContext context;
    private final NodeRepository nodeRepository;
    private final Orchestrator orchestrator;
    private final DockerOperations dockerOperations;
    private final StorageMaintainer storageMaintainer;
    private final Clock clock;
    private final Duration timeBetweenEachConverge;
    private final Optional<AthenzCredentialsMaintainer> athenzCredentialsMaintainer;
    private final Optional<AclMaintainer> aclMaintainer;

    // Exposed (and reset) through getAndResetNumberOfUnhandledExceptions() for monitoring.
    private int numberOfUnhandledException = 0;
    private Instant lastConverge;
    // Last state observed in the node repo; null means "absent". Used only for change logging.
    private Node.State lastState = null;
    private final Thread loopThread;
    private final Optional<HealthChecker> healthChecker;

    // Single-threaded scheduler used for the daily filebeat restart (see resumeNodeIfNeeded).
    private final ScheduledExecutorService filebeatRestarter =
            Executors.newScheduledThreadPool(1, ThreadFactoryFactory.getDaemonThreadFactory("filebeatrestarter"));
    // Assigned in start(); restarts a named service inside the container as root.
    private Consumer<String> serviceRestarter;
    private Optional<Future<?>> currentFilebeatRestarter = Optional.empty();

    private boolean hasResumedNode = false;
    private boolean hasStartedServices = true;

    /**
     * ABSENT means container is definitely absent - A container that was absent will not suddenly appear without
     * NodeAgent explicitly starting it.
     * STARTING state is set just before we attempt to start a container, if successful we move to the next state.
     * Otherwise we can't be certain. A container that was running a minute ago may no longer be running without
     * NodeAgent doing anything (container could have crashed). Therefore we always have to ask docker daemon
     * to get updated state of the container.
     */
    enum ContainerState {
        ABSENT,
        STARTING,
        UNKNOWN
    }

    private ContainerState containerState = UNKNOWN;

    // Last node spec loaded from the node repo; also read by updateContainerNodeMetrics().
    private NodeSpec lastNode = null;
    private CpuUsageReporter lastCpuMetric = new CpuUsageReporter();

    /**
     * Creates the agent and its (not yet started) loop thread.
     * All collaborators are injected; the clock is injected for testability of the tick timing.
     */
    public NodeAgentImpl(
            final NodeAgentContext context,
            final NodeRepository nodeRepository,
            final Orchestrator orchestrator,
            final DockerOperations dockerOperations,
            final StorageMaintainer storageMaintainer,
            final Clock clock,
            final Duration timeBetweenEachConverge,
            final Optional<AthenzCredentialsMaintainer> athenzCredentialsMaintainer,
            final Optional<AclMaintainer> aclMaintainer,
            final Optional<HealthChecker> healthChecker) {
        this.context = context;
        this.nodeRepository = nodeRepository;
        this.orchestrator = orchestrator;
        this.dockerOperations = dockerOperations;
        this.storageMaintainer = storageMaintainer;
        this.clock = clock;
        this.timeBetweenEachConverge = timeBetweenEachConverge;
        this.lastConverge = clock.instant();
        this.athenzCredentialsMaintainer = athenzCredentialsMaintainer;
        this.aclMaintainer = aclMaintainer;
        this.healthChecker = healthChecker;
        this.loopThread = new Thread(() -> {
            try {
                while (!terminated.get()) tick();
            } catch (Throwable t) {
                // tick() catches most exceptions itself; this is the last-resort guard for the thread.
                numberOfUnhandledException++;
                context.log(logger, LogLevel.ERROR, "Unhandled throwable, ignoring", t);
            }
        });
        this.loopThread.setName("tick-" + context.hostname());
    }

    /**
     * Requests the agent to (un)freeze and returns whether the agent has already reached the
     * requested frozen state. The actual transition happens in tick() under the monitor.
     */
    @Override
    public boolean setFrozen(boolean frozen) {
        synchronized (monitor) {
            if (wantFrozen != frozen) {
                wantFrozen = frozen;
                context.log(logger, LogLevel.DEBUG, wantFrozen ? "Freezing" : "Unfreezing");
                signalWorkToBeDone();
            }
            return isFrozen == frozen;
        }
    }

    /** Starts the converge loop thread and installs the in-container service restarter. */
    @Override
    public void start() {
        context.log(logger, "Starting with interval " + timeBetweenEachConverge.toMillis() + " ms");
        loopThread.start();
        serviceRestarter = service -> {
            try {
                ProcessResult processResult = dockerOperations.executeCommandInContainerAsRoot(
                        context, "service", service, "restart");
                if (!processResult.isSuccess()) {
                    context.log(logger, LogLevel.ERROR, "Failed to restart service " + service + ": " + processResult);
                }
            } catch (Exception e) {
                context.log(logger, LogLevel.ERROR, "Failed to restart service " + service, e);
            }
        };
    }

    /**
     * Stops the agent: shuts down the scheduler, terminates the loop thread and waits for both,
     * then closes the health checker. May only be called once.
     */
    @Override
    public void stop() {
        filebeatRestarter.shutdown();
        if (!terminated.compareAndSet(false, true)) {
            throw new RuntimeException("Can not re-stop a node agent.");
        }
        signalWorkToBeDone();
        // Loop until both the loop thread and the scheduler are really gone, even if interrupted.
        do {
            try {
                loopThread.join();
                filebeatRestarter.awaitTermination(Long.MAX_VALUE, TimeUnit.NANOSECONDS);
            } catch (InterruptedException e) {
                context.log(logger, LogLevel.ERROR,
                        "Interrupted while waiting for converge thread and filebeatRestarter scheduler to shutdown");
            }
        } while (loopThread.isAlive() || !filebeatRestarter.isTerminated());
        healthChecker.ifPresent(HealthChecker::close);
        context.log(logger, "Stopped");
    }

    /** Starts services inside the container unless they are already considered started. */
    void startServicesIfNeeded() {
        if (!hasStartedServices) {
            context.log(logger, "Starting services");
            dockerOperations.startServices(context);
            hasStartedServices = true;
        }
    }

    /**
     * Runs the optional node resume command once per container start, and on first resume also
     * writes the metrics config and schedules the daily filebeat restart.
     */
    void resumeNodeIfNeeded(NodeSpec node) {
        if (!hasResumedNode) {
            if (!currentFilebeatRestarter.isPresent()) {
                storageMaintainer.writeMetricsConfig(context, node);
                currentFilebeatRestarter = Optional.of(filebeatRestarter.scheduleWithFixedDelay(
                        () -> serviceRestarter.accept("filebeat"), 1, 1, TimeUnit.DAYS));
            }
            context.log(logger, LogLevel.DEBUG, "Starting optional node program resume command");
            dockerOperations.resumeNode(context);
            hasResumedNode = true;
        }
    }

    /**
     * Builds the delta between the node's current and wanted restart/reboot generations and Docker
     * image, and publishes the new attributes to the node repo if anything differs.
     */
    private void updateNodeRepoWithCurrentAttributes(final NodeSpec node) {
        final NodeAttributes currentNodeAttributes = new NodeAttributes();
        final NodeAttributes newNodeAttributes = new NodeAttributes();
        if (!Objects.equals(node.getCurrentRestartGeneration(), node.getWantedRestartGeneration())) {
            currentNodeAttributes.withRestartGeneration(node.getCurrentRestartGeneration());
            newNodeAttributes.withRestartGeneration(node.getWantedRestartGeneration());
        }
        if (!Objects.equals(node.getCurrentRebootGeneration(), node.getWantedRebootGeneration())) {
            currentNodeAttributes.withRebootGeneration(node.getCurrentRebootGeneration());
            newNodeAttributes.withRebootGeneration(node.getWantedRebootGeneration());
        }
        // Only report the wanted image as current when the container state is settled (UNKNOWN).
        Optional<DockerImage> actualDockerImage = node.getWantedDockerImage().filter(n -> containerState == UNKNOWN);
        if (!Objects.equals(node.getCurrentDockerImage(), actualDockerImage)) {
            currentNodeAttributes.withDockerImage(node.getCurrentDockerImage().orElse(new DockerImage("")));
            newNodeAttributes.withDockerImage(actualDockerImage.orElse(new DockerImage("")));
        }
        publishStateToNodeRepoIfChanged(currentNodeAttributes, newNodeAttributes);
    }

    /** Pushes the new attributes to the node repo, but only when they actually changed. */
    private void publishStateToNodeRepoIfChanged(NodeAttributes currentAttributes, NodeAttributes newAttributes) {
        if (!currentAttributes.equals(newAttributes)) {
            context.log(logger, "Publishing new set of attributes to node repo: %s -> %s",
                    currentAttributes, newAttributes);
            nodeRepository.updateNodeAttributes(context.hostname().value(), newAttributes);
        }
    }

    /** Creates and starts the container, resetting per-container bookkeeping. */
    private void startContainer(NodeSpec node) {
        ContainerData containerData = createContainerData(context, node);
        dockerOperations.createContainer(context, node, containerData);
        dockerOperations.startContainer(context);
        lastCpuMetric = new CpuUsageReporter();
        hasStartedServices = true;
        hasResumedNode = false;
        context.log(logger, "Container successfully started, new containerState is " + containerState);
    }

    /**
     * Removes the container if required; for a surviving container, restarts services when the
     * restart generation has been bumped. Returns the container if it still exists afterwards.
     */
    private Optional<Container> removeContainerIfNeededUpdateContainerState(
            NodeSpec node, Optional<Container> existingContainer) {
        return existingContainer
                .flatMap(container -> removeContainerIfNeeded(node, container))
                .map(container -> {
                    shouldRestartServices(node).ifPresent(restartReason -> {
                        context.log(logger, "Will restart services: " + restartReason);
                        restartServices(node, container);
                    });
                    return container;
                });
    }

    /** Returns the restart reason when the wanted restart generation is ahead of the current one. */
    private Optional<String> shouldRestartServices(NodeSpec node) {
        if (!node.getWantedRestartGeneration().isPresent()) return Optional.empty();
        // NOTE(review): assumes current restart generation is present whenever wanted is — TODO confirm.
        if (node.getCurrentRestartGeneration().get() < node.getWantedRestartGeneration().get()) {
            return Optional.of("Restart requested - wanted restart generation has been bumped: "
                    + node.getCurrentRestartGeneration().get() + " -> " + node.getWantedRestartGeneration().get());
        }
        return Optional.empty();
    }

    /** Suspends via the Orchestrator and restarts vespa, but only for a running, active node. */
    private void restartServices(NodeSpec node, Container existingContainer) {
        if (existingContainer.state.isRunning() && node.getState() == Node.State.active) {
            context.log(logger, "Restarting services");
            orchestratorSuspendNode();
            dockerOperations.restartVespa(context);
        }
    }

    /** Stops services in the container; a missing container simply marks the state ABSENT. */
    @Override
    public void stopServices() {
        context.log(logger, "Stopping services");
        if (containerState == ABSENT) return;
        try {
            hasStartedServices = hasResumedNode = false;
            dockerOperations.stopServices(context);
        } catch (ContainerNotFoundException e) {
            containerState = ABSENT;
        }
    }

    /** Suspends the node's services; suspend failures are logged and otherwise ignored. */
    @Override
    public void suspend() {
        context.log(logger, "Suspending services on node");
        if (containerState == ABSENT) return;
        try {
            hasResumedNode = false;
            dockerOperations.suspendNode(context);
        } catch (ContainerNotFoundException e) {
            containerState = ABSENT;
        } catch (RuntimeException e) {
            // It's bad to continue as-if suspended, but on the other hand if
            // the node agent gets stuck here, we cannot make progress either.
            context.log(logger, LogLevel.WARNING, "Failed trying to suspend container", e);
        }
    }

    /**
     * Returns a human-readable reason when the existing container must be removed (wrong state,
     * image, resources, reboot wanted, or a failed start), empty when it can be kept.
     */
    private Optional<String> shouldRemoveContainer(NodeSpec node, Container existingContainer) {
        final Node.State nodeState = node.getState();
        if (nodeState == Node.State.dirty || nodeState == Node.State.provisioned) {
            return Optional.of("Node in state " + nodeState + ", container should no longer be running");
        }
        if (node.getWantedDockerImage().isPresent()
                && !node.getWantedDockerImage().get().equals(existingContainer.image)) {
            return Optional.of("The node is supposed to run a new Docker image: "
                    + existingContainer.image.asString() + " -> " + node.getWantedDockerImage().get().asString());
        }
        if (!existingContainer.state.isRunning()) {
            return Optional.of("Container no longer running");
        }
        ContainerResources wantedContainerResources = ContainerResources.from(
                node.getMinCpuCores(), node.getMinMainMemoryAvailableGb());
        if (!wantedContainerResources.equals(existingContainer.resources)) {
            return Optional.of("Container should be running with different resource allocation, wanted: "
                    + wantedContainerResources + ", actual: " + existingContainer.resources);
        }
        if (node.getCurrentRebootGeneration() < node.getWantedRebootGeneration()) {
            return Optional.of(String.format("Container reboot wanted. Current: %d, Wanted: %d",
                    node.getCurrentRebootGeneration(), node.getWantedRebootGeneration()));
        }
        // STARTING here means the last start attempt never completed.
        if (containerState == STARTING) return Optional.of("Container failed to start");
        return Optional.empty();
    }

    /**
     * Removes the container when shouldRemoveContainer() gives a reason: suspends/stops services
     * first (best effort), cancels the filebeat schedule, handles core dumps, then removes.
     * Returns the container when it was kept, empty when it was removed.
     */
    private Optional<Container> removeContainerIfNeeded(NodeSpec node, Container existingContainer) {
        Optional<String> removeReason = shouldRemoveContainer(node, existingContainer);
        if (removeReason.isPresent()) {
            context.log(logger, "Will remove container: " + removeReason.get());
            if (existingContainer.state.isRunning()) {
                if (node.getState() == Node.State.active) {
                    orchestratorSuspendNode();
                }
                try {
                    if (node.getState() != Node.State.dirty) {
                        suspend();
                    }
                    stopServices();
                } catch (Exception e) {
                    // Best effort: removal proceeds even if graceful shutdown fails.
                    context.log(logger, LogLevel.WARNING, "Failed stopping services, ignoring", e);
                }
            }
            stopFilebeatSchedulerIfNeeded();
            storageMaintainer.handleCoreDumpsForContainer(context, node, Optional.of(existingContainer));
            dockerOperations.removeContainer(context, existingContainer);
            containerState = ABSENT;
            context.log(logger, "Container successfully removed, new containerState is " + containerState);
            return Optional.empty();
        }
        return Optional.of(existingContainer);
    }

    /** Kicks off (or clears bookkeeping for) an async pull of the wanted Docker image. */
    private void scheduleDownLoadIfNeeded(NodeSpec node) {
        if (node.getCurrentDockerImage().equals(node.getWantedDockerImage())) return;
        if (dockerOperations.pullImageAsyncIfNeeded(node.getWantedDockerImage().get())) {
            imageBeingDownloaded = node.getWantedDockerImage().get();
        } else if (imageBeingDownloaded != null) {
            // Pull no longer needed/in progress.
            imageBeingDownloaded = null;
        }
    }

    /** Wakes the loop thread so the next tick runs immediately instead of after the interval. */
    private void signalWorkToBeDone() {
        synchronized (monitor) {
            if (!workToDoNow) {
                workToDoNow = true;
                context.log(logger, LogLevel.DEBUG, "Signaling work to be done");
                monitor.notifyAll();
            }
        }
    }

    /**
     * One loop iteration: waits out the remainder of the converge interval (or an explicit signal),
     * applies any pending freeze request, then converges unless frozen. Known exception types from
     * converge() are handled here so the loop keeps running.
     */
    void tick() {
        boolean isFrozenCopy;
        synchronized (monitor) {
            while (!workToDoNow) {
                long remainder = timeBetweenEachConverge
                        .minus(Duration.between(lastConverge, clock.instant()))
                        .toMillis();
                if (remainder > 0) {
                    try {
                        monitor.wait(remainder);
                    } catch (InterruptedException e) {
                        context.log(logger, LogLevel.ERROR, "Interrupted while sleeping before tick, ignoring");
                    }
                } else break;
            }
            lastConverge = clock.instant();
            workToDoNow = false;
            if (isFrozen != wantFrozen) {
                isFrozen = wantFrozen;
                context.log(logger, "Updated NodeAgent's frozen state, new value: isFrozen: " + isFrozen);
            }
            isFrozenCopy = isFrozen;
        }
        // NOTE(review): 'converged' is assigned but not read in the visible code.
        boolean converged = false;
        if (isFrozenCopy) {
            context.log(logger, LogLevel.DEBUG, "tick: isFrozen");
        } else {
            try {
                converge();
                converged = true;
            } catch (OrchestratorException e) {
                context.log(logger, e.getMessage());
            } catch (ContainerNotFoundException e) {
                containerState = ABSENT;
                context.log(logger, LogLevel.WARNING,
                        "Container unexpectedly gone, resetting containerState to " + containerState);
            } catch (DockerException e) {
                numberOfUnhandledException++;
                context.log(logger, LogLevel.ERROR, "Caught a DockerException", e);
            } catch (Exception e) {
                numberOfUnhandledException++;
                context.log(logger, LogLevel.ERROR, "Unhandled exception, ignoring.", e);
            }
        }
    }

    /** Renders a possibly-null node state for log messages. */
    private String stateDescription(Node.State state) {
        return state == null ? "[absent]" : state.toString();
    }

    /** Cancels the scheduled daily filebeat restart, if one is active. */
    private void stopFilebeatSchedulerIfNeeded() {
        if (currentFilebeatRestarter.isPresent()) {
            currentFilebeatRestarter.get().cancel(true);
            currentFilebeatRestarter = Optional.empty();
        }
    }

    /**
     * Samples container stats from the docker daemon, converts them to CPU/memory/disk/network
     * metrics relative to the node's allocation, and pushes them into the container.
     * No-op unless a settled (UNKNOWN-state) container and a node spec exist.
     */
    @SuppressWarnings("unchecked")
    public void updateContainerNodeMetrics() {
        final NodeSpec node = lastNode;
        if (node == null || containerState != UNKNOWN) return;
        Optional<ContainerStats> containerStats = dockerOperations.getContainerStats(context);
        if (!containerStats.isPresent()) return;
        Dimensions.Builder dimensionsBuilder = new Dimensions.Builder()
                .add("host", context.hostname().value())
                .add("role", SecretAgentCheckConfig.nodeTypeToRole(context.nodeType()))
                .add("state", node.getState().toString());
        node.getParentHostname().ifPresent(parent -> dimensionsBuilder.add("parentHostname", parent));
        node.getAllowedToBeDown().ifPresent(allowed ->
                dimensionsBuilder.add("orchestratorState", allowed ? "ALLOWED_TO_BE_DOWN" : "NO_REMARKS"));
        Dimensions dimensions = dimensionsBuilder.build();
        ContainerStats stats = containerStats.get();
        final String APP = MetricReceiverWrapper.APPLICATION_NODE;
        // Raw docker stats maps; keys follow the docker stats API ("cpu_usage", "percpu_usage", ...).
        final int totalNumCpuCores =
                ((List<Number>) ((Map) stats.getCpuStats().get("cpu_usage")).get("percpu_usage")).size();
        final long cpuContainerKernelTime =
                ((Number) ((Map) stats.getCpuStats().get("cpu_usage")).get("usage_in_kernelmode")).longValue();
        final long cpuContainerTotalTime =
                ((Number) ((Map) stats.getCpuStats().get("cpu_usage")).get("total_usage")).longValue();
        final long cpuSystemTotalTime = ((Number) stats.getCpuStats().get("system_cpu_usage")).longValue();
        final long memoryTotalBytes = ((Number) stats.getMemoryStats().get("limit")).longValue();
        final long memoryTotalBytesUsage = ((Number) stats.getMemoryStats().get("usage")).longValue();
        final long memoryTotalBytesCache =
                ((Number) ((Map) stats.getMemoryStats().get("stats")).get("cache")).longValue();
        final long diskTotalBytes = (long) (node.getMinDiskAvailableGb() * BYTES_IN_GB);
        final Optional<Long> diskTotalBytesUsed = storageMaintainer.getDiskUsageFor(context);
        lastCpuMetric.updateCpuDeltas(cpuSystemTotalTime, cpuContainerTotalTime, cpuContainerKernelTime);
        // Scale CPU usage to the node's allocated share of the host's cores.
        final double allocatedCpuRatio = node.getMinCpuCores() / totalNumCpuCores;
        double cpuUsageRatioOfAllocated = lastCpuMetric.getCpuUsageRatio() / allocatedCpuRatio;
        double cpuKernelUsageRatioOfAllocated = lastCpuMetric.getCpuKernelUsageRatio() / allocatedCpuRatio;
        // Cache pages are reclaimable, so they are excluded from "used" memory.
        long memoryTotalBytesUsed = memoryTotalBytesUsage - memoryTotalBytesCache;
        double memoryUsageRatio = (double) memoryTotalBytesUsed / memoryTotalBytes;
        Optional<Double> diskUsageRatio = diskTotalBytesUsed.map(used -> (double) used / diskTotalBytes);
        List<DimensionMetrics> metrics = new ArrayList<>();
        DimensionMetrics.Builder systemMetricsBuilder = new DimensionMetrics.Builder(APP, dimensions)
                .withMetric("mem.limit", memoryTotalBytes)
                .withMetric("mem.used", memoryTotalBytesUsed)
                .withMetric("mem.util", 100 * memoryUsageRatio)
                .withMetric("cpu.util", 100 * cpuUsageRatioOfAllocated)
                .withMetric("cpu.sys.util", 100 * cpuKernelUsageRatioOfAllocated)
                .withMetric("disk.limit", diskTotalBytes);
        diskTotalBytesUsed.ifPresent(diskUsed -> systemMetricsBuilder.withMetric("disk.used", diskUsed));
        diskUsageRatio.ifPresent(diskRatio -> systemMetricsBuilder.withMetric("disk.util", 100 * diskRatio));
        metrics.add(systemMetricsBuilder.build());
        // One metrics set per network interface, dimensioned by interface name.
        stats.getNetworks().forEach((interfaceName, interfaceStats) -> {
            Dimensions netDims = dimensionsBuilder.add("interface", interfaceName).build();
            Map<String, Number> infStats = (Map<String, Number>) interfaceStats;
            DimensionMetrics networkMetrics = new DimensionMetrics.Builder(APP, netDims)
                    .withMetric("net.in.bytes", infStats.get("rx_bytes").longValue())
                    .withMetric("net.in.errors", infStats.get("rx_errors").longValue())
                    .withMetric("net.in.dropped", infStats.get("rx_dropped").longValue())
                    .withMetric("net.out.bytes", infStats.get("tx_bytes").longValue())
                    .withMetric("net.out.errors", infStats.get("tx_errors").longValue())
                    .withMetric("net.out.dropped", infStats.get("tx_dropped").longValue())
                    .build();
            metrics.add(networkMetrics);
        });
        pushMetricsToContainer(metrics);
    }

    /** Serializes the metrics and delivers them to the in-container metrics endpoint via RPC. */
    private void pushMetricsToContainer(List<DimensionMetrics> metrics) {
        StringBuilder params = new StringBuilder();
        try {
            for (DimensionMetrics dimensionMetrics : metrics) {
                params.append(dimensionMetrics.toSecretAgentReport());
            }
            String wrappedMetrics = "s:" + params.toString();
            // Push metrics to the metrics proxy in each container.
            String[] command = {"vespa-rpc-invoke", "-t", "2", "tcp/localhost:19091", "setExtraMetrics", wrappedMetrics};
            dockerOperations.executeCommandInContainerAsRoot(context, 5L, command);
        } catch (DockerExecTimeoutException | JsonProcessingException e) {
            context.log(logger, LogLevel.WARNING, "Failed to push metrics to container", e);
        }
    }

    /** Asks the docker daemon for the container, short-circuiting when known ABSENT. */
    private Optional<Container> getContainer() {
        if (containerState == ABSENT) return Optional.empty();
        Optional<Container> container = dockerOperations.getContainer(context);
        if (!container.isPresent()) containerState = ABSENT;
        return container;
    }

    /** Returns true while an async image pull is considered in progress. */
    @Override
    public boolean isDownloadingImage() {
        return imageBeingDownloaded != null;
    }

    /** Returns the unhandled-exception count accumulated since the last call, and resets it. */
    @Override
    public int getAndResetNumberOfUnhandledExceptions() {
        int temp = numberOfUnhandledException;
        numberOfUnhandledException = 0;
        return temp;
    }

    /** Tracks CPU usage deltas between successive stat samples. */
    class CpuUsageReporter {
        private long containerKernelUsage = 0;
        private long totalContainerUsage = 0;
        private long totalSystemUsage = 0;

        private long deltaContainerKernelUsage;
        private long deltaContainerUsage;
        private long deltaSystemUsage;

        // Records a new sample; the very first sample yields a zero system delta so the
        // resulting ratios are NaN rather than garbage.
        private void updateCpuDeltas(long totalSystemUsage, long totalContainerUsage, long containerKernelUsage) {
            deltaSystemUsage = this.totalSystemUsage == 0 ? 0 : (totalSystemUsage - this.totalSystemUsage);
            deltaContainerUsage = totalContainerUsage - this.totalContainerUsage;
            deltaContainerKernelUsage = containerKernelUsage - this.containerKernelUsage;
            this.totalSystemUsage = totalSystemUsage;
            this.totalContainerUsage = totalContainerUsage;
            this.containerKernelUsage = containerKernelUsage;
        }

        /**
         * Returns the CPU usage ratio for the docker container that this NodeAgent is managing
         * in the time between the last two times updateCpuDeltas() was called. This is calculated
         * by dividing the CPU time used by the container with the CPU time used by the entire system.
         */
        double getCpuUsageRatio() {
            return deltaSystemUsage == 0 ? Double.NaN : (double) deltaContainerUsage / deltaSystemUsage;
        }

        /** Same as getCpuUsageRatio(), but for kernel-mode CPU time only. */
        double getCpuKernelUsageRatio() {
            return deltaSystemUsage == 0 ? Double.NaN : (double) deltaContainerKernelUsage / deltaSystemUsage;
        }
    }

    /** Asks the Orchestrator for permission to suspend this node (throws via the client if denied). */
    private void orchestratorSuspendNode() {
        context.log(logger, "Ask Orchestrator for permission to suspend node");
        orchestrator.suspend(context.hostname().value());
    }

    /** Hook for subclasses to provide files for the container; default rejects all writes. */
    protected ContainerData createContainerData(NodeAgentContext context, NodeSpec node) {
        return (pathInContainer, data) -> {
            throw new UnsupportedOperationException("addFile not implemented");
        };
    }
}
class NodeAgentImpl implements NodeAgent { private static final long BYTES_IN_GB = 1_000_000_000L; private static final Logger logger = Logger.getLogger(NodeAgentImpl.class.getName()); private final AtomicBoolean terminated = new AtomicBoolean(false); private boolean isFrozen = true; private boolean wantFrozen = false; private boolean workToDoNow = true; private boolean expectNodeNotInNodeRepo = false; private final Object monitor = new Object(); private DockerImage imageBeingDownloaded = null; private final NodeAgentContext context; private final NodeRepository nodeRepository; private final Orchestrator orchestrator; private final DockerOperations dockerOperations; private final StorageMaintainer storageMaintainer; private final Clock clock; private final Duration timeBetweenEachConverge; private final Optional<AthenzCredentialsMaintainer> athenzCredentialsMaintainer; private final Optional<AclMaintainer> aclMaintainer; private int numberOfUnhandledException = 0; private Instant lastConverge; private final Thread loopThread; private final Optional<HealthChecker> healthChecker; private final ScheduledExecutorService filebeatRestarter = Executors.newScheduledThreadPool(1, ThreadFactoryFactory.getDaemonThreadFactory("filebeatrestarter")); private Consumer<String> serviceRestarter; private Optional<Future<?>> currentFilebeatRestarter = Optional.empty(); private boolean hasResumedNode = false; private boolean hasStartedServices = true; /** * ABSENT means container is definitely absent - A container that was absent will not suddenly appear without * NodeAgent explicitly starting it. * STARTING state is set just before we attempt to start a container, if successful we move to the next state. * Otherwise we can't be certain. A container that was running a minute ago may no longer be running without * NodeAgent doing anything (container could have crashed). Therefore we always have to ask docker daemon * to get updated state of the container. 
*/ enum ContainerState { ABSENT, STARTING, UNKNOWN } private ContainerState containerState = UNKNOWN; private NodeSpec lastNode = null; private CpuUsageReporter lastCpuMetric = new CpuUsageReporter(); public NodeAgentImpl( final NodeAgentContext context, final NodeRepository nodeRepository, final Orchestrator orchestrator, final DockerOperations dockerOperations, final StorageMaintainer storageMaintainer, final Clock clock, final Duration timeBetweenEachConverge, final Optional<AthenzCredentialsMaintainer> athenzCredentialsMaintainer, final Optional<AclMaintainer> aclMaintainer, final Optional<HealthChecker> healthChecker) { this.context = context; this.nodeRepository = nodeRepository; this.orchestrator = orchestrator; this.dockerOperations = dockerOperations; this.storageMaintainer = storageMaintainer; this.clock = clock; this.timeBetweenEachConverge = timeBetweenEachConverge; this.lastConverge = clock.instant(); this.athenzCredentialsMaintainer = athenzCredentialsMaintainer; this.aclMaintainer = aclMaintainer; this.healthChecker = healthChecker; this.loopThread = new Thread(() -> { try { while (!terminated.get()) tick(); } catch (Throwable t) { numberOfUnhandledException++; context.log(logger, LogLevel.ERROR, "Unhandled throwable, ignoring", t); } }); this.loopThread.setName("tick-" + context.hostname()); } @Override public boolean setFrozen(boolean frozen) { synchronized (monitor) { if (wantFrozen != frozen) { wantFrozen = frozen; context.log(logger, LogLevel.DEBUG, wantFrozen ? 
"Freezing" : "Unfreezing"); signalWorkToBeDone(); } return isFrozen == frozen; } } @Override public void start() { context.log(logger, "Starting with interval " + timeBetweenEachConverge.toMillis() + " ms"); loopThread.start(); serviceRestarter = service -> { try { ProcessResult processResult = dockerOperations.executeCommandInContainerAsRoot( context, "service", service, "restart"); if (!processResult.isSuccess()) { context.log(logger, LogLevel.ERROR, "Failed to restart service " + service + ": " + processResult); } } catch (Exception e) { context.log(logger, LogLevel.ERROR, "Failed to restart service " + service, e); } }; } @Override public void stop() { filebeatRestarter.shutdown(); if (!terminated.compareAndSet(false, true)) { throw new RuntimeException("Can not re-stop a node agent."); } signalWorkToBeDone(); do { try { loopThread.join(); filebeatRestarter.awaitTermination(Long.MAX_VALUE, TimeUnit.NANOSECONDS); } catch (InterruptedException e) { context.log(logger, LogLevel.ERROR, "Interrupted while waiting for converge thread and filebeatRestarter scheduler to shutdown"); } } while (loopThread.isAlive() || !filebeatRestarter.isTerminated()); context.log(logger, "Stopped"); } void startServicesIfNeeded() { if (!hasStartedServices) { context.log(logger, "Starting services"); dockerOperations.startServices(context); hasStartedServices = true; } } void resumeNodeIfNeeded(NodeSpec node) { if (!hasResumedNode) { if (!currentFilebeatRestarter.isPresent()) { storageMaintainer.writeMetricsConfig(context, node); currentFilebeatRestarter = Optional.of(filebeatRestarter.scheduleWithFixedDelay( () -> serviceRestarter.accept("filebeat"), 1, 1, TimeUnit.DAYS)); } context.log(logger, LogLevel.DEBUG, "Starting optional node program resume command"); dockerOperations.resumeNode(context); hasResumedNode = true; } } private void updateNodeRepoWithCurrentAttributes(final NodeSpec node) { final NodeAttributes currentNodeAttributes = new NodeAttributes(); final NodeAttributes 
newNodeAttributes = new NodeAttributes(); if (!Objects.equals(node.getCurrentRestartGeneration(), node.getWantedRestartGeneration())) { currentNodeAttributes.withRestartGeneration(node.getCurrentRestartGeneration()); newNodeAttributes.withRestartGeneration(node.getWantedRestartGeneration()); } if (!Objects.equals(node.getCurrentRebootGeneration(), node.getWantedRebootGeneration())) { currentNodeAttributes.withRebootGeneration(node.getCurrentRebootGeneration()); newNodeAttributes.withRebootGeneration(node.getWantedRebootGeneration()); } Optional<DockerImage> actualDockerImage = node.getWantedDockerImage().filter(n -> containerState == UNKNOWN); if (!Objects.equals(node.getCurrentDockerImage(), actualDockerImage)) { currentNodeAttributes.withDockerImage(node.getCurrentDockerImage().orElse(new DockerImage(""))); newNodeAttributes.withDockerImage(actualDockerImage.orElse(new DockerImage(""))); } publishStateToNodeRepoIfChanged(currentNodeAttributes, newNodeAttributes); } private void publishStateToNodeRepoIfChanged(NodeAttributes currentAttributes, NodeAttributes newAttributes) { if (!currentAttributes.equals(newAttributes)) { context.log(logger, "Publishing new set of attributes to node repo: %s -> %s", currentAttributes, newAttributes); nodeRepository.updateNodeAttributes(context.hostname().value(), newAttributes); } } private void startContainer(NodeSpec node) { ContainerData containerData = createContainerData(context, node); dockerOperations.createContainer(context, node, containerData); dockerOperations.startContainer(context); lastCpuMetric = new CpuUsageReporter(); hasStartedServices = true; hasResumedNode = false; context.log(logger, "Container successfully started, new containerState is " + containerState); } private Optional<Container> removeContainerIfNeededUpdateContainerState(NodeSpec node, Optional<Container> existingContainer) { return existingContainer .flatMap(container -> removeContainerIfNeeded(node, container)) .map(container -> { 
shouldRestartServices(node).ifPresent(restartReason -> { context.log(logger, "Will restart services: " + restartReason); restartServices(node, container); }); return container; }); } private Optional<String> shouldRestartServices(NodeSpec node) { if (!node.getWantedRestartGeneration().isPresent()) return Optional.empty(); if (node.getCurrentRestartGeneration().get() < node.getWantedRestartGeneration().get()) { return Optional.of("Restart requested - wanted restart generation has been bumped: " + node.getCurrentRestartGeneration().get() + " -> " + node.getWantedRestartGeneration().get()); } return Optional.empty(); } private void restartServices(NodeSpec node, Container existingContainer) { if (existingContainer.state.isRunning() && node.getState() == Node.State.active) { context.log(logger, "Restarting services"); orchestratorSuspendNode(); dockerOperations.restartVespa(context); } } @Override public void stopServices() { context.log(logger, "Stopping services"); if (containerState == ABSENT) return; try { hasStartedServices = hasResumedNode = false; dockerOperations.stopServices(context); } catch (ContainerNotFoundException e) { containerState = ABSENT; } } @Override public void suspend() { context.log(logger, "Suspending services on node"); if (containerState == ABSENT) return; try { hasResumedNode = false; dockerOperations.suspendNode(context); } catch (ContainerNotFoundException e) { containerState = ABSENT; } catch (RuntimeException e) { context.log(logger, LogLevel.WARNING, "Failed trying to suspend container", e); } } private Optional<String> shouldRemoveContainer(NodeSpec node, Container existingContainer) { final Node.State nodeState = node.getState(); if (nodeState == Node.State.dirty || nodeState == Node.State.provisioned) { return Optional.of("Node in state " + nodeState + ", container should no longer be running"); } if (node.getWantedDockerImage().isPresent() && !node.getWantedDockerImage().get().equals(existingContainer.image)) { return 
Optional.of("The node is supposed to run a new Docker image: " + existingContainer.image.asString() + " -> " + node.getWantedDockerImage().get().asString()); } if (!existingContainer.state.isRunning()) { return Optional.of("Container no longer running"); } ContainerResources wantedContainerResources = ContainerResources.from( node.getMinCpuCores(), node.getMinMainMemoryAvailableGb()); if (!wantedContainerResources.equals(existingContainer.resources)) { return Optional.of("Container should be running with different resource allocation, wanted: " + wantedContainerResources + ", actual: " + existingContainer.resources); } if (node.getCurrentRebootGeneration() < node.getWantedRebootGeneration()) { return Optional.of(String.format("Container reboot wanted. Current: %d, Wanted: %d", node.getCurrentRebootGeneration(), node.getWantedRebootGeneration())); } if (containerState == STARTING) return Optional.of("Container failed to start"); return Optional.empty(); } private Optional<Container> removeContainerIfNeeded(NodeSpec node, Container existingContainer) { Optional<String> removeReason = shouldRemoveContainer(node, existingContainer); if (removeReason.isPresent()) { context.log(logger, "Will remove container: " + removeReason.get()); if (existingContainer.state.isRunning()) { if (node.getState() == Node.State.active) { orchestratorSuspendNode(); } try { if (node.getState() != Node.State.dirty) { suspend(); } stopServices(); } catch (Exception e) { context.log(logger, LogLevel.WARNING, "Failed stopping services, ignoring", e); } } stopFilebeatSchedulerIfNeeded(); storageMaintainer.handleCoreDumpsForContainer(context, node, Optional.of(existingContainer)); dockerOperations.removeContainer(context, existingContainer); containerState = ABSENT; context.log(logger, "Container successfully removed, new containerState is " + containerState); return Optional.empty(); } return Optional.of(existingContainer); } private void scheduleDownLoadIfNeeded(NodeSpec node) { if 
(node.getCurrentDockerImage().equals(node.getWantedDockerImage())) return; if (dockerOperations.pullImageAsyncIfNeeded(node.getWantedDockerImage().get())) { imageBeingDownloaded = node.getWantedDockerImage().get(); } else if (imageBeingDownloaded != null) { imageBeingDownloaded = null; } } private void signalWorkToBeDone() { synchronized (monitor) { if (!workToDoNow) { workToDoNow = true; context.log(logger, LogLevel.DEBUG, "Signaling work to be done"); monitor.notifyAll(); } } } void tick() { boolean isFrozenCopy; synchronized (monitor) { while (!workToDoNow) { long remainder = timeBetweenEachConverge .minus(Duration.between(lastConverge, clock.instant())) .toMillis(); if (remainder > 0) { try { monitor.wait(remainder); } catch (InterruptedException e) { context.log(logger, LogLevel.ERROR, "Interrupted while sleeping before tick, ignoring"); } } else break; } lastConverge = clock.instant(); workToDoNow = false; if (isFrozen != wantFrozen) { isFrozen = wantFrozen; context.log(logger, "Updated NodeAgent's frozen state, new value: isFrozen: " + isFrozen); } isFrozenCopy = isFrozen; } boolean converged = false; if (isFrozenCopy) { context.log(logger, LogLevel.DEBUG, "tick: isFrozen"); } else { try { converge(); converged = true; } catch (OrchestratorException e) { context.log(logger, e.getMessage()); } catch (ContainerNotFoundException e) { containerState = ABSENT; context.log(logger, LogLevel.WARNING, "Container unexpectedly gone, resetting containerState to " + containerState); } catch (DockerException e) { numberOfUnhandledException++; context.log(logger, LogLevel.ERROR, "Caught a DockerException", e); } catch (Exception e) { numberOfUnhandledException++; context.log(logger, LogLevel.ERROR, "Unhandled exception, ignoring.", e); } } } private void logChangesToNodeSpec(NodeSpec lastNode, NodeSpec node) { StringBuilder builder = new StringBuilder(); appendIfDifferent(builder, "state", lastNode, node, NodeSpec::getState); if (builder.length() > 0) { context.log(logger, 
LogLevel.INFO, "Changes to node: " + builder.toString()); } } private static <T> String fieldDescription(T value) { return value == null ? "[absent]" : value.toString(); } private <T> void appendIfDifferent(StringBuilder builder, String name, NodeSpec oldNode, NodeSpec newNode, Function<NodeSpec, T> getter) { T oldValue = oldNode == null ? null : getter.apply(oldNode); T newValue = getter.apply(newNode); if (!Objects.equals(oldValue, newValue)) { if (builder.length() > 0) { builder.append(", "); } builder.append(name).append(" ").append(fieldDescription(oldValue)).append(" -> ").append(fieldDescription(newValue)); } } private void stopFilebeatSchedulerIfNeeded() { if (currentFilebeatRestarter.isPresent()) { currentFilebeatRestarter.get().cancel(true); currentFilebeatRestarter = Optional.empty(); } } @SuppressWarnings("unchecked") public void updateContainerNodeMetrics() { final NodeSpec node = lastNode; if (node == null || containerState != UNKNOWN) return; Optional<ContainerStats> containerStats = dockerOperations.getContainerStats(context); if (!containerStats.isPresent()) return; Dimensions.Builder dimensionsBuilder = new Dimensions.Builder() .add("host", context.hostname().value()) .add("role", SecretAgentCheckConfig.nodeTypeToRole(context.nodeType())) .add("state", node.getState().toString()); node.getParentHostname().ifPresent(parent -> dimensionsBuilder.add("parentHostname", parent)); node.getAllowedToBeDown().ifPresent(allowed -> dimensionsBuilder.add("orchestratorState", allowed ? 
"ALLOWED_TO_BE_DOWN" : "NO_REMARKS")); Dimensions dimensions = dimensionsBuilder.build(); ContainerStats stats = containerStats.get(); final String APP = MetricReceiverWrapper.APPLICATION_NODE; final int totalNumCpuCores = ((List<Number>) ((Map) stats.getCpuStats().get("cpu_usage")).get("percpu_usage")).size(); final long cpuContainerKernelTime = ((Number) ((Map) stats.getCpuStats().get("cpu_usage")).get("usage_in_kernelmode")).longValue(); final long cpuContainerTotalTime = ((Number) ((Map) stats.getCpuStats().get("cpu_usage")).get("total_usage")).longValue(); final long cpuSystemTotalTime = ((Number) stats.getCpuStats().get("system_cpu_usage")).longValue(); final long memoryTotalBytes = ((Number) stats.getMemoryStats().get("limit")).longValue(); final long memoryTotalBytesUsage = ((Number) stats.getMemoryStats().get("usage")).longValue(); final long memoryTotalBytesCache = ((Number) ((Map) stats.getMemoryStats().get("stats")).get("cache")).longValue(); final long diskTotalBytes = (long) (node.getMinDiskAvailableGb() * BYTES_IN_GB); final Optional<Long> diskTotalBytesUsed = storageMaintainer.getDiskUsageFor(context); lastCpuMetric.updateCpuDeltas(cpuSystemTotalTime, cpuContainerTotalTime, cpuContainerKernelTime); final double allocatedCpuRatio = node.getMinCpuCores() / totalNumCpuCores; double cpuUsageRatioOfAllocated = lastCpuMetric.getCpuUsageRatio() / allocatedCpuRatio; double cpuKernelUsageRatioOfAllocated = lastCpuMetric.getCpuKernelUsageRatio() / allocatedCpuRatio; long memoryTotalBytesUsed = memoryTotalBytesUsage - memoryTotalBytesCache; double memoryUsageRatio = (double) memoryTotalBytesUsed / memoryTotalBytes; Optional<Double> diskUsageRatio = diskTotalBytesUsed.map(used -> (double) used / diskTotalBytes); List<DimensionMetrics> metrics = new ArrayList<>(); DimensionMetrics.Builder systemMetricsBuilder = new DimensionMetrics.Builder(APP, dimensions) .withMetric("mem.limit", memoryTotalBytes) .withMetric("mem.used", memoryTotalBytesUsed) 
.withMetric("mem.util", 100 * memoryUsageRatio) .withMetric("cpu.util", 100 * cpuUsageRatioOfAllocated) .withMetric("cpu.sys.util", 100 * cpuKernelUsageRatioOfAllocated) .withMetric("disk.limit", diskTotalBytes); diskTotalBytesUsed.ifPresent(diskUsed -> systemMetricsBuilder.withMetric("disk.used", diskUsed)); diskUsageRatio.ifPresent(diskRatio -> systemMetricsBuilder.withMetric("disk.util", 100 * diskRatio)); metrics.add(systemMetricsBuilder.build()); stats.getNetworks().forEach((interfaceName, interfaceStats) -> { Dimensions netDims = dimensionsBuilder.add("interface", interfaceName).build(); Map<String, Number> infStats = (Map<String, Number>) interfaceStats; DimensionMetrics networkMetrics = new DimensionMetrics.Builder(APP, netDims) .withMetric("net.in.bytes", infStats.get("rx_bytes").longValue()) .withMetric("net.in.errors", infStats.get("rx_errors").longValue()) .withMetric("net.in.dropped", infStats.get("rx_dropped").longValue()) .withMetric("net.out.bytes", infStats.get("tx_bytes").longValue()) .withMetric("net.out.errors", infStats.get("tx_errors").longValue()) .withMetric("net.out.dropped", infStats.get("tx_dropped").longValue()) .build(); metrics.add(networkMetrics); }); pushMetricsToContainer(metrics); } private void pushMetricsToContainer(List<DimensionMetrics> metrics) { StringBuilder params = new StringBuilder(); try { for (DimensionMetrics dimensionMetrics : metrics) { params.append(dimensionMetrics.toSecretAgentReport()); } String wrappedMetrics = "s:" + params.toString(); String[] command = {"vespa-rpc-invoke", "-t", "2", "tcp/localhost:19091", "setExtraMetrics", wrappedMetrics}; dockerOperations.executeCommandInContainerAsRoot(context, 5L, command); } catch (DockerExecTimeoutException | JsonProcessingException e) { context.log(logger, LogLevel.WARNING, "Failed to push metrics to container", e); } } private Optional<Container> getContainer() { if (containerState == ABSENT) return Optional.empty(); Optional<Container> container = 
dockerOperations.getContainer(context); if (! container.isPresent()) containerState = ABSENT; return container; } @Override public boolean isDownloadingImage() { return imageBeingDownloaded != null; } @Override public int getAndResetNumberOfUnhandledExceptions() { int temp = numberOfUnhandledException; numberOfUnhandledException = 0; return temp; } class CpuUsageReporter { private long containerKernelUsage = 0; private long totalContainerUsage = 0; private long totalSystemUsage = 0; private long deltaContainerKernelUsage; private long deltaContainerUsage; private long deltaSystemUsage; private void updateCpuDeltas(long totalSystemUsage, long totalContainerUsage, long containerKernelUsage) { deltaSystemUsage = this.totalSystemUsage == 0 ? 0 : (totalSystemUsage - this.totalSystemUsage); deltaContainerUsage = totalContainerUsage - this.totalContainerUsage; deltaContainerKernelUsage = containerKernelUsage - this.containerKernelUsage; this.totalSystemUsage = totalSystemUsage; this.totalContainerUsage = totalContainerUsage; this.containerKernelUsage = containerKernelUsage; } /** * Returns the CPU usage ratio for the docker container that this NodeAgent is managing * in the time between the last two times updateCpuDeltas() was called. This is calculated * by dividing the CPU time used by the container with the CPU time used by the entire system. */ double getCpuUsageRatio() { return deltaSystemUsage == 0 ? Double.NaN : (double) deltaContainerUsage / deltaSystemUsage; } double getCpuKernelUsageRatio() { return deltaSystemUsage == 0 ? Double.NaN : (double) deltaContainerKernelUsage / deltaSystemUsage; } } private void orchestratorSuspendNode() { context.log(logger, "Ask Orchestrator for permission to suspend node"); orchestrator.suspend(context.hostname().value()); } protected ContainerData createContainerData(NodeAgentContext context, NodeSpec node) { return (pathInContainer, data) -> { throw new UnsupportedOperationException("addFile not implemented"); }; } }
logging moved down
void converge() { final Optional<NodeSpec> optionalNode = nodeRepository.getOptionalNode(context.hostname().value()); Node.State newState = optionalNode.map(NodeSpec::getState).orElse(null); if (newState != lastState) { context.log(logger, LogLevel.INFO, "State changed: " + stateDescription(lastState) + " -> " + stateDescription(newState)); lastState = newState; } if (!optionalNode.isPresent() && expectNodeNotInNodeRepo) return; final NodeSpec node = optionalNode.orElseThrow(() -> new IllegalStateException(String.format("Node '%s' missing from node repository", context.hostname()))); expectNodeNotInNodeRepo = false; Optional<Container> container = getContainer(); if (!node.equals(lastNode)) { if (container.map(c -> c.state.isRunning()).orElse(false)) { storageMaintainer.writeMetricsConfig(context, node); } context.log(logger, LogLevel.DEBUG, "Loading new node spec: " + node.toString()); lastNode = node; } switch (node.getState()) { case ready: case reserved: case parked: case failed: removeContainerIfNeededUpdateContainerState(node, container); updateNodeRepoWithCurrentAttributes(node); break; case active: storageMaintainer.handleCoreDumpsForContainer(context, node, container); storageMaintainer.getDiskUsageFor(context) .map(diskUsage -> (double) diskUsage / BYTES_IN_GB / node.getMinDiskAvailableGb()) .filter(diskUtil -> diskUtil >= 0.8) .ifPresent(diskUtil -> storageMaintainer.removeOldFilesFromNode(context)); scheduleDownLoadIfNeeded(node); if (isDownloadingImage()) { context.log(logger, LogLevel.DEBUG, "Waiting for image to download " + imageBeingDownloaded.asString()); return; } container = removeContainerIfNeededUpdateContainerState(node, container); if (! 
container.isPresent()) { containerState = STARTING; startContainer(node); containerState = UNKNOWN; aclMaintainer.ifPresent(AclMaintainer::converge); } startServicesIfNeeded(); resumeNodeIfNeeded(node); athenzCredentialsMaintainer.ifPresent(maintainer -> maintainer.converge(context)); healthChecker.ifPresent(checker -> checker.verifyHealth(context)); updateNodeRepoWithCurrentAttributes(node); context.log(logger, "Call resume against Orchestrator"); orchestrator.resume(context.hostname().value()); break; case inactive: removeContainerIfNeededUpdateContainerState(node, container); updateNodeRepoWithCurrentAttributes(node); break; case provisioned: nodeRepository.setNodeState(context.hostname().value(), Node.State.dirty); break; case dirty: removeContainerIfNeededUpdateContainerState(node, container); context.log(logger, "State is " + node.getState() + ", will delete application storage and mark node as ready"); athenzCredentialsMaintainer.ifPresent(maintainer -> maintainer.clearCredentials(context)); storageMaintainer.archiveNodeStorage(context); updateNodeRepoWithCurrentAttributes(node); nodeRepository.setNodeState(context.hostname().value(), Node.State.ready); expectNodeNotInNodeRepo = true; break; default: throw new RuntimeException("UNKNOWN STATE " + node.getState().name()); } }
Node.State newState = optionalNode.map(NodeSpec::getState).orElse(null);
void converge() { final Optional<NodeSpec> optionalNode = nodeRepository.getOptionalNode(context.hostname().value()); if (!optionalNode.isPresent() && expectNodeNotInNodeRepo) { context.log(logger, LogLevel.INFO, "Node removed from node repo (as expected)"); return; } final NodeSpec node = optionalNode.orElseThrow(() -> new IllegalStateException(String.format("Node '%s' missing from node repository", context.hostname()))); expectNodeNotInNodeRepo = false; Optional<Container> container = getContainer(); if (!node.equals(lastNode)) { logChangesToNodeSpec(lastNode, node); if (container.map(c -> c.state.isRunning()).orElse(false)) { storageMaintainer.writeMetricsConfig(context, node); } lastNode = node; } switch (node.getState()) { case ready: case reserved: case parked: case failed: removeContainerIfNeededUpdateContainerState(node, container); updateNodeRepoWithCurrentAttributes(node); break; case active: storageMaintainer.handleCoreDumpsForContainer(context, node, container); storageMaintainer.getDiskUsageFor(context) .map(diskUsage -> (double) diskUsage / BYTES_IN_GB / node.getMinDiskAvailableGb()) .filter(diskUtil -> diskUtil >= 0.8) .ifPresent(diskUtil -> storageMaintainer.removeOldFilesFromNode(context)); scheduleDownLoadIfNeeded(node); if (isDownloadingImage()) { context.log(logger, LogLevel.DEBUG, "Waiting for image to download " + imageBeingDownloaded.asString()); return; } container = removeContainerIfNeededUpdateContainerState(node, container); if (! 
container.isPresent()) { containerState = STARTING; startContainer(node); containerState = UNKNOWN; aclMaintainer.ifPresent(AclMaintainer::converge); } startServicesIfNeeded(); resumeNodeIfNeeded(node); athenzCredentialsMaintainer.ifPresent(maintainer -> maintainer.converge(context)); healthChecker.ifPresent(checker -> checker.verifyHealth(context)); updateNodeRepoWithCurrentAttributes(node); context.log(logger, "Call resume against Orchestrator"); orchestrator.resume(context.hostname().value()); break; case inactive: removeContainerIfNeededUpdateContainerState(node, container); updateNodeRepoWithCurrentAttributes(node); break; case provisioned: nodeRepository.setNodeState(context.hostname().value(), Node.State.dirty); break; case dirty: removeContainerIfNeededUpdateContainerState(node, container); context.log(logger, "State is " + node.getState() + ", will delete application storage and mark node as ready"); athenzCredentialsMaintainer.ifPresent(maintainer -> maintainer.clearCredentials(context)); storageMaintainer.archiveNodeStorage(context); updateNodeRepoWithCurrentAttributes(node); nodeRepository.setNodeState(context.hostname().value(), Node.State.ready); expectNodeNotInNodeRepo = true; break; default: throw new RuntimeException("UNKNOWN STATE " + node.getState().name()); } }
/**
 * Manages the full life cycle of one Docker container on behalf of the node repository.
 * A dedicated tick thread repeatedly converges the local container towards the wanted
 * state fetched from the node repo, and publishes attributes and metrics back.
 *
 * Thread-safety: the tick loop runs on {@code loopThread}. The coordination flags
 * {@code isFrozen}, {@code wantFrozen} and {@code workToDoNow} are guarded by
 * {@code monitor}; most other mutable state is touched only from the tick thread,
 * though {@link #stopServices()}/{@link #suspend()}/{@link #updateContainerNodeMetrics()}
 * are entered from other threads — presumably coordinated by freezing; TODO confirm.
 */
class NodeAgentImpl implements NodeAgent {

    // Conversion factor for the node repo's GB-denominated disk sizes.
    private static final long BYTES_IN_GB = 1_000_000_000L;

    private static final Logger logger = Logger.getLogger(NodeAgentImpl.class.getName());

    // Flipped exactly once by stop(); the tick loop exits when true.
    private final AtomicBoolean terminated = new AtomicBoolean(false);

    // Guarded by 'monitor'. The agent starts frozen; tick() copies wantFrozen into isFrozen.
    private boolean isFrozen = true;
    private boolean wantFrozen = false;
    private boolean workToDoNow = true;

    // Set when a node is handed back (dirty -> ready) so converge() can treat the node's
    // subsequent disappearance from the node repo as expected rather than an error.
    private boolean expectNodeNotInNodeRepo = false;

    private final Object monitor = new Object();

    // Non-null while an async image pull is believed to be in progress; see scheduleDownLoadIfNeeded().
    private DockerImage imageBeingDownloaded = null;

    private final NodeAgentContext context;
    private final NodeRepository nodeRepository;
    private final Orchestrator orchestrator;
    private final DockerOperations dockerOperations;
    private final StorageMaintainer storageMaintainer;
    private final Clock clock;
    private final Duration timeBetweenEachConverge;
    private final Optional<AthenzCredentialsMaintainer> athenzCredentialsMaintainer;
    private final Optional<AclMaintainer> aclMaintainer;

    // Incremented on unexpected failures; read and reset via getAndResetNumberOfUnhandledExceptions().
    private int numberOfUnhandledException = 0;

    private Instant lastConverge;

    // NOTE(review): appears unused anywhere in this class — candidate for removal
    // (verify no reflective/external access before deleting).
    private Node.State lastState = null;

    private final Thread loopThread;
    private final Optional<HealthChecker> healthChecker;

    // Single daemon thread used to periodically restart filebeat inside the container.
    private final ScheduledExecutorService filebeatRestarter =
            Executors.newScheduledThreadPool(1, ThreadFactoryFactory.getDaemonThreadFactory("filebeatrestarter"));

    // Initialized in start(); restarts a named service inside the container, best-effort.
    private Consumer<String> serviceRestarter;
    private Optional<Future<?>> currentFilebeatRestarter = Optional.empty();

    // Track what has already been done for the current container so converge() is idempotent.
    private boolean hasResumedNode = false;
    private boolean hasStartedServices = true;

    /**
     * ABSENT means container is definitely absent - A container that was absent will not suddenly appear without
     * NodeAgent explicitly starting it.
     * STARTING state is set just before we attempt to start a container, if successful we move to the next state.
     * Otherwise we can't be certain. A container that was running a minute ago may no longer be running without
     * NodeAgent doing anything (container could have crashed). Therefore we always have to ask docker daemon
     * to get updated state of the container.
     */
    enum ContainerState {
        ABSENT,
        STARTING,
        UNKNOWN
    }

    private ContainerState containerState = UNKNOWN;

    // Last node spec seen from the node repo; used for change detection and by metrics reporting.
    private NodeSpec lastNode = null;

    private CpuUsageReporter lastCpuMetric = new CpuUsageReporter();

    /**
     * Creates the agent. Construction only wires dependencies and creates (but does not
     * start) the tick thread; call {@link #start()} to begin converging.
     */
    public NodeAgentImpl(
            final NodeAgentContext context,
            final NodeRepository nodeRepository,
            final Orchestrator orchestrator,
            final DockerOperations dockerOperations,
            final StorageMaintainer storageMaintainer,
            final Clock clock,
            final Duration timeBetweenEachConverge,
            final Optional<AthenzCredentialsMaintainer> athenzCredentialsMaintainer,
            final Optional<AclMaintainer> aclMaintainer,
            final Optional<HealthChecker> healthChecker) {
        this.context = context;
        this.nodeRepository = nodeRepository;
        this.orchestrator = orchestrator;
        this.dockerOperations = dockerOperations;
        this.storageMaintainer = storageMaintainer;
        this.clock = clock;
        this.timeBetweenEachConverge = timeBetweenEachConverge;
        this.lastConverge = clock.instant();
        this.athenzCredentialsMaintainer = athenzCredentialsMaintainer;
        this.aclMaintainer = aclMaintainer;
        this.healthChecker = healthChecker;
        this.loopThread = new Thread(() -> {
            try {
                while (!terminated.get()) tick();
            } catch (Throwable t) {
                // tick() catches per-pass exceptions itself; anything reaching here kills the loop.
                numberOfUnhandledException++;
                context.log(logger, LogLevel.ERROR, "Unhandled throwable, ignoring", t);
            }
        });
        this.loopThread.setName("tick-" + context.hostname());
    }

    /**
     * Requests the agent to (un)freeze and returns whether the agent has already
     * reached the requested frozen state (the transition itself happens in tick()).
     */
    @Override
    public boolean setFrozen(boolean frozen) {
        synchronized (monitor) {
            if (wantFrozen != frozen) {
                wantFrozen = frozen;
                context.log(logger, LogLevel.DEBUG, wantFrozen ? "Freezing" : "Unfreezing");
                signalWorkToBeDone();
            }
            return isFrozen == frozen;
        }
    }

    /** Starts the tick thread and installs the in-container service restart helper. */
    @Override
    public void start() {
        context.log(logger, "Starting with interval " + timeBetweenEachConverge.toMillis() + " ms");
        loopThread.start();
        serviceRestarter = service -> {
            try {
                ProcessResult processResult = dockerOperations.executeCommandInContainerAsRoot(
                        context, "service", service, "restart");
                if (!processResult.isSuccess()) {
                    context.log(logger, LogLevel.ERROR, "Failed to restart service " + service + ": " + processResult);
                }
            } catch (Exception e) {
                // Best-effort: a failed restart is logged, never propagated to the scheduler.
                context.log(logger, LogLevel.ERROR, "Failed to restart service " + service, e);
            }
        };
    }

    /**
     * Stops the agent: shuts down the filebeat scheduler, terminates the tick loop and
     * blocks until both have finished. May only be called once.
     */
    @Override
    public void stop() {
        filebeatRestarter.shutdown();
        if (!terminated.compareAndSet(false, true)) {
            throw new RuntimeException("Can not re-stop a node agent.");
        }
        signalWorkToBeDone();
        // Loop until both workers are really gone; an interrupt only logs and retries.
        do {
            try {
                loopThread.join();
                filebeatRestarter.awaitTermination(Long.MAX_VALUE, TimeUnit.NANOSECONDS);
            } catch (InterruptedException e) {
                context.log(logger, LogLevel.ERROR, "Interrupted while waiting for converge thread and filebeatRestarter scheduler to shutdown");
            }
        } while (loopThread.isAlive() || !filebeatRestarter.isTerminated());
        healthChecker.ifPresent(HealthChecker::close);
        context.log(logger, "Stopped");
    }

    /** Starts services inside the container once per container lifetime (idempotent). */
    void startServicesIfNeeded() {
        if (!hasStartedServices) {
            context.log(logger, "Starting services");
            dockerOperations.startServices(context);
            hasStartedServices = true;
        }
    }

    /**
     * Runs the optional node resume program once per container lifetime, and on first
     * resume also writes metrics config and schedules a daily filebeat restart.
     */
    void resumeNodeIfNeeded(NodeSpec node) {
        if (!hasResumedNode) {
            if (!currentFilebeatRestarter.isPresent()) {
                storageMaintainer.writeMetricsConfig(context, node);
                currentFilebeatRestarter = Optional.of(filebeatRestarter.scheduleWithFixedDelay(
                        () -> serviceRestarter.accept("filebeat"), 1, 1, TimeUnit.DAYS));
            }
            context.log(logger, LogLevel.DEBUG, "Starting optional node program resume command");
            dockerOperations.resumeNode(context);
            hasResumedNode = true;
        }
    }

    /**
     * Computes which node attributes (restart/reboot generation, docker image) have
     * diverged from the wanted values and publishes the new values to the node repo.
     */
    private void updateNodeRepoWithCurrentAttributes(final NodeSpec node) {
        final NodeAttributes currentNodeAttributes = new NodeAttributes();
        final NodeAttributes newNodeAttributes = new NodeAttributes();
        if (!Objects.equals(node.getCurrentRestartGeneration(), node.getWantedRestartGeneration())) {
            currentNodeAttributes.withRestartGeneration(node.getCurrentRestartGeneration());
            newNodeAttributes.withRestartGeneration(node.getWantedRestartGeneration());
        }
        if (!Objects.equals(node.getCurrentRebootGeneration(), node.getWantedRebootGeneration())) {
            currentNodeAttributes.withRebootGeneration(node.getCurrentRebootGeneration());
            newNodeAttributes.withRebootGeneration(node.getWantedRebootGeneration());
        }
        // Only report the wanted image as current when the container is confirmed-ish running
        // (containerState UNKNOWN means "ask docker"; ABSENT/STARTING must not claim the image).
        Optional<DockerImage> actualDockerImage = node.getWantedDockerImage().filter(n -> containerState == UNKNOWN);
        if (!Objects.equals(node.getCurrentDockerImage(), actualDockerImage)) {
            currentNodeAttributes.withDockerImage(node.getCurrentDockerImage().orElse(new DockerImage("")));
            newNodeAttributes.withDockerImage(actualDockerImage.orElse(new DockerImage("")));
        }
        publishStateToNodeRepoIfChanged(currentNodeAttributes, newNodeAttributes);
    }

    /** Writes the new attributes to the node repo, but only if something actually changed. */
    private void publishStateToNodeRepoIfChanged(NodeAttributes currentAttributes, NodeAttributes newAttributes) {
        if (!currentAttributes.equals(newAttributes)) {
            context.log(logger, "Publishing new set of attributes to node repo: %s -> %s",
                    currentAttributes, newAttributes);
            nodeRepository.updateNodeAttributes(context.hostname().value(), newAttributes);
        }
    }

    /** Creates and starts the container, resetting per-container bookkeeping. */
    private void startContainer(NodeSpec node) {
        ContainerData containerData = createContainerData(context, node);
        dockerOperations.createContainer(context, node, containerData);
        dockerOperations.startContainer(context);
        lastCpuMetric = new CpuUsageReporter(); // fresh container => fresh CPU deltas
        hasStartedServices = true; // services are started by the container's init
        hasResumedNode = false;
        context.log(logger, "Container successfully started, new containerState is " + containerState);
    }

    /**
     * Removes the container if required; if it survives, restarts services inside it
     * when the restart generation has been bumped. Returns the (possibly removed) container.
     */
    private Optional<Container> removeContainerIfNeededUpdateContainerState(NodeSpec node, Optional<Container> existingContainer) {
        return existingContainer
                .flatMap(container -> removeContainerIfNeeded(node, container))
                .map(container -> {
                    shouldRestartServices(node).ifPresent(restartReason -> {
                        context.log(logger, "Will restart services: " + restartReason);
                        restartServices(node, container);
                    });
                    return container;
                });
    }

    /** Returns the reason to restart services, or empty if the restart generations match. */
    private Optional<String> shouldRestartServices(NodeSpec node) {
        if (!node.getWantedRestartGeneration().isPresent()) return Optional.empty();
        if (node.getCurrentRestartGeneration().get() < node.getWantedRestartGeneration().get()) {
            return Optional.of("Restart requested - wanted restart generation has been bumped: "
                    + node.getCurrentRestartGeneration().get() + " -> " + node.getWantedRestartGeneration().get());
        }
        return Optional.empty();
    }

    /** Restarts Vespa in the container (suspending via Orchestrator first), but only for a running, active node. */
    private void restartServices(NodeSpec node, Container existingContainer) {
        if (existingContainer.state.isRunning() && node.getState() == Node.State.active) {
            context.log(logger, "Restarting services");
            orchestratorSuspendNode();
            dockerOperations.restartVespa(context);
        }
    }

    /** Stops services inside the container; a vanished container downgrades containerState to ABSENT. */
    @Override
    public void stopServices() {
        context.log(logger, "Stopping services");
        if (containerState == ABSENT) return;
        try {
            hasStartedServices = hasResumedNode = false;
            dockerOperations.stopServices(context);
        } catch (ContainerNotFoundException e) {
            containerState = ABSENT;
        }
    }

    /** Suspends the node's services; failures other than a missing container are logged, not propagated. */
    @Override
    public void suspend() {
        context.log(logger, "Suspending services on node");
        if (containerState == ABSENT) return;
        try {
            hasResumedNode = false;
            dockerOperations.suspendNode(context);
        } catch (ContainerNotFoundException e) {
            containerState = ABSENT;
        } catch (RuntimeException e) {
            // Best-effort: continue even if suspend fails, since a failed suspend should
            // not block container removal or other progress.
            context.log(logger, LogLevel.WARNING, "Failed trying to suspend container", e);
        }
    }

    /** Returns the reason the existing container must be removed, or empty if it can be kept. */
    private Optional<String> shouldRemoveContainer(NodeSpec node, Container existingContainer) {
        final Node.State nodeState = node.getState();
        if (nodeState == Node.State.dirty || nodeState == Node.State.provisioned) {
            return Optional.of("Node in state " + nodeState + ", container should no longer be running");
        }
        if (node.getWantedDockerImage().isPresent() && !node.getWantedDockerImage().get().equals(existingContainer.image)) {
            return Optional.of("The node is supposed to run a new Docker image: "
                    + existingContainer.image.asString() + " -> " + node.getWantedDockerImage().get().asString());
        }
        if (!existingContainer.state.isRunning()) {
            return Optional.of("Container no longer running");
        }
        ContainerResources wantedContainerResources = ContainerResources.from(
                node.getMinCpuCores(), node.getMinMainMemoryAvailableGb());
        if (!wantedContainerResources.equals(existingContainer.resources)) {
            return Optional.of("Container should be running with different resource allocation, wanted: " +
                    wantedContainerResources + ", actual: " + existingContainer.resources);
        }
        if (node.getCurrentRebootGeneration() < node.getWantedRebootGeneration()) {
            return Optional.of(String.format("Container reboot wanted. Current: %d, Wanted: %d",
                    node.getCurrentRebootGeneration(), node.getWantedRebootGeneration()));
        }
        // STARTING here means the previous start attempt never completed; remove and retry.
        if (containerState == STARTING) return Optional.of("Container failed to start");
        return Optional.empty();
    }

    /**
     * Removes the container when {@link #shouldRemoveContainer} says so, after gracefully
     * suspending/stopping services where possible. Returns empty if removed, otherwise
     * the still-valid container.
     */
    private Optional<Container> removeContainerIfNeeded(NodeSpec node, Container existingContainer) {
        Optional<String> removeReason = shouldRemoveContainer(node, existingContainer);
        if (removeReason.isPresent()) {
            context.log(logger, "Will remove container: " + removeReason.get());
            if (existingContainer.state.isRunning()) {
                if (node.getState() == Node.State.active) {
                    orchestratorSuspendNode();
                }
                try {
                    if (node.getState() != Node.State.dirty) {
                        suspend();
                    }
                    stopServices();
                } catch (Exception e) {
                    // Deliberately best-effort: removal must proceed even if graceful stop fails.
                    context.log(logger, LogLevel.WARNING, "Failed stopping services, ignoring", e);
                }
            }
            stopFilebeatSchedulerIfNeeded();
            // Salvage core dumps before the container (and its filesystem view) goes away.
            storageMaintainer.handleCoreDumpsForContainer(context, node, Optional.of(existingContainer));
            dockerOperations.removeContainer(context, existingContainer);
            containerState = ABSENT;
            context.log(logger, "Container successfully removed, new containerState is " + containerState);
            return Optional.empty();
        }
        return Optional.of(existingContainer);
    }

    /** Kicks off an async pull of the wanted image if it differs from the current one. */
    private void scheduleDownLoadIfNeeded(NodeSpec node) {
        if (node.getCurrentDockerImage().equals(node.getWantedDockerImage())) return;
        if (dockerOperations.pullImageAsyncIfNeeded(node.getWantedDockerImage().get())) {
            imageBeingDownloaded = node.getWantedDockerImage().get();
        } else if (imageBeingDownloaded != null) { // Image was downloading, but now it's ready
            imageBeingDownloaded = null;
        }
    }

    /** Wakes the tick loop so the next convergence pass starts immediately. */
    private void signalWorkToBeDone() {
        synchronized (monitor) {
            if (!workToDoNow) {
                workToDoNow = true;
                context.log(logger, LogLevel.DEBUG, "Signaling work to be done");
                monitor.notifyAll();
            }
        }
    }

    /**
     * One iteration of the agent loop: waits until the converge interval has elapsed
     * (or work is signaled), applies any pending freeze change, then runs converge()
     * unless frozen. All per-pass exceptions are contained here.
     */
    void tick() {
        boolean isFrozenCopy;
        synchronized (monitor) {
            while (!workToDoNow) {
                long remainder = timeBetweenEachConverge
                        .minus(Duration.between(lastConverge, clock.instant()))
                        .toMillis();
                if (remainder > 0) {
                    try {
                        monitor.wait(remainder); // Timed wait; also woken by signalWorkToBeDone()
                    } catch (InterruptedException e) {
                        context.log(logger, LogLevel.ERROR, "Interrupted while sleeping before tick, ignoring");
                    }
                } else break;
            }
            lastConverge = clock.instant();
            workToDoNow = false;

            if (isFrozen != wantFrozen) {
                isFrozen = wantFrozen;
                context.log(logger, "Updated NodeAgent's frozen state, new value: isFrozen: " + isFrozen);
            }
            isFrozenCopy = isFrozen; // Copy out under the lock; used without it below
        }

        // NOTE(review): 'converged' is assigned but never read in this version — likely
        // leftover from metric reporting; candidate for cleanup.
        boolean converged = false;
        if (isFrozenCopy) {
            context.log(logger, LogLevel.DEBUG, "tick: isFrozen");
        } else {
            try {
                converge();
                converged = true;
            } catch (OrchestratorException e) {
                // Expected when Orchestrator denies suspend/resume; retry next tick.
                context.log(logger, e.getMessage());
            } catch (ContainerNotFoundException e) {
                containerState = ABSENT;
                context.log(logger, LogLevel.WARNING, "Container unexpectedly gone, resetting containerState to " + containerState);
            } catch (DockerException e) {
                numberOfUnhandledException++;
                context.log(logger, LogLevel.ERROR, "Caught a DockerException", e);
            } catch (Exception e) {
                numberOfUnhandledException++;
                context.log(logger, LogLevel.ERROR, "Unhandled exception, ignoring.", e);
            }
        }
    }

    // NOTE(review): appears unused within this class — verify before removing.
    private String stateDescription(Node.State state) {
        return state == null ? "[absent]" : state.toString();
    }

    /** Cancels the scheduled daily filebeat restart, if one is active. */
    private void stopFilebeatSchedulerIfNeeded() {
        if (currentFilebeatRestarter.isPresent()) {
            currentFilebeatRestarter.get().cancel(true);
            currentFilebeatRestarter = Optional.empty();
        }
    }

    /**
     * Reads docker stats for the container and pushes derived CPU/memory/disk/network
     * metrics into the container's metrics endpoint. No-op when there is no node spec
     * or the container is not in a state where stats are meaningful.
     */
    @SuppressWarnings("unchecked")
    public void updateContainerNodeMetrics() {
        final NodeSpec node = lastNode;
        if (node == null || containerState != UNKNOWN) return;

        Optional<ContainerStats> containerStats = dockerOperations.getContainerStats(context);
        if (!containerStats.isPresent()) return;

        Dimensions.Builder dimensionsBuilder = new Dimensions.Builder()
                .add("host", context.hostname().value())
                .add("role", SecretAgentCheckConfig.nodeTypeToRole(context.nodeType()))
                .add("state", node.getState().toString());
        node.getParentHostname().ifPresent(parent -> dimensionsBuilder.add("parentHostname", parent));
        node.getAllowedToBeDown().ifPresent(allowed ->
                dimensionsBuilder.add("orchestratorState", allowed ? "ALLOWED_TO_BE_DOWN" : "NO_REMARKS"));
        Dimensions dimensions = dimensionsBuilder.build();

        ContainerStats stats = containerStats.get();
        final String APP = MetricReceiverWrapper.APPLICATION_NODE;
        // The raw stats maps follow the docker stats API's key names (cpu_usage, limit, ...).
        final int totalNumCpuCores = ((List<Number>) ((Map) stats.getCpuStats().get("cpu_usage")).get("percpu_usage")).size();
        final long cpuContainerKernelTime = ((Number) ((Map) stats.getCpuStats().get("cpu_usage")).get("usage_in_kernelmode")).longValue();
        final long cpuContainerTotalTime = ((Number) ((Map) stats.getCpuStats().get("cpu_usage")).get("total_usage")).longValue();
        final long cpuSystemTotalTime = ((Number) stats.getCpuStats().get("system_cpu_usage")).longValue();
        final long memoryTotalBytes = ((Number) stats.getMemoryStats().get("limit")).longValue();
        final long memoryTotalBytesUsage = ((Number) stats.getMemoryStats().get("usage")).longValue();
        final long memoryTotalBytesCache = ((Number) ((Map) stats.getMemoryStats().get("stats")).get("cache")).longValue();
        final long diskTotalBytes = (long) (node.getMinDiskAvailableGb() * BYTES_IN_GB);
        final Optional<Long> diskTotalBytesUsed = storageMaintainer.getDiskUsageFor(context);

        lastCpuMetric.updateCpuDeltas(cpuSystemTotalTime, cpuContainerTotalTime, cpuContainerKernelTime);

        // Scale system-relative CPU usage to the cores allocated to this node.
        final double allocatedCpuRatio = node.getMinCpuCores() / totalNumCpuCores;
        double cpuUsageRatioOfAllocated = lastCpuMetric.getCpuUsageRatio() / allocatedCpuRatio;
        double cpuKernelUsageRatioOfAllocated = lastCpuMetric.getCpuKernelUsageRatio() / allocatedCpuRatio;

        long memoryTotalBytesUsed = memoryTotalBytesUsage - memoryTotalBytesCache; // exclude page cache
        double memoryUsageRatio = (double) memoryTotalBytesUsed / memoryTotalBytes;
        Optional<Double> diskUsageRatio = diskTotalBytesUsed.map(used -> (double) used / diskTotalBytes);

        List<DimensionMetrics> metrics = new ArrayList<>();
        DimensionMetrics.Builder systemMetricsBuilder = new DimensionMetrics.Builder(APP, dimensions)
                .withMetric("mem.limit", memoryTotalBytes)
                .withMetric("mem.used", memoryTotalBytesUsed)
                .withMetric("mem.util", 100 * memoryUsageRatio)
                .withMetric("cpu.util", 100 * cpuUsageRatioOfAllocated)
                .withMetric("cpu.sys.util", 100 * cpuKernelUsageRatioOfAllocated)
                .withMetric("disk.limit", diskTotalBytes);

        diskTotalBytesUsed.ifPresent(diskUsed -> systemMetricsBuilder.withMetric("disk.used", diskUsed));
        diskUsageRatio.ifPresent(diskRatio -> systemMetricsBuilder.withMetric("disk.util", 100 * diskRatio));
        metrics.add(systemMetricsBuilder.build());

        stats.getNetworks().forEach((interfaceName, interfaceStats) -> {
            Dimensions netDims = dimensionsBuilder.add("interface", interfaceName).build();
            Map<String, Number> infStats = (Map<String, Number>) interfaceStats;

            DimensionMetrics networkMetrics = new DimensionMetrics.Builder(APP, netDims)
                    .withMetric("net.in.bytes", infStats.get("rx_bytes").longValue())
                    .withMetric("net.in.errors", infStats.get("rx_errors").longValue())
                    .withMetric("net.in.dropped", infStats.get("rx_dropped").longValue())
                    .withMetric("net.out.bytes", infStats.get("tx_bytes").longValue())
                    .withMetric("net.out.errors", infStats.get("tx_errors").longValue())
                    .withMetric("net.out.dropped", infStats.get("tx_dropped").longValue())
                    .build();
            metrics.add(networkMetrics);
        });

        pushMetricsToContainer(metrics);
    }

    /** Serializes the metrics as a secret-agent report and injects it into the container via vespa-rpc-invoke. */
    private void pushMetricsToContainer(List<DimensionMetrics> metrics) {
        StringBuilder params = new StringBuilder();
        try {
            for (DimensionMetrics dimensionMetrics : metrics) {
                params.append(dimensionMetrics.toSecretAgentReport());
            }
            String wrappedMetrics = "s:" + params.toString();

            String[] command = {"vespa-rpc-invoke", "-t", "2", "tcp/localhost:19091",
                    "setExtraMetrics", wrappedMetrics};
            dockerOperations.executeCommandInContainerAsRoot(context, 5L, command);
        } catch (DockerExecTimeoutException | JsonProcessingException e) {
            // Metrics push is best-effort; failures are logged and retried on the next cycle.
            context.log(logger, LogLevel.WARNING, "Failed to push metrics to container", e);
        }
    }

    /** Asks docker for the container; caches definite absence in containerState. */
    private Optional<Container> getContainer() {
        if (containerState == ABSENT) return Optional.empty();
        Optional<Container> container = dockerOperations.getContainer(context);
        if (! container.isPresent()) containerState = ABSENT;
        return container;
    }

    @Override
    public boolean isDownloadingImage() {
        return imageBeingDownloaded != null;
    }

    /** Returns the unhandled-exception count since the last call, and resets it. */
    @Override
    public int getAndResetNumberOfUnhandledExceptions() {
        int temp = numberOfUnhandledException;
        numberOfUnhandledException = 0;
        return temp;
    }

    /**
     * Tracks CPU usage counters between metric samples and exposes container CPU usage
     * as a ratio of total system CPU time over the same interval.
     */
    class CpuUsageReporter {
        private long containerKernelUsage = 0;
        private long totalContainerUsage = 0;
        private long totalSystemUsage = 0;

        private long deltaContainerKernelUsage;
        private long deltaContainerUsage;
        private long deltaSystemUsage;

        private void updateCpuDeltas(long totalSystemUsage, long totalContainerUsage, long containerKernelUsage) {
            // First sample (totalSystemUsage == 0) yields a zero system delta -> NaN ratios below.
            deltaSystemUsage = this.totalSystemUsage == 0 ? 0 : (totalSystemUsage - this.totalSystemUsage);
            deltaContainerUsage = totalContainerUsage - this.totalContainerUsage;
            deltaContainerKernelUsage = containerKernelUsage - this.containerKernelUsage;

            this.totalSystemUsage = totalSystemUsage;
            this.totalContainerUsage = totalContainerUsage;
            this.containerKernelUsage = containerKernelUsage;
        }

        /**
         * Returns the CPU usage ratio for the docker container that this NodeAgent is managing
         * in the time between the last two times updateCpuDeltas() was called. This is calculated
         * by dividing the CPU time used by the container with the CPU time used by the entire system.
         */
        double getCpuUsageRatio() {
            return deltaSystemUsage == 0 ? Double.NaN : (double) deltaContainerUsage / deltaSystemUsage;
        }

        /** Same as {@link #getCpuUsageRatio()} but counting only kernel-mode CPU time. */
        double getCpuKernelUsageRatio() {
            return deltaSystemUsage == 0 ? Double.NaN : (double) deltaContainerKernelUsage / deltaSystemUsage;
        }
    }

    /** Asks the Orchestrator for permission to suspend; throws (OrchestratorException) on denial. */
    private void orchestratorSuspendNode() {
        context.log(logger, "Ask Orchestrator for permission to suspend node");
        orchestrator.suspend(context.hostname().value());
    }

    /** Hook for subclasses to inject files into a new container; base implementation rejects writes. */
    protected ContainerData createContainerData(NodeAgentContext context, NodeSpec node) {
        return (pathInContainer, data) -> {
            throw new UnsupportedOperationException("addFile not implemented");
        };
    }
}
class NodeAgentImpl implements NodeAgent { private static final long BYTES_IN_GB = 1_000_000_000L; private static final Logger logger = Logger.getLogger(NodeAgentImpl.class.getName()); private final AtomicBoolean terminated = new AtomicBoolean(false); private boolean isFrozen = true; private boolean wantFrozen = false; private boolean workToDoNow = true; private boolean expectNodeNotInNodeRepo = false; private final Object monitor = new Object(); private DockerImage imageBeingDownloaded = null; private final NodeAgentContext context; private final NodeRepository nodeRepository; private final Orchestrator orchestrator; private final DockerOperations dockerOperations; private final StorageMaintainer storageMaintainer; private final Clock clock; private final Duration timeBetweenEachConverge; private final Optional<AthenzCredentialsMaintainer> athenzCredentialsMaintainer; private final Optional<AclMaintainer> aclMaintainer; private int numberOfUnhandledException = 0; private Instant lastConverge; private final Thread loopThread; private final Optional<HealthChecker> healthChecker; private final ScheduledExecutorService filebeatRestarter = Executors.newScheduledThreadPool(1, ThreadFactoryFactory.getDaemonThreadFactory("filebeatrestarter")); private Consumer<String> serviceRestarter; private Optional<Future<?>> currentFilebeatRestarter = Optional.empty(); private boolean hasResumedNode = false; private boolean hasStartedServices = true; /** * ABSENT means container is definitely absent - A container that was absent will not suddenly appear without * NodeAgent explicitly starting it. * STARTING state is set just before we attempt to start a container, if successful we move to the next state. * Otherwise we can't be certain. A container that was running a minute ago may no longer be running without * NodeAgent doing anything (container could have crashed). Therefore we always have to ask docker daemon * to get updated state of the container. 
*/ enum ContainerState { ABSENT, STARTING, UNKNOWN } private ContainerState containerState = UNKNOWN; private NodeSpec lastNode = null; private CpuUsageReporter lastCpuMetric = new CpuUsageReporter(); public NodeAgentImpl( final NodeAgentContext context, final NodeRepository nodeRepository, final Orchestrator orchestrator, final DockerOperations dockerOperations, final StorageMaintainer storageMaintainer, final Clock clock, final Duration timeBetweenEachConverge, final Optional<AthenzCredentialsMaintainer> athenzCredentialsMaintainer, final Optional<AclMaintainer> aclMaintainer, final Optional<HealthChecker> healthChecker) { this.context = context; this.nodeRepository = nodeRepository; this.orchestrator = orchestrator; this.dockerOperations = dockerOperations; this.storageMaintainer = storageMaintainer; this.clock = clock; this.timeBetweenEachConverge = timeBetweenEachConverge; this.lastConverge = clock.instant(); this.athenzCredentialsMaintainer = athenzCredentialsMaintainer; this.aclMaintainer = aclMaintainer; this.healthChecker = healthChecker; this.loopThread = new Thread(() -> { try { while (!terminated.get()) tick(); } catch (Throwable t) { numberOfUnhandledException++; context.log(logger, LogLevel.ERROR, "Unhandled throwable, ignoring", t); } }); this.loopThread.setName("tick-" + context.hostname()); } @Override public boolean setFrozen(boolean frozen) { synchronized (monitor) { if (wantFrozen != frozen) { wantFrozen = frozen; context.log(logger, LogLevel.DEBUG, wantFrozen ? 
"Freezing" : "Unfreezing"); signalWorkToBeDone(); } return isFrozen == frozen; } } @Override public void start() { context.log(logger, "Starting with interval " + timeBetweenEachConverge.toMillis() + " ms"); loopThread.start(); serviceRestarter = service -> { try { ProcessResult processResult = dockerOperations.executeCommandInContainerAsRoot( context, "service", service, "restart"); if (!processResult.isSuccess()) { context.log(logger, LogLevel.ERROR, "Failed to restart service " + service + ": " + processResult); } } catch (Exception e) { context.log(logger, LogLevel.ERROR, "Failed to restart service " + service, e); } }; } @Override public void stop() { filebeatRestarter.shutdown(); if (!terminated.compareAndSet(false, true)) { throw new RuntimeException("Can not re-stop a node agent."); } signalWorkToBeDone(); do { try { loopThread.join(); filebeatRestarter.awaitTermination(Long.MAX_VALUE, TimeUnit.NANOSECONDS); } catch (InterruptedException e) { context.log(logger, LogLevel.ERROR, "Interrupted while waiting for converge thread and filebeatRestarter scheduler to shutdown"); } } while (loopThread.isAlive() || !filebeatRestarter.isTerminated()); context.log(logger, "Stopped"); } void startServicesIfNeeded() { if (!hasStartedServices) { context.log(logger, "Starting services"); dockerOperations.startServices(context); hasStartedServices = true; } } void resumeNodeIfNeeded(NodeSpec node) { if (!hasResumedNode) { if (!currentFilebeatRestarter.isPresent()) { storageMaintainer.writeMetricsConfig(context, node); currentFilebeatRestarter = Optional.of(filebeatRestarter.scheduleWithFixedDelay( () -> serviceRestarter.accept("filebeat"), 1, 1, TimeUnit.DAYS)); } context.log(logger, LogLevel.DEBUG, "Starting optional node program resume command"); dockerOperations.resumeNode(context); hasResumedNode = true; } } private void updateNodeRepoWithCurrentAttributes(final NodeSpec node) { final NodeAttributes currentNodeAttributes = new NodeAttributes(); final NodeAttributes 
newNodeAttributes = new NodeAttributes(); if (!Objects.equals(node.getCurrentRestartGeneration(), node.getWantedRestartGeneration())) { currentNodeAttributes.withRestartGeneration(node.getCurrentRestartGeneration()); newNodeAttributes.withRestartGeneration(node.getWantedRestartGeneration()); } if (!Objects.equals(node.getCurrentRebootGeneration(), node.getWantedRebootGeneration())) { currentNodeAttributes.withRebootGeneration(node.getCurrentRebootGeneration()); newNodeAttributes.withRebootGeneration(node.getWantedRebootGeneration()); } Optional<DockerImage> actualDockerImage = node.getWantedDockerImage().filter(n -> containerState == UNKNOWN); if (!Objects.equals(node.getCurrentDockerImage(), actualDockerImage)) { currentNodeAttributes.withDockerImage(node.getCurrentDockerImage().orElse(new DockerImage(""))); newNodeAttributes.withDockerImage(actualDockerImage.orElse(new DockerImage(""))); } publishStateToNodeRepoIfChanged(currentNodeAttributes, newNodeAttributes); } private void publishStateToNodeRepoIfChanged(NodeAttributes currentAttributes, NodeAttributes newAttributes) { if (!currentAttributes.equals(newAttributes)) { context.log(logger, "Publishing new set of attributes to node repo: %s -> %s", currentAttributes, newAttributes); nodeRepository.updateNodeAttributes(context.hostname().value(), newAttributes); } } private void startContainer(NodeSpec node) { ContainerData containerData = createContainerData(context, node); dockerOperations.createContainer(context, node, containerData); dockerOperations.startContainer(context); lastCpuMetric = new CpuUsageReporter(); hasStartedServices = true; hasResumedNode = false; context.log(logger, "Container successfully started, new containerState is " + containerState); } private Optional<Container> removeContainerIfNeededUpdateContainerState(NodeSpec node, Optional<Container> existingContainer) { return existingContainer .flatMap(container -> removeContainerIfNeeded(node, container)) .map(container -> { 
shouldRestartServices(node).ifPresent(restartReason -> { context.log(logger, "Will restart services: " + restartReason); restartServices(node, container); }); return container; }); } private Optional<String> shouldRestartServices(NodeSpec node) { if (!node.getWantedRestartGeneration().isPresent()) return Optional.empty(); if (node.getCurrentRestartGeneration().get() < node.getWantedRestartGeneration().get()) { return Optional.of("Restart requested - wanted restart generation has been bumped: " + node.getCurrentRestartGeneration().get() + " -> " + node.getWantedRestartGeneration().get()); } return Optional.empty(); } private void restartServices(NodeSpec node, Container existingContainer) { if (existingContainer.state.isRunning() && node.getState() == Node.State.active) { context.log(logger, "Restarting services"); orchestratorSuspendNode(); dockerOperations.restartVespa(context); } } @Override public void stopServices() { context.log(logger, "Stopping services"); if (containerState == ABSENT) return; try { hasStartedServices = hasResumedNode = false; dockerOperations.stopServices(context); } catch (ContainerNotFoundException e) { containerState = ABSENT; } } @Override public void suspend() { context.log(logger, "Suspending services on node"); if (containerState == ABSENT) return; try { hasResumedNode = false; dockerOperations.suspendNode(context); } catch (ContainerNotFoundException e) { containerState = ABSENT; } catch (RuntimeException e) { context.log(logger, LogLevel.WARNING, "Failed trying to suspend container", e); } } private Optional<String> shouldRemoveContainer(NodeSpec node, Container existingContainer) { final Node.State nodeState = node.getState(); if (nodeState == Node.State.dirty || nodeState == Node.State.provisioned) { return Optional.of("Node in state " + nodeState + ", container should no longer be running"); } if (node.getWantedDockerImage().isPresent() && !node.getWantedDockerImage().get().equals(existingContainer.image)) { return 
Optional.of("The node is supposed to run a new Docker image: " + existingContainer.image.asString() + " -> " + node.getWantedDockerImage().get().asString()); } if (!existingContainer.state.isRunning()) { return Optional.of("Container no longer running"); } ContainerResources wantedContainerResources = ContainerResources.from( node.getMinCpuCores(), node.getMinMainMemoryAvailableGb()); if (!wantedContainerResources.equals(existingContainer.resources)) { return Optional.of("Container should be running with different resource allocation, wanted: " + wantedContainerResources + ", actual: " + existingContainer.resources); } if (node.getCurrentRebootGeneration() < node.getWantedRebootGeneration()) { return Optional.of(String.format("Container reboot wanted. Current: %d, Wanted: %d", node.getCurrentRebootGeneration(), node.getWantedRebootGeneration())); } if (containerState == STARTING) return Optional.of("Container failed to start"); return Optional.empty(); } private Optional<Container> removeContainerIfNeeded(NodeSpec node, Container existingContainer) { Optional<String> removeReason = shouldRemoveContainer(node, existingContainer); if (removeReason.isPresent()) { context.log(logger, "Will remove container: " + removeReason.get()); if (existingContainer.state.isRunning()) { if (node.getState() == Node.State.active) { orchestratorSuspendNode(); } try { if (node.getState() != Node.State.dirty) { suspend(); } stopServices(); } catch (Exception e) { context.log(logger, LogLevel.WARNING, "Failed stopping services, ignoring", e); } } stopFilebeatSchedulerIfNeeded(); storageMaintainer.handleCoreDumpsForContainer(context, node, Optional.of(existingContainer)); dockerOperations.removeContainer(context, existingContainer); containerState = ABSENT; context.log(logger, "Container successfully removed, new containerState is " + containerState); return Optional.empty(); } return Optional.of(existingContainer); } private void scheduleDownLoadIfNeeded(NodeSpec node) { if 
(node.getCurrentDockerImage().equals(node.getWantedDockerImage())) return; if (dockerOperations.pullImageAsyncIfNeeded(node.getWantedDockerImage().get())) { imageBeingDownloaded = node.getWantedDockerImage().get(); } else if (imageBeingDownloaded != null) { imageBeingDownloaded = null; } } private void signalWorkToBeDone() { synchronized (monitor) { if (!workToDoNow) { workToDoNow = true; context.log(logger, LogLevel.DEBUG, "Signaling work to be done"); monitor.notifyAll(); } } } void tick() { boolean isFrozenCopy; synchronized (monitor) { while (!workToDoNow) { long remainder = timeBetweenEachConverge .minus(Duration.between(lastConverge, clock.instant())) .toMillis(); if (remainder > 0) { try { monitor.wait(remainder); } catch (InterruptedException e) { context.log(logger, LogLevel.ERROR, "Interrupted while sleeping before tick, ignoring"); } } else break; } lastConverge = clock.instant(); workToDoNow = false; if (isFrozen != wantFrozen) { isFrozen = wantFrozen; context.log(logger, "Updated NodeAgent's frozen state, new value: isFrozen: " + isFrozen); } isFrozenCopy = isFrozen; } boolean converged = false; if (isFrozenCopy) { context.log(logger, LogLevel.DEBUG, "tick: isFrozen"); } else { try { converge(); converged = true; } catch (OrchestratorException e) { context.log(logger, e.getMessage()); } catch (ContainerNotFoundException e) { containerState = ABSENT; context.log(logger, LogLevel.WARNING, "Container unexpectedly gone, resetting containerState to " + containerState); } catch (DockerException e) { numberOfUnhandledException++; context.log(logger, LogLevel.ERROR, "Caught a DockerException", e); } catch (Exception e) { numberOfUnhandledException++; context.log(logger, LogLevel.ERROR, "Unhandled exception, ignoring.", e); } } } private void logChangesToNodeSpec(NodeSpec lastNode, NodeSpec node) { StringBuilder builder = new StringBuilder(); appendIfDifferent(builder, "state", lastNode, node, NodeSpec::getState); if (builder.length() > 0) { context.log(logger, 
LogLevel.INFO, "Changes to node: " + builder.toString()); } } private static <T> String fieldDescription(T value) { return value == null ? "[absent]" : value.toString(); } private <T> void appendIfDifferent(StringBuilder builder, String name, NodeSpec oldNode, NodeSpec newNode, Function<NodeSpec, T> getter) { T oldValue = oldNode == null ? null : getter.apply(oldNode); T newValue = getter.apply(newNode); if (!Objects.equals(oldValue, newValue)) { if (builder.length() > 0) { builder.append(", "); } builder.append(name).append(" ").append(fieldDescription(oldValue)).append(" -> ").append(fieldDescription(newValue)); } } private void stopFilebeatSchedulerIfNeeded() { if (currentFilebeatRestarter.isPresent()) { currentFilebeatRestarter.get().cancel(true); currentFilebeatRestarter = Optional.empty(); } } @SuppressWarnings("unchecked") public void updateContainerNodeMetrics() { final NodeSpec node = lastNode; if (node == null || containerState != UNKNOWN) return; Optional<ContainerStats> containerStats = dockerOperations.getContainerStats(context); if (!containerStats.isPresent()) return; Dimensions.Builder dimensionsBuilder = new Dimensions.Builder() .add("host", context.hostname().value()) .add("role", SecretAgentCheckConfig.nodeTypeToRole(context.nodeType())) .add("state", node.getState().toString()); node.getParentHostname().ifPresent(parent -> dimensionsBuilder.add("parentHostname", parent)); node.getAllowedToBeDown().ifPresent(allowed -> dimensionsBuilder.add("orchestratorState", allowed ? 
"ALLOWED_TO_BE_DOWN" : "NO_REMARKS")); Dimensions dimensions = dimensionsBuilder.build(); ContainerStats stats = containerStats.get(); final String APP = MetricReceiverWrapper.APPLICATION_NODE; final int totalNumCpuCores = ((List<Number>) ((Map) stats.getCpuStats().get("cpu_usage")).get("percpu_usage")).size(); final long cpuContainerKernelTime = ((Number) ((Map) stats.getCpuStats().get("cpu_usage")).get("usage_in_kernelmode")).longValue(); final long cpuContainerTotalTime = ((Number) ((Map) stats.getCpuStats().get("cpu_usage")).get("total_usage")).longValue(); final long cpuSystemTotalTime = ((Number) stats.getCpuStats().get("system_cpu_usage")).longValue(); final long memoryTotalBytes = ((Number) stats.getMemoryStats().get("limit")).longValue(); final long memoryTotalBytesUsage = ((Number) stats.getMemoryStats().get("usage")).longValue(); final long memoryTotalBytesCache = ((Number) ((Map) stats.getMemoryStats().get("stats")).get("cache")).longValue(); final long diskTotalBytes = (long) (node.getMinDiskAvailableGb() * BYTES_IN_GB); final Optional<Long> diskTotalBytesUsed = storageMaintainer.getDiskUsageFor(context); lastCpuMetric.updateCpuDeltas(cpuSystemTotalTime, cpuContainerTotalTime, cpuContainerKernelTime); final double allocatedCpuRatio = node.getMinCpuCores() / totalNumCpuCores; double cpuUsageRatioOfAllocated = lastCpuMetric.getCpuUsageRatio() / allocatedCpuRatio; double cpuKernelUsageRatioOfAllocated = lastCpuMetric.getCpuKernelUsageRatio() / allocatedCpuRatio; long memoryTotalBytesUsed = memoryTotalBytesUsage - memoryTotalBytesCache; double memoryUsageRatio = (double) memoryTotalBytesUsed / memoryTotalBytes; Optional<Double> diskUsageRatio = diskTotalBytesUsed.map(used -> (double) used / diskTotalBytes); List<DimensionMetrics> metrics = new ArrayList<>(); DimensionMetrics.Builder systemMetricsBuilder = new DimensionMetrics.Builder(APP, dimensions) .withMetric("mem.limit", memoryTotalBytes) .withMetric("mem.used", memoryTotalBytesUsed) 
.withMetric("mem.util", 100 * memoryUsageRatio) .withMetric("cpu.util", 100 * cpuUsageRatioOfAllocated) .withMetric("cpu.sys.util", 100 * cpuKernelUsageRatioOfAllocated) .withMetric("disk.limit", diskTotalBytes); diskTotalBytesUsed.ifPresent(diskUsed -> systemMetricsBuilder.withMetric("disk.used", diskUsed)); diskUsageRatio.ifPresent(diskRatio -> systemMetricsBuilder.withMetric("disk.util", 100 * diskRatio)); metrics.add(systemMetricsBuilder.build()); stats.getNetworks().forEach((interfaceName, interfaceStats) -> { Dimensions netDims = dimensionsBuilder.add("interface", interfaceName).build(); Map<String, Number> infStats = (Map<String, Number>) interfaceStats; DimensionMetrics networkMetrics = new DimensionMetrics.Builder(APP, netDims) .withMetric("net.in.bytes", infStats.get("rx_bytes").longValue()) .withMetric("net.in.errors", infStats.get("rx_errors").longValue()) .withMetric("net.in.dropped", infStats.get("rx_dropped").longValue()) .withMetric("net.out.bytes", infStats.get("tx_bytes").longValue()) .withMetric("net.out.errors", infStats.get("tx_errors").longValue()) .withMetric("net.out.dropped", infStats.get("tx_dropped").longValue()) .build(); metrics.add(networkMetrics); }); pushMetricsToContainer(metrics); } private void pushMetricsToContainer(List<DimensionMetrics> metrics) { StringBuilder params = new StringBuilder(); try { for (DimensionMetrics dimensionMetrics : metrics) { params.append(dimensionMetrics.toSecretAgentReport()); } String wrappedMetrics = "s:" + params.toString(); String[] command = {"vespa-rpc-invoke", "-t", "2", "tcp/localhost:19091", "setExtraMetrics", wrappedMetrics}; dockerOperations.executeCommandInContainerAsRoot(context, 5L, command); } catch (DockerExecTimeoutException | JsonProcessingException e) { context.log(logger, LogLevel.WARNING, "Failed to push metrics to container", e); } } private Optional<Container> getContainer() { if (containerState == ABSENT) return Optional.empty(); Optional<Container> container = 
dockerOperations.getContainer(context); if (! container.isPresent()) containerState = ABSENT; return container; } @Override public boolean isDownloadingImage() { return imageBeingDownloaded != null; } @Override public int getAndResetNumberOfUnhandledExceptions() { int temp = numberOfUnhandledException; numberOfUnhandledException = 0; return temp; } class CpuUsageReporter { private long containerKernelUsage = 0; private long totalContainerUsage = 0; private long totalSystemUsage = 0; private long deltaContainerKernelUsage; private long deltaContainerUsage; private long deltaSystemUsage; private void updateCpuDeltas(long totalSystemUsage, long totalContainerUsage, long containerKernelUsage) { deltaSystemUsage = this.totalSystemUsage == 0 ? 0 : (totalSystemUsage - this.totalSystemUsage); deltaContainerUsage = totalContainerUsage - this.totalContainerUsage; deltaContainerKernelUsage = containerKernelUsage - this.containerKernelUsage; this.totalSystemUsage = totalSystemUsage; this.totalContainerUsage = totalContainerUsage; this.containerKernelUsage = containerKernelUsage; } /** * Returns the CPU usage ratio for the docker container that this NodeAgent is managing * in the time between the last two times updateCpuDeltas() was called. This is calculated * by dividing the CPU time used by the container with the CPU time used by the entire system. */ double getCpuUsageRatio() { return deltaSystemUsage == 0 ? Double.NaN : (double) deltaContainerUsage / deltaSystemUsage; } double getCpuKernelUsageRatio() { return deltaSystemUsage == 0 ? Double.NaN : (double) deltaContainerKernelUsage / deltaSystemUsage; } } private void orchestratorSuspendNode() { context.log(logger, "Ask Orchestrator for permission to suspend node"); orchestrator.suspend(context.hostname().value()); } protected ContainerData createContainerData(NodeAgentContext context, NodeSpec node) { return (pathInContainer, data) -> { throw new UnsupportedOperationException("addFile not implemented"); }; } }
Now that there is a `switch`/`case` over all possibilities here, the `verifyLegalOperation()` is not needed.
private void parseComplex(Element element, List<Element> children, ConfigPayloadBuilder payloadBuilder, String parentName) { String name = extractName(element); if (element.hasAttribute("index")) { throw new IllegalArgumentException("The 'index' attribute on config elements is not supported - use <item>"); } else if (element.hasAttribute("operation")) { String operation = verifyLegalOperation(element); ConfigPayloadBuilder childPayloadBuilder; switch (operation) { case "append": childPayloadBuilder = payloadBuilder.getArray(name).append(); break; case "clear": if (payloadBuilder.arrayExists(name)) payloadBuilder.clearArray(name); childPayloadBuilder = payloadBuilder; break; default: throw new RuntimeException("Unknown operation '" + operation + "'"); } for (Element child : children) { parseElement(child, childPayloadBuilder, name); } } else if ("item".equals(name)) { if (element.hasAttribute("key")) { ConfigPayloadBuilder childPayloadBuilder = payloadBuilder.getMap(parentName).get(element.getAttribute("key")); for (Element child : children) { parseElement(child, childPayloadBuilder, parentName); } } else { ConfigPayloadBuilder.Array array = payloadBuilder.getArray(parentName); ConfigPayloadBuilder childPayloadBuilder = array.append(); for (Element child : children) { parseElement(child, childPayloadBuilder, parentName); } } } else { int numMatching = 0; for (Element child : children) { numMatching += ("item".equals(child.getTagName())) ? 1 : 0; } if (numMatching == 0) { ConfigPayloadBuilder p = payloadBuilder.getObject(name); for (Element child : children) parseElement(child, p, name); } else if (numMatching == children.size()) { for (Element child : children) { parseElement(child, payloadBuilder, name); } } else { throw new ConfigurationRuntimeException("<item> is a reserved keyword for array and map elements"); } } }
default:
private void parseComplex(Element element, List<Element> children, ConfigPayloadBuilder payloadBuilder, String parentName) { String name = extractName(element); if (element.hasAttribute("index")) { throw new IllegalArgumentException("The 'index' attribute on config elements is not supported - use <item>"); } else if (element.hasAttribute("operation")) { ConfigPayloadBuilder childPayloadBuilder = getBuilderForInnerArray(element, payloadBuilder, name); for (Element child : children) { parseElement(child, childPayloadBuilder, name); } } else if ("item".equals(name)) { if (element.hasAttribute("key")) { ConfigPayloadBuilder childPayloadBuilder = payloadBuilder.getMap(parentName).get(element.getAttribute("key")); for (Element child : children) { parseElement(child, childPayloadBuilder, parentName); } } else { ConfigPayloadBuilder.Array array = payloadBuilder.getArray(parentName); ConfigPayloadBuilder childPayloadBuilder = array.append(); for (Element child : children) { parseElement(child, childPayloadBuilder, parentName); } } } else { int numMatching = 0; for (Element child : children) { numMatching += ("item".equals(child.getTagName())) ? 1 : 0; } if (numMatching == 0) { ConfigPayloadBuilder p = payloadBuilder.getObject(name); for (Element child : children) parseElement(child, p, name); } else if (numMatching == children.size()) { for (Element child : children) { parseElement(child, payloadBuilder, name); } } else { throw new ConfigurationRuntimeException("<item> is a reserved keyword for array and map elements"); } } }
class DomConfigPayloadBuilder { private static final Logger log = Logger.getLogger(DomConfigPayloadBuilder.class.getPackage().toString()); private static final Pattern namePattern = ConfigDefinition.namePattern; private static final Pattern namespacePattern = ConfigDefinition.namespacePattern; /** The config definition, not null if not found */ private final ConfigDefinition configDefinition; public DomConfigPayloadBuilder(ConfigDefinition configDefinition) { this.configDefinition = configDefinition; } /** * Builds a {@link ConfigPayloadBuilder} representing the input 'config' xml element. * * @param configE The 'config' xml element * @return a new payload builder built from xml. */ public ConfigPayloadBuilder build(Element configE) { parseConfigName(configE); ConfigPayloadBuilder payloadBuilder = new ConfigPayloadBuilder(configDefinition); for (Element child : XML.getChildren(configE)) { parseElement(child, payloadBuilder, null); } return payloadBuilder; } public static ConfigDefinitionKey parseConfigName(Element configE) { if (!configE.getNodeName().equals("config")) { throw new ConfigurationRuntimeException("The root element must be 'config', but was '" + configE.getNodeName() + "'."); } if (!configE.hasAttribute("name")) { throw new ConfigurationRuntimeException ("The 'config' element must have a 'name' attribute that matches the name of the config definition."); } String xmlName = configE.getAttribute("name"); final boolean xmlNamespaceAttributeExists = configE.hasAttribute("namespace"); String xmlNamespace = null; if (xmlName.contains(".")) { Tuple2<String, String> t = ConfigUtils.getNameAndNamespaceFromString(xmlName); xmlName = t.first; xmlNamespace = t.second; } else { if (!xmlNamespaceAttributeExists) { log.log(LogLevel.WARNING, "No namespace in 'config name=" + xmlName + "', please specify one"); } } if (!validName(xmlName)) { throw new ConfigurationRuntimeException("The config name '" + xmlName + "' contains illegal characters. 
Only names with the pattern " + namePattern.toString() + " are legal."); } if (xmlNamespace == null) { xmlNamespace = configE.getAttribute("namespace"); if (xmlNamespace == null || xmlNamespace.isEmpty()) { xmlNamespace = CNode.DEFAULT_NAMESPACE; } } if (!validNamespace(xmlNamespace)) { throw new ConfigurationRuntimeException("The config namespace '" + xmlNamespace + "' contains illegal characters. Only namespaces with the pattern " + namespacePattern.toString() + " are legal."); } return new ConfigDefinitionKey(xmlName, xmlNamespace); } private static boolean validName(String name) { Matcher m = namePattern.matcher(name); return m.matches(); } private static boolean validNamespace(String namespace) { Matcher m = namespacePattern.matcher(namespace); return m.matches(); } private String extractName(Element element) { String initial = element.getNodeName(); if (initial.indexOf('-') < 0) { return initial; } StringBuilder buf = new StringBuilder(); boolean upcase = false; for (char ch : initial.toCharArray()) { if (ch == '-') { upcase = true; } else if (upcase && ch >= 'a' && ch <= 'z') { buf.append((char)('A' + ch - 'a')); upcase = false; } else { buf.append(ch); upcase = false; } } return buf.toString(); } /** * Parse leaf value in an xml tree */ private void parseLeaf(Element element, ConfigPayloadBuilder payloadBuilder, String parentName) { String name = extractName(element); String value = XML.getValue(element); if (value == null) { throw new ConfigurationRuntimeException("Element '" + name + "' must have either children or a value"); } if (element.hasAttribute("index")) { throw new IllegalArgumentException("The 'index' attribute on config elements is not supported - use <item>"); } else if (element.hasAttribute("operation")) { verifyLegalOperation(element); ConfigPayloadBuilder.Array a = payloadBuilder.getArray(name); a.append(value); } else if ("item".equals(name)) { if (parentName == null) throw new ConfigurationRuntimeException("<item> is a reserved keyword 
for array and map elements"); if (element.hasAttribute("key")) { payloadBuilder.getMap(parentName).put(element.getAttribute("key"), value); } else { payloadBuilder.getArray(parentName).append(value); } } else { payloadBuilder.setField(name, value); } } /** * Adds the values and children (recursively) in the given xml element to the given {@link ConfigPayloadBuilder}. * @param currElem The element representing a config parameter. * @param payloadBuilder The builder to use when adding elements. */ private void parseElement(Element currElem, ConfigPayloadBuilder payloadBuilder, String parentName) { List<Element> children = XML.getChildren(currElem); try { if (children.isEmpty()) { parseLeaf(currElem, payloadBuilder, parentName); } else { parseComplex(currElem, children, payloadBuilder, parentName); } } catch (Exception exception) { throw new ConfigurationRuntimeException("Error parsing element at " + XML.getNodePath(currElem, " > ") + ": " + Exceptions.toMessageString(exception)); } } private String verifyLegalOperation(Element currElem) { String operation = currElem.getAttribute("operation"); if (! Arrays.asList("append", "clear").contains(operation)) throw new ConfigurationRuntimeException("The supported array operations are 'append' and 'clear', got '" + operation + "' at XML node '" + XML.getNodePath(currElem, " > ") + "'."); return operation; } }
class DomConfigPayloadBuilder { private static final Logger log = Logger.getLogger(DomConfigPayloadBuilder.class.getPackage().toString()); private static final Pattern namePattern = ConfigDefinition.namePattern; private static final Pattern namespacePattern = ConfigDefinition.namespacePattern; /** The config definition, not null if not found */ private final ConfigDefinition configDefinition; public DomConfigPayloadBuilder(ConfigDefinition configDefinition) { this.configDefinition = configDefinition; } /** * Builds a {@link ConfigPayloadBuilder} representing the input 'config' xml element. * * @param configE The 'config' xml element * @return a new payload builder built from xml. */ public ConfigPayloadBuilder build(Element configE) { parseConfigName(configE); ConfigPayloadBuilder payloadBuilder = new ConfigPayloadBuilder(configDefinition); for (Element child : XML.getChildren(configE)) { parseElement(child, payloadBuilder, null); } return payloadBuilder; } public static ConfigDefinitionKey parseConfigName(Element configE) { if (!configE.getNodeName().equals("config")) { throw new ConfigurationRuntimeException("The root element must be 'config', but was '" + configE.getNodeName() + "'."); } if (!configE.hasAttribute("name")) { throw new ConfigurationRuntimeException ("The 'config' element must have a 'name' attribute that matches the name of the config definition."); } String xmlName = configE.getAttribute("name"); final boolean xmlNamespaceAttributeExists = configE.hasAttribute("namespace"); String xmlNamespace = null; if (xmlName.contains(".")) { Tuple2<String, String> t = ConfigUtils.getNameAndNamespaceFromString(xmlName); xmlName = t.first; xmlNamespace = t.second; } else { if (!xmlNamespaceAttributeExists) { log.log(LogLevel.WARNING, "No namespace in 'config name=" + xmlName + "', please specify one"); } } if (!validName(xmlName)) { throw new ConfigurationRuntimeException("The config name '" + xmlName + "' contains illegal characters. 
Only names with the pattern " + namePattern.toString() + " are legal."); } if (xmlNamespace == null) { xmlNamespace = configE.getAttribute("namespace"); if (xmlNamespace == null || xmlNamespace.isEmpty()) { xmlNamespace = CNode.DEFAULT_NAMESPACE; } } if (!validNamespace(xmlNamespace)) { throw new ConfigurationRuntimeException("The config namespace '" + xmlNamespace + "' contains illegal characters. Only namespaces with the pattern " + namespacePattern.toString() + " are legal."); } return new ConfigDefinitionKey(xmlName, xmlNamespace); } private static boolean validName(String name) { Matcher m = namePattern.matcher(name); return m.matches(); } private static boolean validNamespace(String namespace) { Matcher m = namespacePattern.matcher(namespace); return m.matches(); } private String extractName(Element element) { String initial = element.getNodeName(); if (initial.indexOf('-') < 0) { return initial; } StringBuilder buf = new StringBuilder(); boolean upcase = false; for (char ch : initial.toCharArray()) { if (ch == '-') { upcase = true; } else if (upcase && ch >= 'a' && ch <= 'z') { buf.append((char)('A' + ch - 'a')); upcase = false; } else { buf.append(ch); upcase = false; } } return buf.toString(); } /** * Parse leaf value in an xml tree */ private void parseLeaf(Element element, ConfigPayloadBuilder payloadBuilder, String parentName) { String name = extractName(element); String value = XML.getValue(element); if (value == null) { throw new ConfigurationRuntimeException("Element '" + name + "' must have either children or a value"); } if (element.hasAttribute("index")) { throw new IllegalArgumentException("The 'index' attribute on config elements is not supported - use <item>"); } else if (element.hasAttribute("operation")) { verifyLegalOperation(element); ConfigPayloadBuilder.Array a = payloadBuilder.getArray(name); a.append(value); } else if ("item".equals(name)) { if (parentName == null) throw new ConfigurationRuntimeException("<item> is a reserved keyword 
for array and map elements"); if (element.hasAttribute("key")) { payloadBuilder.getMap(parentName).put(element.getAttribute("key"), value); } else { payloadBuilder.getArray(parentName).append(value); } } else { payloadBuilder.setField(name, value); } } /** * Adds the values and children (recursively) in the given xml element to the given {@link ConfigPayloadBuilder}. * @param currElem The element representing a config parameter. * @param payloadBuilder The builder to use when adding elements. */ private void parseElement(Element currElem, ConfigPayloadBuilder payloadBuilder, String parentName) { List<Element> children = XML.getChildren(currElem); try { if (children.isEmpty()) { parseLeaf(currElem, payloadBuilder, parentName); } else { parseComplex(currElem, children, payloadBuilder, parentName); } } catch (Exception exception) { throw new ConfigurationRuntimeException("Error parsing element at " + XML.getNodePath(currElem, " > ") + ": " + Exceptions.toMessageString(exception)); } } private void verifyLegalOperation(Element currElem) { String operation = currElem.getAttribute("operation"); if (! operation.equalsIgnoreCase("append")) throw new ConfigurationRuntimeException("The only supported array operation is 'append', got '" + operation + "' at XML node '" + XML.getNodePath(currElem, " > ") + "'."); } private ConfigPayloadBuilder getBuilderForInnerArray(Element element, ConfigPayloadBuilder payloadBuilder, String arrayName) { String operation = element.getAttribute("operation").toLowerCase(); ConfigPayloadBuilder arrayPayloadBuilder; switch (operation) { case "append": arrayPayloadBuilder = payloadBuilder.getArray(arrayName).append(); break; case "clear": payloadBuilder.removeArray(arrayName); arrayPayloadBuilder = payloadBuilder; break; default: throw new RuntimeException("Unknown operation '" + operation + "' at XML node '" + XML.getNodePath(element, " > ") + "'."); } return arrayPayloadBuilder; } }
Lowercase?
private void parseComplex(Element element, List<Element> children, ConfigPayloadBuilder payloadBuilder, String parentName) { String name = extractName(element); if (element.hasAttribute("index")) { throw new IllegalArgumentException("The 'index' attribute on config elements is not supported - use <item>"); } else if (element.hasAttribute("operation")) { String operation = element.getAttribute("operation"); ConfigPayloadBuilder childPayloadBuilder; switch (operation) { case "append": childPayloadBuilder = payloadBuilder.getArray(name).append(); break; case "clear": payloadBuilder.removeArray(name); childPayloadBuilder = payloadBuilder; break; default: throw new RuntimeException("Unknown operation '" + operation + "'"); } for (Element child : children) { parseElement(child, childPayloadBuilder, name); } } else if ("item".equals(name)) { if (element.hasAttribute("key")) { ConfigPayloadBuilder childPayloadBuilder = payloadBuilder.getMap(parentName).get(element.getAttribute("key")); for (Element child : children) { parseElement(child, childPayloadBuilder, parentName); } } else { ConfigPayloadBuilder.Array array = payloadBuilder.getArray(parentName); ConfigPayloadBuilder childPayloadBuilder = array.append(); for (Element child : children) { parseElement(child, childPayloadBuilder, parentName); } } } else { int numMatching = 0; for (Element child : children) { numMatching += ("item".equals(child.getTagName())) ? 1 : 0; } if (numMatching == 0) { ConfigPayloadBuilder p = payloadBuilder.getObject(name); for (Element child : children) parseElement(child, p, name); } else if (numMatching == children.size()) { for (Element child : children) { parseElement(child, payloadBuilder, name); } } else { throw new ConfigurationRuntimeException("<item> is a reserved keyword for array and map elements"); } } }
String operation = element.getAttribute("operation");
private void parseComplex(Element element, List<Element> children, ConfigPayloadBuilder payloadBuilder, String parentName) { String name = extractName(element); if (element.hasAttribute("index")) { throw new IllegalArgumentException("The 'index' attribute on config elements is not supported - use <item>"); } else if (element.hasAttribute("operation")) { ConfigPayloadBuilder childPayloadBuilder = getBuilderForInnerArray(element, payloadBuilder, name); for (Element child : children) { parseElement(child, childPayloadBuilder, name); } } else if ("item".equals(name)) { if (element.hasAttribute("key")) { ConfigPayloadBuilder childPayloadBuilder = payloadBuilder.getMap(parentName).get(element.getAttribute("key")); for (Element child : children) { parseElement(child, childPayloadBuilder, parentName); } } else { ConfigPayloadBuilder.Array array = payloadBuilder.getArray(parentName); ConfigPayloadBuilder childPayloadBuilder = array.append(); for (Element child : children) { parseElement(child, childPayloadBuilder, parentName); } } } else { int numMatching = 0; for (Element child : children) { numMatching += ("item".equals(child.getTagName())) ? 1 : 0; } if (numMatching == 0) { ConfigPayloadBuilder p = payloadBuilder.getObject(name); for (Element child : children) parseElement(child, p, name); } else if (numMatching == children.size()) { for (Element child : children) { parseElement(child, payloadBuilder, name); } } else { throw new ConfigurationRuntimeException("<item> is a reserved keyword for array and map elements"); } } }
class DomConfigPayloadBuilder { private static final Logger log = Logger.getLogger(DomConfigPayloadBuilder.class.getPackage().toString()); private static final Pattern namePattern = ConfigDefinition.namePattern; private static final Pattern namespacePattern = ConfigDefinition.namespacePattern; /** The config definition, not null if not found */ private final ConfigDefinition configDefinition; public DomConfigPayloadBuilder(ConfigDefinition configDefinition) { this.configDefinition = configDefinition; } /** * Builds a {@link ConfigPayloadBuilder} representing the input 'config' xml element. * * @param configE The 'config' xml element * @return a new payload builder built from xml. */ public ConfigPayloadBuilder build(Element configE) { parseConfigName(configE); ConfigPayloadBuilder payloadBuilder = new ConfigPayloadBuilder(configDefinition); for (Element child : XML.getChildren(configE)) { parseElement(child, payloadBuilder, null); } return payloadBuilder; } public static ConfigDefinitionKey parseConfigName(Element configE) { if (!configE.getNodeName().equals("config")) { throw new ConfigurationRuntimeException("The root element must be 'config', but was '" + configE.getNodeName() + "'."); } if (!configE.hasAttribute("name")) { throw new ConfigurationRuntimeException ("The 'config' element must have a 'name' attribute that matches the name of the config definition."); } String xmlName = configE.getAttribute("name"); final boolean xmlNamespaceAttributeExists = configE.hasAttribute("namespace"); String xmlNamespace = null; if (xmlName.contains(".")) { Tuple2<String, String> t = ConfigUtils.getNameAndNamespaceFromString(xmlName); xmlName = t.first; xmlNamespace = t.second; } else { if (!xmlNamespaceAttributeExists) { log.log(LogLevel.WARNING, "No namespace in 'config name=" + xmlName + "', please specify one"); } } if (!validName(xmlName)) { throw new ConfigurationRuntimeException("The config name '" + xmlName + "' contains illegal characters. 
Only names with the pattern " + namePattern.toString() + " are legal."); } if (xmlNamespace == null) { xmlNamespace = configE.getAttribute("namespace"); if (xmlNamespace == null || xmlNamespace.isEmpty()) { xmlNamespace = CNode.DEFAULT_NAMESPACE; } } if (!validNamespace(xmlNamespace)) { throw new ConfigurationRuntimeException("The config namespace '" + xmlNamespace + "' contains illegal characters. Only namespaces with the pattern " + namespacePattern.toString() + " are legal."); } return new ConfigDefinitionKey(xmlName, xmlNamespace); } private static boolean validName(String name) { Matcher m = namePattern.matcher(name); return m.matches(); } private static boolean validNamespace(String namespace) { Matcher m = namespacePattern.matcher(namespace); return m.matches(); } private String extractName(Element element) { String initial = element.getNodeName(); if (initial.indexOf('-') < 0) { return initial; } StringBuilder buf = new StringBuilder(); boolean upcase = false; for (char ch : initial.toCharArray()) { if (ch == '-') { upcase = true; } else if (upcase && ch >= 'a' && ch <= 'z') { buf.append((char)('A' + ch - 'a')); upcase = false; } else { buf.append(ch); upcase = false; } } return buf.toString(); } /** * Parse leaf value in an xml tree */ private void parseLeaf(Element element, ConfigPayloadBuilder payloadBuilder, String parentName) { String name = extractName(element); String value = XML.getValue(element); if (value == null) { throw new ConfigurationRuntimeException("Element '" + name + "' must have either children or a value"); } if (element.hasAttribute("index")) { throw new IllegalArgumentException("The 'index' attribute on config elements is not supported - use <item>"); } else if (element.hasAttribute("operation")) { verifyLegalOperation(element); ConfigPayloadBuilder.Array a = payloadBuilder.getArray(name); a.append(value); } else if ("item".equals(name)) { if (parentName == null) throw new ConfigurationRuntimeException("<item> is a reserved keyword 
for array and map elements"); if (element.hasAttribute("key")) { payloadBuilder.getMap(parentName).put(element.getAttribute("key"), value); } else { payloadBuilder.getArray(parentName).append(value); } } else { payloadBuilder.setField(name, value); } } /** * Adds the values and children (recursively) in the given xml element to the given {@link ConfigPayloadBuilder}. * @param currElem The element representing a config parameter. * @param payloadBuilder The builder to use when adding elements. */ private void parseElement(Element currElem, ConfigPayloadBuilder payloadBuilder, String parentName) { List<Element> children = XML.getChildren(currElem); try { if (children.isEmpty()) { parseLeaf(currElem, payloadBuilder, parentName); } else { parseComplex(currElem, children, payloadBuilder, parentName); } } catch (Exception exception) { throw new ConfigurationRuntimeException("Error parsing element at " + XML.getNodePath(currElem, " > ") + ": " + Exceptions.toMessageString(exception)); } } private String verifyLegalOperation(Element currElem) { String operation = currElem.getAttribute("operation"); if (! Arrays.asList("append", "clear").contains(operation.toLowerCase())) throw new ConfigurationRuntimeException("The supported array operations are 'append' and 'clear', got '" + operation + "' at XML node '" + XML.getNodePath(currElem, " > ") + "'."); return operation; } }
class DomConfigPayloadBuilder { private static final Logger log = Logger.getLogger(DomConfigPayloadBuilder.class.getPackage().toString()); private static final Pattern namePattern = ConfigDefinition.namePattern; private static final Pattern namespacePattern = ConfigDefinition.namespacePattern; /** The config definition, not null if not found */ private final ConfigDefinition configDefinition; public DomConfigPayloadBuilder(ConfigDefinition configDefinition) { this.configDefinition = configDefinition; } /** * Builds a {@link ConfigPayloadBuilder} representing the input 'config' xml element. * * @param configE The 'config' xml element * @return a new payload builder built from xml. */ public ConfigPayloadBuilder build(Element configE) { parseConfigName(configE); ConfigPayloadBuilder payloadBuilder = new ConfigPayloadBuilder(configDefinition); for (Element child : XML.getChildren(configE)) { parseElement(child, payloadBuilder, null); } return payloadBuilder; } public static ConfigDefinitionKey parseConfigName(Element configE) { if (!configE.getNodeName().equals("config")) { throw new ConfigurationRuntimeException("The root element must be 'config', but was '" + configE.getNodeName() + "'."); } if (!configE.hasAttribute("name")) { throw new ConfigurationRuntimeException ("The 'config' element must have a 'name' attribute that matches the name of the config definition."); } String xmlName = configE.getAttribute("name"); final boolean xmlNamespaceAttributeExists = configE.hasAttribute("namespace"); String xmlNamespace = null; if (xmlName.contains(".")) { Tuple2<String, String> t = ConfigUtils.getNameAndNamespaceFromString(xmlName); xmlName = t.first; xmlNamespace = t.second; } else { if (!xmlNamespaceAttributeExists) { log.log(LogLevel.WARNING, "No namespace in 'config name=" + xmlName + "', please specify one"); } } if (!validName(xmlName)) { throw new ConfigurationRuntimeException("The config name '" + xmlName + "' contains illegal characters. 
Only names with the pattern " + namePattern.toString() + " are legal."); } if (xmlNamespace == null) { xmlNamespace = configE.getAttribute("namespace"); if (xmlNamespace == null || xmlNamespace.isEmpty()) { xmlNamespace = CNode.DEFAULT_NAMESPACE; } } if (!validNamespace(xmlNamespace)) { throw new ConfigurationRuntimeException("The config namespace '" + xmlNamespace + "' contains illegal characters. Only namespaces with the pattern " + namespacePattern.toString() + " are legal."); } return new ConfigDefinitionKey(xmlName, xmlNamespace); } private static boolean validName(String name) { Matcher m = namePattern.matcher(name); return m.matches(); } private static boolean validNamespace(String namespace) { Matcher m = namespacePattern.matcher(namespace); return m.matches(); } private String extractName(Element element) { String initial = element.getNodeName(); if (initial.indexOf('-') < 0) { return initial; } StringBuilder buf = new StringBuilder(); boolean upcase = false; for (char ch : initial.toCharArray()) { if (ch == '-') { upcase = true; } else if (upcase && ch >= 'a' && ch <= 'z') { buf.append((char)('A' + ch - 'a')); upcase = false; } else { buf.append(ch); upcase = false; } } return buf.toString(); } /** * Parse leaf value in an xml tree */ private void parseLeaf(Element element, ConfigPayloadBuilder payloadBuilder, String parentName) { String name = extractName(element); String value = XML.getValue(element); if (value == null) { throw new ConfigurationRuntimeException("Element '" + name + "' must have either children or a value"); } if (element.hasAttribute("index")) { throw new IllegalArgumentException("The 'index' attribute on config elements is not supported - use <item>"); } else if (element.hasAttribute("operation")) { verifyLegalOperation(element); ConfigPayloadBuilder.Array a = payloadBuilder.getArray(name); a.append(value); } else if ("item".equals(name)) { if (parentName == null) throw new ConfigurationRuntimeException("<item> is a reserved keyword 
for array and map elements"); if (element.hasAttribute("key")) { payloadBuilder.getMap(parentName).put(element.getAttribute("key"), value); } else { payloadBuilder.getArray(parentName).append(value); } } else { payloadBuilder.setField(name, value); } } /** * Adds the values and children (recursively) in the given xml element to the given {@link ConfigPayloadBuilder}. * @param currElem The element representing a config parameter. * @param payloadBuilder The builder to use when adding elements. */ private void parseElement(Element currElem, ConfigPayloadBuilder payloadBuilder, String parentName) { List<Element> children = XML.getChildren(currElem); try { if (children.isEmpty()) { parseLeaf(currElem, payloadBuilder, parentName); } else { parseComplex(currElem, children, payloadBuilder, parentName); } } catch (Exception exception) { throw new ConfigurationRuntimeException("Error parsing element at " + XML.getNodePath(currElem, " > ") + ": " + Exceptions.toMessageString(exception)); } } private void verifyLegalOperation(Element currElem) { String operation = currElem.getAttribute("operation"); if (! operation.equalsIgnoreCase("append")) throw new ConfigurationRuntimeException("The only supported array operation is 'append', got '" + operation + "' at XML node '" + XML.getNodePath(currElem, " > ") + "'."); } private ConfigPayloadBuilder getBuilderForInnerArray(Element element, ConfigPayloadBuilder payloadBuilder, String arrayName) { String operation = element.getAttribute("operation").toLowerCase(); ConfigPayloadBuilder arrayPayloadBuilder; switch (operation) { case "append": arrayPayloadBuilder = payloadBuilder.getArray(arrayName).append(); break; case "clear": payloadBuilder.removeArray(arrayName); arrayPayloadBuilder = payloadBuilder; break; default: throw new RuntimeException("Unknown operation '" + operation + "' at XML node '" + XML.getNodePath(element, " > ") + "'."); } return arrayPayloadBuilder; } }