comment
stringlengths
1
45k
method_body
stringlengths
23
281k
target_code
stringlengths
0
5.16k
method_body_after
stringlengths
12
281k
context_before
stringlengths
8
543k
context_after
stringlengths
8
543k
why do you get() then set() the same thing?
public void run() { if (queue == null) { throw new NullPointerException("channel is not allowed to be null"); } List<ItemOrList> drainList = new ArrayList<>(queue.size() + 1); try { for (; ; ) { drainList.clear(); drainList.add(queue.take()); queue.drainTo(drainList); for (int i = 0; i < drainList.size(); i++) { ItemOrList o = drainList.get(i); drainList.set(i, o); if (o.item != null) { for (LogHandler handler : handlers) { handler.handle(o.item); } } else if (o.list != null) { for (LogHandler handler : handlers) { handler.handle(o.list); } } else { throw new IllegalArgumentException("not LogMessage or List: " + o); } } } } catch (InterruptedException e) { } catch (Throwable t) { if (fatalErrorHandler != null) { fatalErrorHandler.handle(t, null); } } finally { log.fine("Handler thread " + getName() + " exiting, removing handlers"); for (LogHandler handler : handlers) { log.fine("Removing handler " + handler); handler.close(); } handlers.clear(); log.fine("Handler thread " + getName() + " done"); } }
ItemOrList o = drainList.get(i);
public void run() { if (queue == null) { throw new NullPointerException("channel is not allowed to be null"); } List<ItemOrList> drainList = new ArrayList<>(queue.size() + 1); try { for (; ; ) { drainList.clear(); drainList.add(queue.take()); queue.drainTo(drainList); for (int i = 0; i < drainList.size(); i++) { ItemOrList o = drainList.get(i); drainList.set(i, null); if (o.item != null) { for (LogHandler handler : handlers) { handler.handle(o.item); } } else if (o.list != null) { for (LogHandler handler : handlers) { handler.handle(o.list); } } else { throw new IllegalArgumentException("not LogMessage or List: " + o); } } } } catch (InterruptedException e) { } catch (Throwable t) { if (fatalErrorHandler != null) { fatalErrorHandler.handle(t, null); } } finally { log.fine("Handler thread " + getName() + " exiting, removing handlers"); for (LogHandler handler : handlers) { log.fine("Removing handler " + handler); handler.close(); } handlers.clear(); log.fine("Handler thread " + getName() + " done"); } }
class ItemOrList { final LogMessage item; final List<LogMessage> list; ItemOrList(LogMessage i) { this.item = i; this.list = null; } ItemOrList(List<LogMessage> l) { this.item = null; this.list = l; } public String toString() { return "item=" + item + ", list=" + list; } }
class ItemOrList { final LogMessage item; final List<LogMessage> list; ItemOrList(LogMessage i) { this.item = i; this.list = null; } ItemOrList(List<LogMessage> l) { this.item = null; this.list = l; } public String toString() { return "item=" + item + ", list=" + list; } }
Good point.
private void run() { try { while (true) { while (maintenance()) { waitForTrigger(2000); } waitForTrigger(2000); } } catch (Exception e) { System.err.println("Fatal exception in FilesArchived-maintainer thread: "+e); } }
while (maintenance()) {
private void run() { try { while (true) { maintenance(); waitForTrigger(2000); } } catch (Exception e) { System.err.println("Fatal exception in FilesArchived-maintainer thread: "+e); } }
class FilesArchived { private static final Logger log = Logger.getLogger(FilesArchived.class.getName()); /** * File instance representing root directory of archive */ private final File root; private final Object mutex = new Object(); private List<LogFile> knownFiles; public static final long compressAfterMillis = 2L * 3600 * 1000; private static final long maxAgeDays = 30; private static final long sizeLimit = 30L * (1L << 30); private void waitForTrigger(long milliS) throws InterruptedException { synchronized (mutex) { mutex.wait(milliS); } } /** * Creates an instance of FilesArchive managing the given directory */ public FilesArchived(File rootDir) { this.root = rootDir; rescan(); Thread thread = new Thread(this::run); thread.setDaemon(true); thread.setName("FilesArchived-maintainer"); thread.start(); } public String toString() { return FilesArchived.class.getName() + ": root=" + root; } public synchronized int highestGen(String prefix) { int gen = 0; for (LogFile lf : knownFiles) { if (prefix.equals(lf.prefix)) { gen = Math.max(gen, lf.generation); } } return gen; } public void triggerMaintenance() { synchronized (mutex) { mutex.notifyAll(); } } synchronized boolean maintenance() { boolean action = false; rescan(); if (removeOlderThan(maxAgeDays)) { action = true; rescan(); } if (compressOldFiles()) { action = true; rescan(); } long days = maxAgeDays; while (tooMuchDiskUsage() && (--days > 1)) { if (removeOlderThan(days)) { action = true; rescan(); } } return action; } private void rescan() { knownFiles = scanDir(root); } boolean tooMuchDiskUsage() { long sz = sumFileSizes(); return sz > sizeLimit; } private boolean olderThan(LogFile lf, long days, long now) { long mtime = lf.path.lastModified(); long diff = now - mtime; return (diff > days * 86400L * 1000L); } private boolean removeOlderThan(long days) { boolean action = false; long now = System.currentTimeMillis(); for (LogFile lf : knownFiles) { if (olderThan(lf, days, now)) { lf.path.delete(); 
log.info("Deleted: "+lf.path); action = true; } } return action; } private boolean compressOldFiles() { long now = System.currentTimeMillis(); int count = 0; for (LogFile lf : knownFiles) { if (lf.canCompress(now) && (count++ < 5)) { compress(lf.path); } } return count > 0; } private void compress(File oldFile) { File gzippedFile = new File(oldFile.getPath() + ".gz"); try (GZIPOutputStream compressor = new GZIPOutputStream(new FileOutputStream(gzippedFile), 0x100000); FileInputStream inputStream = new FileInputStream(oldFile)) { long mtime = oldFile.lastModified(); byte [] buffer = new byte[0x100000]; for (int read = inputStream.read(buffer); read > 0; read = inputStream.read(buffer)) { compressor.write(buffer, 0, read); } compressor.finish(); compressor.flush(); oldFile.delete(); gzippedFile.setLastModified(mtime); log.info("Compressed: "+gzippedFile); } catch (IOException e) { log.warning("Got '" + e + "' while compressing '" + oldFile.getPath() + "'."); } } long sumFileSizes() { long sum = 0; for (LogFile lf : knownFiles) { sum += lf.path.length(); } return sum; } private static final Pattern dateFormatRegexp = Pattern.compile(".*/" + "[0-9][0-9][0-9][0-9]/" + "[0-9][0-9]/" + "[0-9][0-9]/" + "[0-9][0-9]-" + "[0-9].*"); private static List<LogFile> scanDir(File top) { List<LogFile> retval = new ArrayList<>(); String[] names = top.list(); if (names != null) { for (String name : names) { File sub = new File(top, name); if (sub.isFile()) { String pathName = sub.toString(); if (dateFormatRegexp.matcher(pathName).matches()) { retval.add(new LogFile(sub)); } else { log.warning("skipping file not matching log archive pattern: "+pathName); } } else if (sub.isDirectory()) { retval.addAll(scanDir(sub)); } } } return retval; } static class LogFile { public final File path; public final String prefix; public final int generation; public final boolean zsuff; public boolean canCompress(long now) { if (zsuff) return false; if (! 
path.isFile()) return false; long diff = now - path.lastModified(); if (diff < compressAfterMillis) return false; return true; } private static int generationOf(String name) { int dash = name.lastIndexOf('-'); if (dash < 0) return 0; String suff = name.substring(dash + 1); int r = 0; for (char ch : suff.toCharArray()) { if (ch >= '0' && ch <= '9') { r *= 10; r += (ch - '0'); } else { break; } } return r; } private static String prefixOf(String name) { int dash = name.lastIndexOf('-'); if (dash < 0) return name; return name.substring(0, dash); } private static boolean zSuffix(String name) { if (name.endsWith(".gz")) return true; return false; } public LogFile(File path) { String name = path.toString(); this.path = path; this.prefix = prefixOf(name); this.generation = generationOf(name); this.zsuff = zSuffix(name); } public String toString() { return "FilesArchived.LogFile{name="+path+" prefix="+prefix+" gen="+generation+" z="+zsuff+"}"; } } }
class FilesArchived { private static final Logger log = Logger.getLogger(FilesArchived.class.getName()); /** * File instance representing root directory of archive */ private final File root; private final Object mutex = new Object(); private List<LogFile> knownFiles; public static final long compressAfterMillis = 2L * 3600 * 1000; private static final long maxAgeDays = 30; private static final long sizeLimit = 30L * (1L << 30); private void waitForTrigger(long milliS) throws InterruptedException { synchronized (mutex) { mutex.wait(milliS); } } /** * Creates an instance of FilesArchive managing the given directory */ public FilesArchived(File rootDir) { this.root = rootDir; rescan(); Thread thread = new Thread(this::run); thread.setDaemon(true); thread.setName("FilesArchived-maintainer"); thread.start(); } public String toString() { return FilesArchived.class.getName() + ": root=" + root; } public synchronized int highestGen(String prefix) { int gen = 0; for (LogFile lf : knownFiles) { if (prefix.equals(lf.prefix)) { gen = Math.max(gen, lf.generation); } } return gen; } public void triggerMaintenance() { synchronized (mutex) { mutex.notifyAll(); } } synchronized boolean maintenance() { boolean action = false; rescan(); if (removeOlderThan(maxAgeDays)) { action = true; rescan(); } if (compressOldFiles()) { action = true; rescan(); } long days = maxAgeDays; while (tooMuchDiskUsage() && (--days > 1)) { if (removeOlderThan(days)) { action = true; rescan(); } } return action; } private void rescan() { knownFiles = scanDir(root); } boolean tooMuchDiskUsage() { long sz = sumFileSizes(); return sz > sizeLimit; } private boolean olderThan(LogFile lf, long days, long now) { long mtime = lf.path.lastModified(); long diff = now - mtime; return (diff > days * 86400L * 1000L); } private boolean removeOlderThan(long days) { boolean action = false; long now = System.currentTimeMillis(); for (LogFile lf : knownFiles) { if (olderThan(lf, days, now)) { lf.path.delete(); 
log.info("Deleted: "+lf.path); action = true; } } return action; } private boolean compressOldFiles() { long now = System.currentTimeMillis(); int count = 0; for (LogFile lf : knownFiles) { if (lf.canCompress(now) && (count++ < 5)) { compress(lf.path); } } return count > 0; } private void compress(File oldFile) { File gzippedFile = new File(oldFile.getPath() + ".gz"); try (GZIPOutputStream compressor = new GZIPOutputStream(new FileOutputStream(gzippedFile), 0x100000); FileInputStream inputStream = new FileInputStream(oldFile)) { long mtime = oldFile.lastModified(); byte [] buffer = new byte[0x100000]; for (int read = inputStream.read(buffer); read > 0; read = inputStream.read(buffer)) { compressor.write(buffer, 0, read); } compressor.finish(); compressor.flush(); oldFile.delete(); gzippedFile.setLastModified(mtime); log.info("Compressed: "+gzippedFile); } catch (IOException e) { log.warning("Got '" + e + "' while compressing '" + oldFile.getPath() + "'."); } } long sumFileSizes() { long sum = 0; for (LogFile lf : knownFiles) { sum += lf.path.length(); } return sum; } private static final Pattern dateFormatRegexp = Pattern.compile(".*/" + "[0-9][0-9][0-9][0-9]/" + "[0-9][0-9]/" + "[0-9][0-9]/" + "[0-9][0-9]-" + "[0-9].*"); private static List<LogFile> scanDir(File top) { List<LogFile> retval = new ArrayList<>(); String[] names = top.list(); if (names != null) { for (String name : names) { File sub = new File(top, name); if (sub.isFile()) { String pathName = sub.toString(); if (dateFormatRegexp.matcher(pathName).matches()) { retval.add(new LogFile(sub)); } else { log.warning("skipping file not matching log archive pattern: "+pathName); } } else if (sub.isDirectory()) { retval.addAll(scanDir(sub)); } } } return retval; } static class LogFile { public final File path; public final String prefix; public final int generation; public final boolean zsuff; public boolean canCompress(long now) { if (zsuff) return false; if (! 
path.isFile()) return false; long diff = now - path.lastModified(); if (diff < compressAfterMillis) return false; return true; } private static int generationOf(String name) { int dash = name.lastIndexOf('-'); if (dash < 0) return 0; String suff = name.substring(dash + 1); int r = 0; for (char ch : suff.toCharArray()) { if (ch >= '0' && ch <= '9') { r *= 10; r += (ch - '0'); } else { break; } } return r; } private static String prefixOf(String name) { int dash = name.lastIndexOf('-'); if (dash < 0) return name; return name.substring(0, dash); } private static boolean zSuffix(String name) { if (name.endsWith(".gz")) return true; return false; } public LogFile(File path) { String name = path.toString(); this.path = path; this.prefix = prefixOf(name); this.generation = generationOf(name); this.zsuff = zSuffix(name); } public String toString() { return "FilesArchived.LogFile{name="+path+" prefix="+prefix+" gen="+generation+" z="+zsuff+"}"; } } }
Argh, null it should be.
public void run() { if (queue == null) { throw new NullPointerException("channel is not allowed to be null"); } List<ItemOrList> drainList = new ArrayList<>(queue.size() + 1); try { for (; ; ) { drainList.clear(); drainList.add(queue.take()); queue.drainTo(drainList); for (int i = 0; i < drainList.size(); i++) { ItemOrList o = drainList.get(i); drainList.set(i, o); if (o.item != null) { for (LogHandler handler : handlers) { handler.handle(o.item); } } else if (o.list != null) { for (LogHandler handler : handlers) { handler.handle(o.list); } } else { throw new IllegalArgumentException("not LogMessage or List: " + o); } } } } catch (InterruptedException e) { } catch (Throwable t) { if (fatalErrorHandler != null) { fatalErrorHandler.handle(t, null); } } finally { log.fine("Handler thread " + getName() + " exiting, removing handlers"); for (LogHandler handler : handlers) { log.fine("Removing handler " + handler); handler.close(); } handlers.clear(); log.fine("Handler thread " + getName() + " done"); } }
ItemOrList o = drainList.get(i);
public void run() { if (queue == null) { throw new NullPointerException("channel is not allowed to be null"); } List<ItemOrList> drainList = new ArrayList<>(queue.size() + 1); try { for (; ; ) { drainList.clear(); drainList.add(queue.take()); queue.drainTo(drainList); for (int i = 0; i < drainList.size(); i++) { ItemOrList o = drainList.get(i); drainList.set(i, null); if (o.item != null) { for (LogHandler handler : handlers) { handler.handle(o.item); } } else if (o.list != null) { for (LogHandler handler : handlers) { handler.handle(o.list); } } else { throw new IllegalArgumentException("not LogMessage or List: " + o); } } } } catch (InterruptedException e) { } catch (Throwable t) { if (fatalErrorHandler != null) { fatalErrorHandler.handle(t, null); } } finally { log.fine("Handler thread " + getName() + " exiting, removing handlers"); for (LogHandler handler : handlers) { log.fine("Removing handler " + handler); handler.close(); } handlers.clear(); log.fine("Handler thread " + getName() + " done"); } }
class ItemOrList { final LogMessage item; final List<LogMessage> list; ItemOrList(LogMessage i) { this.item = i; this.list = null; } ItemOrList(List<LogMessage> l) { this.item = null; this.list = l; } public String toString() { return "item=" + item + ", list=" + list; } }
class ItemOrList { final LogMessage item; final List<LogMessage> list; ItemOrList(LogMessage i) { this.item = i; this.list = null; } ItemOrList(List<LogMessage> l) { this.item = null; this.list = l; } public String toString() { return "item=" + item + ", list=" + list; } }
If you call `DynamicPartitionUtil.registerOrRemoveDynamicPartitionTable` after successfully creating table, you should also call this when `replayCreateTable()`, even you call the `initDynamicPartitionTable()` in `DynamicPartitionScheduler`. Because "replay" means do exactly what is done before.
private void createOlapTable(Database db, CreateTableStmt stmt) throws DdlException { String tableName = stmt.getTableName(); LOG.debug("begin create olap table: {}", tableName); List<Column> baseSchema = stmt.getColumns(); validateColumns(baseSchema); PartitionDesc partitionDesc = stmt.getPartitionDesc(); PartitionInfo partitionInfo = null; Map<String, Long> partitionNameToId = Maps.newHashMap(); if (partitionDesc != null) { if (partitionDesc instanceof RangePartitionDesc) { RangePartitionDesc rangeDesc = (RangePartitionDesc) partitionDesc; for (SingleRangePartitionDesc desc : rangeDesc.getSingleRangePartitionDescs()) { long partitionId = getNextId(); partitionNameToId.put(desc.getPartitionName(), partitionId); } } partitionInfo = partitionDesc.toPartitionInfo(baseSchema, partitionNameToId); } else { if (DynamicPartitionUtil.checkDynamicPartitionPropertiesExist(stmt.getProperties())) { throw new DdlException("Only support dynamic partition properties on range partition table"); } long partitionId = getNextId(); partitionNameToId.put(tableName, partitionId); partitionInfo = new SinglePartitionInfo(); } KeysDesc keysDesc = stmt.getKeysDesc(); Preconditions.checkNotNull(keysDesc); KeysType keysType = keysDesc.getKeysType(); DistributionDesc distributionDesc = stmt.getDistributionDesc(); Preconditions.checkNotNull(distributionDesc); DistributionInfo distributionInfo = distributionDesc.toDistributionInfo(baseSchema); short shortKeyColumnCount = Catalog.calcShortKeyColumnCount(baseSchema, stmt.getProperties()); LOG.debug("create table[{}] short key column count: {}", tableName, shortKeyColumnCount); long tableId = Catalog.getInstance().getNextId(); OlapTable olapTable = new OlapTable(tableId, tableName, baseSchema, keysType, partitionInfo, distributionInfo); olapTable.setComment(stmt.getComment()); long baseIndexId = getNextId(); olapTable.setBaseIndexId(baseIndexId); Map<String, String> properties = stmt.getProperties(); TStorageType baseIndexStorageType = null; try { 
baseIndexStorageType = PropertyAnalyzer.analyzeStorageType(properties); } catch (AnalysisException e) { throw new DdlException(e.getMessage()); } Preconditions.checkNotNull(baseIndexStorageType); olapTable.setStorageTypeToIndex(baseIndexId, baseIndexStorageType); Set<String> bfColumns = null; double bfFpp = 0; try { bfColumns = PropertyAnalyzer.analyzeBloomFilterColumns(properties, baseSchema); if (bfColumns != null && bfColumns.isEmpty()) { bfColumns = null; } bfFpp = PropertyAnalyzer.analyzeBloomFilterFpp(properties); if (bfColumns != null && bfFpp == 0) { bfFpp = FeConstants.default_bloom_filter_fpp; } else if (bfColumns == null) { bfFpp = 0; } olapTable.setBloomFilterInfo(bfColumns, bfFpp); } catch (AnalysisException e) { throw new DdlException(e.getMessage()); } if (partitionInfo.getType() == PartitionType.UNPARTITIONED) { String partitionName = tableName; long partitionId = partitionNameToId.get(partitionName); DataProperty dataProperty = null; try { dataProperty = PropertyAnalyzer.analyzeDataProperty(stmt.getProperties(), DataProperty.DEFAULT_HDD_DATA_PROPERTY); } catch (AnalysisException e) { throw new DdlException(e.getMessage()); } Preconditions.checkNotNull(dataProperty); partitionInfo.setDataProperty(partitionId, dataProperty); short replicationNum = FeConstants.default_replication_num; try { replicationNum = PropertyAnalyzer.analyzeReplicationNum(properties, replicationNum); } catch (AnalysisException e) { throw new DdlException(e.getMessage()); } partitionInfo.setReplicationNum(partitionId, replicationNum); } try { String colocateGroup = PropertyAnalyzer.analyzeColocate(properties); if (colocateGroup != null) { if (Config.disable_colocate_join) { ErrorReport.reportDdlException(ErrorCode.ERR_COLOCATE_FEATURE_DISABLED); } String fullGroupName = db.getId() + "_" + colocateGroup; ColocateGroupSchema groupSchema = colocateTableIndex.getGroupSchema(fullGroupName); if (groupSchema != null) { groupSchema.checkColocateSchema(olapTable); } 
getColocateTableIndex().addTableToGroup(db.getId(), olapTable, colocateGroup, null /* generate group id inside */); olapTable.setColocateGroup(colocateGroup); } } catch (AnalysisException e) { throw new DdlException(e.getMessage()); } DynamicPartitionUtil.checkAllDynamicPartitionProperties(properties); int schemaVersion = 0; try { schemaVersion = PropertyAnalyzer.analyzeSchemaVersion(properties); } catch (AnalysisException e) { throw new DdlException(e.getMessage()); } int schemaHash = Util.schemaHash(schemaVersion, baseSchema, bfColumns, bfFpp); olapTable.setIndexSchemaInfo(baseIndexId, tableName, baseSchema, schemaVersion, schemaHash, shortKeyColumnCount); Pair<Long, Long> versionInfo = null; try { versionInfo = PropertyAnalyzer.analyzeVersionInfo(properties); } catch (AnalysisException e) { throw new DdlException(e.getMessage()); } Preconditions.checkNotNull(versionInfo); Set<Long> tabletIdSet = new HashSet<Long>(); try { if (partitionInfo.getType() == PartitionType.UNPARTITIONED) { String partitionName = tableName; long partitionId = partitionNameToId.get(partitionName); Partition partition = createPartitionWithIndices(db.getClusterName(), db.getId(), olapTable.getId(), olapTable.getBaseIndexId(), partitionId, partitionName, olapTable.getIndexIdToShortKeyColumnCount(), olapTable.getIndexIdToSchemaHash(), olapTable.getIndexIdToStorageType(), olapTable.getIndexIdToSchema(), keysType, distributionInfo, partitionInfo.getDataProperty(partitionId).getStorageMedium(), partitionInfo.getReplicationNum(partitionId), versionInfo, bfColumns, bfFpp, tabletIdSet); olapTable.addPartition(partition); } else if (partitionInfo.getType() == PartitionType.RANGE) { try { PropertyAnalyzer.analyzeDataProperty(stmt.getProperties(), DataProperty.DEFAULT_HDD_DATA_PROPERTY); PropertyAnalyzer.analyzeReplicationNum(properties, FeConstants.default_replication_num); if (properties != null && !properties.isEmpty()) { if (partitionInfo.isMultiColumnPartition()) { throw new 
DdlException("Dynamic partition only support single column partition"); } Map<String, String> dynamicPartitionProperties = DynamicPartitionUtil.analyzeDynamicPartition(db, olapTable, properties); TableProperty tableProperty = new TableProperty(); tableProperty.modifyTableProperties(dynamicPartitionProperties); dynamicPartitionScheduler.createOrUpdateRuntimeInfo( tableName, DynamicPartitionScheduler.LAST_UPDATE_TIME, TimeUtils.getCurrentFormatTime()); olapTable.setTableProperty(tableProperty); } if (properties != null && !properties.isEmpty()) { throw new DdlException("Unknown properties: " + properties); } } catch (AnalysisException e) { throw new DdlException(e.getMessage()); } RangePartitionInfo rangePartitionInfo = (RangePartitionInfo) partitionInfo; for (Map.Entry<String, Long> entry : partitionNameToId.entrySet()) { DataProperty dataProperty = rangePartitionInfo.getDataProperty(entry.getValue()); Partition partition = createPartitionWithIndices(db.getClusterName(), db.getId(), olapTable.getId(), olapTable.getBaseIndexId(), entry.getValue(), entry.getKey(), olapTable.getIndexIdToShortKeyColumnCount(), olapTable.getIndexIdToSchemaHash(), olapTable.getIndexIdToStorageType(), olapTable.getIndexIdToSchema(), keysType, distributionInfo, dataProperty.getStorageMedium(), partitionInfo.getReplicationNum(entry.getValue()), versionInfo, bfColumns, bfFpp, tabletIdSet); olapTable.addPartition(partition); } } else { throw new DdlException("Unsupport partition method: " + partitionInfo.getType().name()); } if (!db.createTableWithLock(olapTable, false, stmt.isSetIfNotExists())) { ErrorReport.reportDdlException(ErrorCode.ERR_CANT_CREATE_TABLE, tableName, "table already exists"); } if (getColocateTableIndex().isColocateTable(tableId)) { GroupId groupId = getColocateTableIndex().getGroup(tableId); List<List<Long>> backendsPerBucketSeq = getColocateTableIndex().getBackendsPerBucketSeq(groupId); ColocatePersistInfo info = ColocatePersistInfo.createForAddTable(groupId, tableId, 
backendsPerBucketSeq); editLog.logColocateAddTable(info); } LOG.info("successfully create table[{};{}]", tableName, tableId); DynamicPartitionUtil.registerOrRemoveDynamicPartitionTable(db.getId(), olapTable); } catch (DdlException e) { for (Long tabletId : tabletIdSet) { Catalog.getCurrentInvertedIndex().deleteTablet(tabletId); } if (getColocateTableIndex().isColocateTable(tableId)) { getColocateTableIndex().removeTable(tableId); } throw e; } return; }
DynamicPartitionUtil.registerOrRemoveDynamicPartitionTable(db.getId(), olapTable);
private void createOlapTable(Database db, CreateTableStmt stmt) throws DdlException { String tableName = stmt.getTableName(); LOG.debug("begin create olap table: {}", tableName); List<Column> baseSchema = stmt.getColumns(); validateColumns(baseSchema); PartitionDesc partitionDesc = stmt.getPartitionDesc(); PartitionInfo partitionInfo = null; Map<String, Long> partitionNameToId = Maps.newHashMap(); if (partitionDesc != null) { if (partitionDesc instanceof RangePartitionDesc) { RangePartitionDesc rangeDesc = (RangePartitionDesc) partitionDesc; for (SingleRangePartitionDesc desc : rangeDesc.getSingleRangePartitionDescs()) { long partitionId = getNextId(); partitionNameToId.put(desc.getPartitionName(), partitionId); } } partitionInfo = partitionDesc.toPartitionInfo(baseSchema, partitionNameToId); } else { if (DynamicPartitionUtil.checkDynamicPartitionPropertiesExist(stmt.getProperties())) { throw new DdlException("Only support dynamic partition properties on range partition table"); } long partitionId = getNextId(); partitionNameToId.put(tableName, partitionId); partitionInfo = new SinglePartitionInfo(); } KeysDesc keysDesc = stmt.getKeysDesc(); Preconditions.checkNotNull(keysDesc); KeysType keysType = keysDesc.getKeysType(); DistributionDesc distributionDesc = stmt.getDistributionDesc(); Preconditions.checkNotNull(distributionDesc); DistributionInfo distributionInfo = distributionDesc.toDistributionInfo(baseSchema); short shortKeyColumnCount = Catalog.calcShortKeyColumnCount(baseSchema, stmt.getProperties()); LOG.debug("create table[{}] short key column count: {}", tableName, shortKeyColumnCount); TableIndexes indexes = new TableIndexes(stmt.getIndexes()); long tableId = Catalog.getInstance().getNextId(); OlapTable olapTable = new OlapTable(tableId, tableName, baseSchema, keysType, partitionInfo, distributionInfo, indexes); olapTable.setComment(stmt.getComment()); long baseIndexId = getNextId(); olapTable.setBaseIndexId(baseIndexId); Map<String, String> properties = 
stmt.getProperties(); TStorageType baseIndexStorageType = null; try { baseIndexStorageType = PropertyAnalyzer.analyzeStorageType(properties); } catch (AnalysisException e) { throw new DdlException(e.getMessage()); } Preconditions.checkNotNull(baseIndexStorageType); olapTable.setStorageTypeToIndex(baseIndexId, baseIndexStorageType); Set<String> bfColumns = null; double bfFpp = 0; try { bfColumns = PropertyAnalyzer.analyzeBloomFilterColumns(properties, baseSchema); if (bfColumns != null && bfColumns.isEmpty()) { bfColumns = null; } bfFpp = PropertyAnalyzer.analyzeBloomFilterFpp(properties); if (bfColumns != null && bfFpp == 0) { bfFpp = FeConstants.default_bloom_filter_fpp; } else if (bfColumns == null) { bfFpp = 0; } olapTable.setBloomFilterInfo(bfColumns, bfFpp); } catch (AnalysisException e) { throw new DdlException(e.getMessage()); } if (partitionInfo.getType() == PartitionType.UNPARTITIONED) { String partitionName = tableName; long partitionId = partitionNameToId.get(partitionName); DataProperty dataProperty = null; try { dataProperty = PropertyAnalyzer.analyzeDataProperty(stmt.getProperties(), DataProperty.DEFAULT_HDD_DATA_PROPERTY); } catch (AnalysisException e) { throw new DdlException(e.getMessage()); } Preconditions.checkNotNull(dataProperty); partitionInfo.setDataProperty(partitionId, dataProperty); short replicationNum = FeConstants.default_replication_num; try { replicationNum = PropertyAnalyzer.analyzeReplicationNum(properties, replicationNum); } catch (AnalysisException e) { throw new DdlException(e.getMessage()); } partitionInfo.setReplicationNum(partitionId, replicationNum); } try { String colocateGroup = PropertyAnalyzer.analyzeColocate(properties); if (colocateGroup != null) { if (Config.disable_colocate_join) { ErrorReport.reportDdlException(ErrorCode.ERR_COLOCATE_FEATURE_DISABLED); } String fullGroupName = db.getId() + "_" + colocateGroup; ColocateGroupSchema groupSchema = colocateTableIndex.getGroupSchema(fullGroupName); if (groupSchema != null) 
{ groupSchema.checkColocateSchema(olapTable); } getColocateTableIndex().addTableToGroup(db.getId(), olapTable, colocateGroup, null /* generate group id inside */); olapTable.setColocateGroup(colocateGroup); } } catch (AnalysisException e) { throw new DdlException(e.getMessage()); } int schemaVersion = 0; try { schemaVersion = PropertyAnalyzer.analyzeSchemaVersion(properties); } catch (AnalysisException e) { throw new DdlException(e.getMessage()); } int schemaHash = Util.schemaHash(schemaVersion, baseSchema, bfColumns, bfFpp); olapTable.setIndexSchemaInfo(baseIndexId, tableName, baseSchema, schemaVersion, schemaHash, shortKeyColumnCount); Pair<Long, Long> versionInfo = null; try { versionInfo = PropertyAnalyzer.analyzeVersionInfo(properties); } catch (AnalysisException e) { throw new DdlException(e.getMessage()); } Preconditions.checkNotNull(versionInfo); Set<Long> tabletIdSet = new HashSet<Long>(); try { if (partitionInfo.getType() == PartitionType.UNPARTITIONED) { String partitionName = tableName; long partitionId = partitionNameToId.get(partitionName); Partition partition = createPartitionWithIndices(db.getClusterName(), db.getId(), olapTable.getId(), olapTable.getBaseIndexId(), partitionId, partitionName, olapTable.getIndexIdToShortKeyColumnCount(), olapTable.getIndexIdToSchemaHash(), olapTable.getIndexIdToStorageType(), olapTable.getIndexIdToSchema(), keysType, distributionInfo, partitionInfo.getDataProperty(partitionId).getStorageMedium(), partitionInfo.getReplicationNum(partitionId), versionInfo, bfColumns, bfFpp, tabletIdSet, olapTable.getCopiedIndexes()); olapTable.addPartition(partition); } else if (partitionInfo.getType() == PartitionType.RANGE) { try { PropertyAnalyzer.analyzeDataProperty(stmt.getProperties(), DataProperty.DEFAULT_HDD_DATA_PROPERTY); PropertyAnalyzer.analyzeReplicationNum(properties, FeConstants.default_replication_num); DynamicPartitionUtil.checkAndSetDynamicPartitionProperty(olapTable, properties); if (properties != null && 
!properties.isEmpty()) { throw new DdlException("Unknown properties: " + properties); } } catch (AnalysisException e) { throw new DdlException(e.getMessage()); } RangePartitionInfo rangePartitionInfo = (RangePartitionInfo) partitionInfo; for (Map.Entry<String, Long> entry : partitionNameToId.entrySet()) { DataProperty dataProperty = rangePartitionInfo.getDataProperty(entry.getValue()); Partition partition = createPartitionWithIndices(db.getClusterName(), db.getId(), olapTable.getId(), olapTable.getBaseIndexId(), entry.getValue(), entry.getKey(), olapTable.getIndexIdToShortKeyColumnCount(), olapTable.getIndexIdToSchemaHash(), olapTable.getIndexIdToStorageType(), olapTable.getIndexIdToSchema(), keysType, distributionInfo, dataProperty.getStorageMedium(), partitionInfo.getReplicationNum(entry.getValue()), versionInfo, bfColumns, bfFpp, tabletIdSet, olapTable.getCopiedIndexes()); olapTable.addPartition(partition); } } else { throw new DdlException("Unsupport partition method: " + partitionInfo.getType().name()); } if (!db.createTableWithLock(olapTable, false, stmt.isSetIfNotExists())) { ErrorReport.reportDdlException(ErrorCode.ERR_CANT_CREATE_TABLE, tableName, "table already exists"); } if (getColocateTableIndex().isColocateTable(tableId)) { GroupId groupId = getColocateTableIndex().getGroup(tableId); List<List<Long>> backendsPerBucketSeq = getColocateTableIndex().getBackendsPerBucketSeq(groupId); ColocatePersistInfo info = ColocatePersistInfo.createForAddTable(groupId, tableId, backendsPerBucketSeq); editLog.logColocateAddTable(info); } LOG.info("successfully create table[{};{}]", tableName, tableId); DynamicPartitionUtil.registerOrRemoveDynamicPartitionTable(db.getId(), olapTable); dynamicPartitionScheduler.createOrUpdateRuntimeInfo( tableName, DynamicPartitionScheduler.LAST_UPDATE_TIME, TimeUtils.getCurrentFormatTime()); } catch (DdlException e) { for (Long tabletId : tabletIdSet) { Catalog.getCurrentInvertedIndex().deleteTablet(tabletId); } if 
(getColocateTableIndex().isColocateTable(tableId)) { getColocateTableIndex().removeTable(tableId); } throw e; } return; }
/**
 * Holder for the single Catalog instance. The JVM initializes INSTANCE on first
 * reference to this class, and class initialization makes that creation thread-safe
 * without explicit locking. NOTE(review): this looks like the initialization-on-demand
 * holder idiom — presumably accessed only via a getInstance() method elsewhere; verify.
 */
class SingletonHolder {
    private static final Catalog INSTANCE = new Catalog();
}
/**
 * Holder for the single Catalog instance; created lazily and thread-safely by JVM
 * class initialization on first reference. NOTE(review): appears to be the
 * initialization-on-demand holder idiom — confirm it is only referenced from the
 * singleton accessor.
 */
class SingletonHolder {
    private static final Catalog INSTANCE = new Catalog();
}
RIP LogMessage :'(
/**
 * Handler loop: blocks on the queue, drains pending entries in batches, and dispatches
 * each entry (single message or list) to every registered handler. Exits on interruption;
 * any other throwable is reported to the fatal error handler. Handlers are always closed
 * and cleared on exit.
 */
public void run() {
    if (queue == null) {
        throw new NullPointerException("channel is not allowed to be null");
    }
    // Reused scratch buffer; +1 leaves room for the element obtained via take() before drainTo().
    List<ItemOrList> drainList = new ArrayList<>(queue.size() + 1);
    try {
        for (; ; ) {
            drainList.clear();
            drainList.add(queue.take());  // block until at least one entry arrives
            queue.drainTo(drainList);     // then grab everything else already queued, as one batch
            for (int i = 0; i < drainList.size(); i++) {
                ItemOrList o = drainList.get(i);
                // Null out the slot so already-handled entries become collectable
                // while the rest of the batch is still being processed.
                drainList.set(i, null);
                if (o.item != null) {
                    for (LogHandler handler : handlers) {
                        handler.handle(o.item);
                    }
                } else if (o.list != null) {
                    for (LogHandler handler : handlers) {
                        handler.handle(o.list);
                    }
                } else {
                    throw new IllegalArgumentException("not LogMessage or List: " + o);
                }
            }
        }
    } catch (InterruptedException e) {
        // Interruption is the expected shutdown signal. Restore the interrupt flag so
        // code further up the stack can still observe it, instead of silently swallowing it.
        Thread.currentThread().interrupt();
    } catch (Throwable t) {
        if (fatalErrorHandler != null) {
            fatalErrorHandler.handle(t, null);
        }
    } finally {
        log.fine("Handler thread " + getName() + " exiting, removing handlers");
        for (LogHandler handler : handlers) {
            log.fine("Removing handler " + handler);
            handler.close();
        }
        handlers.clear();
        log.fine("Handler thread " + getName() + " done");
    }
}
handler.handle(o.item);
/**
 * Handler loop: blocks on the queue, drains pending entries in batches, and dispatches
 * each entry (a single LogMessage or a list of them) to every registered handler.
 * On exit, all handlers are closed and the handler list is cleared.
 */
public void run() {
    if (queue == null) {
        throw new NullPointerException("channel is not allowed to be null");
    }
    // Reused scratch buffer; +1 leaves room for the element obtained via take() before drainTo().
    List<ItemOrList> drainList = new ArrayList<>(queue.size() + 1);
    try {
        for (; ; ) {
            drainList.clear();
            drainList.add(queue.take());  // block until at least one entry arrives
            queue.drainTo(drainList);     // then drain everything else already queued, as one batch
            for (int i = 0; i < drainList.size(); i++) {
                ItemOrList o = drainList.get(i);
                // Null out the slot so already-handled entries become collectable
                // while the rest of the batch is still being processed.
                drainList.set(i, null);
                if (o.item != null) {
                    for (LogHandler handler : handlers) {
                        handler.handle(o.item);
                    }
                } else if (o.list != null) {
                    for (LogHandler handler : handlers) {
                        handler.handle(o.list);
                    }
                } else {
                    throw new IllegalArgumentException("not LogMessage or List: " + o);
                }
            }
        }
    } catch (InterruptedException e) {
        // NOTE(review): interruption is treated as the shutdown signal and swallowed;
        // consider calling Thread.currentThread().interrupt() to restore the flag.
    } catch (Throwable t) {
        if (fatalErrorHandler != null) {
            fatalErrorHandler.handle(t, null);
        }
    } finally {
        log.fine("Handler thread " + getName() + " exiting, removing handlers");
        for (LogHandler handler : handlers) {
            log.fine("Removing handler " + handler);
            handler.close();
        }
        handlers.clear();
        log.fine("Handler thread " + getName() + " done");
    }
}
/**
 * Tagged holder carrying either a single LogMessage or a list of them — exactly one
 * of the two fields is non-null, depending on which constructor was used.
 */
class ItemOrList {

    final LogMessage item;
    final List<LogMessage> list;

    ItemOrList(LogMessage single) {
        this.item = single;
        this.list = null;
    }

    ItemOrList(List<LogMessage> batch) {
        this.item = null;
        this.list = batch;
    }

    public String toString() {
        return String.format("item=%s, list=%s", item, list);
    }

}
/**
 * Tagged holder carrying either a single LogMessage or a list of them.
 * Exactly one of {@code item} and {@code list} is non-null, depending on
 * which constructor was used.
 */
class ItemOrList {
    // the single-message variant; null when this instance wraps a list
    final LogMessage item;
    // the batch variant; null when this instance wraps a single message
    final List<LogMessage> list;

    ItemOrList(LogMessage i) {
        this.item = i;
        this.list = null;
    }

    ItemOrList(List<LogMessage> l) {
        this.item = null;
        this.list = l;
    }

    public String toString() {
        return "item=" + item + ", list=" + list;
    }
}
Is `IllegalArgumentException` the right type here? For an internal invariant violation (an entry that is neither a message nor a list), `IllegalStateException` would fit better.
/**
 * Throws if any cluster in the model is declared with more nodes than the quota allows.
 *
 * @param maxClusterSize the maximum number of nodes a single cluster may have
 * @param model the model whose provisioned clusters are checked
 * @throws IllegalArgumentException if one or more clusters exceed the limit
 */
private void validateMaxClusterSize(int maxClusterSize, VespaModel model) {
    var invalidClusters = model.allClusters().stream()
            .filter(clusterId -> {
                var cluster = model.provisioned().all().get(clusterId);
                var clusterSize = cluster.maxResources().nodes();
                return clusterSize > maxClusterSize;
            })
            .map(ClusterSpec.Id::value)
            .collect(Collectors.toList());

    if (!invalidClusters.isEmpty()) {
        var clusterNames = String.join(", ", invalidClusters);
        // IllegalArgumentException instead of bare RuntimeException: an oversized cluster is
        // invalid caller input (the application package), not an internal failure.
        throw new IllegalArgumentException("Clusters " + clusterNames + " exceeded max cluster size of " + maxClusterSize);
    }
}
/**
 * Throws if any cluster in the model is declared with more nodes than the quota allows.
 *
 * @param maxClusterSize the maximum number of nodes a single cluster may have
 * @param model the model whose provisioned clusters are checked
 * @throws IllegalArgumentException if one or more clusters exceed the limit
 */
private void validateMaxClusterSize(int maxClusterSize, VespaModel model) {
    var oversized = model.allClusters().stream()
            .filter(id -> model.provisioned().all().get(id).maxResources().nodes() > maxClusterSize)
            .map(ClusterSpec.Id::value)
            .collect(Collectors.toList());
    if (oversized.isEmpty()) return;

    throw new IllegalArgumentException("Clusters " + String.join(", ", oversized) +
                                       " exceeded max cluster size of " + maxClusterSize);
}
/** Validates the deployment against the quota from the deploy state's properties. */
class QuotaValidator extends Validator {
    @Override
    public void validate(VespaModel model, DeployState deployState) {
        var quota = deployState.getProperties().quota();
        // Only enforce the cluster-size limit when the quota actually specifies one.
        quota.maxClusterSize().ifPresent(maxClusterSize -> validateMaxClusterSize(maxClusterSize, model));
    }
}
/** Validates the deployment against the quota from the deploy state's properties. */
class QuotaValidator extends Validator {
    @Override
    public void validate(VespaModel model, DeployState deployState) {
        var quota = deployState.getProperties().quota();
        // Only enforce the cluster-size limit when the quota actually specifies one.
        quota.maxClusterSize().ifPresent(maxClusterSize -> validateMaxClusterSize(maxClusterSize, model));
    }

    // NOTE(review): the javadoc below is dangling — it presumably documents
    // validateMaxClusterSize, which is defined outside this snippet; move it there.
    /** Check that all clusters in the application do not exceed the quota max cluster size. */
}
It would perhaps be better if `readApplicationId()` returned an optional, and `getApplicationId()` threw on empty? I fear something else may go wrong, and we would then believe the failure was because the id didn't exist, even though it does.
/**
 * Returns the application id for this session, or empty if it cannot be read.
 * NOTE(review): catching RuntimeException broadly maps ANY unexpected failure in
 * getApplicationId() to empty, not just a missing id — confirm this is intended.
 */
public Optional<ApplicationId> getOptionalApplicationId() {
    try {
        return Optional.of(getApplicationId());
    } catch (RuntimeException e) {
        return Optional.empty();
    }
}
return Optional.of(getApplicationId());
/** Returns the application id of this session, or empty when it cannot be obtained. */
public Optional<ApplicationId> getOptionalApplicationId() {
    try {
        ApplicationId id = getApplicationId();
        return Optional.of(id);
    } catch (RuntimeException ignored) {
        // getApplicationId() throws when the id is not available; report that as absence
        return Optional.empty();
    }
}
/**
 * A deployment session for a tenant's application. Most state (status, application id,
 * package reference, versions) is read from and written to ZooKeeper through the
 * {@link SessionZooKeeperClient}; the application package itself is optionally held in memory.
 * Sessions are ordered by their numeric session id.
 */
class Session implements Comparable<Session> {

    private final long sessionId;
    protected final TenantName tenant;
    protected final SessionZooKeeperClient sessionZooKeeperClient;
    // Present for locally created sessions; when absent, the package is loaded via ZooKeeper.
    protected final Optional<ApplicationPackage> applicationPackage;

    protected Session(TenantName tenant, long sessionId, SessionZooKeeperClient sessionZooKeeperClient) {
        this(tenant, sessionId, sessionZooKeeperClient, Optional.empty());
    }

    protected Session(TenantName tenant, long sessionId, SessionZooKeeperClient sessionZooKeeperClient,
                      ApplicationPackage applicationPackage) {
        this(tenant, sessionId, sessionZooKeeperClient, Optional.of(applicationPackage));
    }

    private Session(TenantName tenant, long sessionId, SessionZooKeeperClient sessionZooKeeperClient,
                    Optional<ApplicationPackage> applicationPackage) {
        this.tenant = tenant;
        this.sessionId = sessionId;
        this.sessionZooKeeperClient = sessionZooKeeperClient;
        this.applicationPackage = applicationPackage;
    }

    public final long getSessionId() { return sessionId; }

    /** Returns the current status of this session, read from ZooKeeper. */
    public Session.Status getStatus() { return sessionZooKeeperClient.readStatus(); }

    public SessionZooKeeperClient getSessionZooKeeperClient() { return sessionZooKeeperClient; }

    @Override
    public String toString() { return "Session,id=" + sessionId; }

    /** Returns the id of the session that was active when this session was created. */
    public long getActiveSessionAtCreate() { return getMetaData().getPreviousActiveGeneration(); }

    /**
     * The status of this session.
     */
    public enum Status {
        NEW, PREPARE, ACTIVATE, DEACTIVATE, DELETE, NONE;

        // Falls back to NEW for unrecognized input rather than failing.
        public static Status parse(String data) {
            for (Status status : Status.values()) {
                if (status.name().equals(data)) {
                    return status;
                }
            }
            return Status.NEW;
        }
    }

    public TenantName getTenantName() { return tenant; }

    /**
     * Helper to provide a log message preamble for code dealing with sessions
     * @return log preamble
     */
    public String logPre() {
        // Any failure to read the application id falls back to the tenant-based preamble.
        Optional<ApplicationId> applicationId;
        try {
            applicationId = Optional.of(getApplicationId());
        } catch (Exception e) {
            applicationId = Optional.empty();
        }
        return applicationId
                .filter(appId -> ! appId.equals(ApplicationId.defaultId()))
                .map(TenantRepository::logPre)
                .orElse(TenantRepository.logPre(getTenantName()));
    }

    public Instant getCreateTime() { return sessionZooKeeperClient.readCreateTime(); }

    public void setApplicationId(ApplicationId applicationId) {
        sessionZooKeeperClient.writeApplicationId(applicationId);
    }

    void setApplicationPackageReference(FileReference applicationPackageReference) {
        if (applicationPackageReference == null) throw new IllegalArgumentException(String.format(
                "Null application package file reference for tenant %s, session id %d", tenant, sessionId));
        sessionZooKeeperClient.writeApplicationPackageReference(applicationPackageReference);
    }

    public void setVespaVersion(Version version) {
        sessionZooKeeperClient.writeVespaVersion(version);
    }

    public void setDockerImageRepository(Optional<DockerImage> dockerImageRepository) {
        sessionZooKeeperClient.writeDockerImageRepository(dockerImageRepository);
    }

    public void setAthenzDomain(Optional<AthenzDomain> athenzDomain) {
        sessionZooKeeperClient.writeAthenzDomain(athenzDomain);
    }

    /** Returns application id read from ZooKeeper. Will throw RuntimeException if not found */
    public ApplicationId getApplicationId() {
        return sessionZooKeeperClient.readApplicationId();
    }

    /** Returns the application package file reference read from ZooKeeper. */
    public FileReference getApplicationPackageReference() {
        return sessionZooKeeperClient.readApplicationPackageReference();
    }

    public Optional<DockerImage> getDockerImageRepository() {
        return sessionZooKeeperClient.readDockerImageRepository();
    }

    public Version getVespaVersion() { return sessionZooKeeperClient.readVespaVersion(); }

    public Optional<AthenzDomain> getAthenzDomain() { return sessionZooKeeperClient.readAthenzDomain(); }

    public AllocatedHosts getAllocatedHosts() { return sessionZooKeeperClient.getAllocatedHosts(); }

    public Transaction createDeactivateTransaction() { return createSetStatusTransaction(Status.DEACTIVATE); }

    private Transaction createSetStatusTransaction(Status status) {
        return sessionZooKeeperClient.createWriteStatusTransaction(status);
    }

    public boolean isNewerThan(long sessionId) { return getSessionId() > sessionId; }

    public ApplicationMetaData getMetaData() {
        // Prefer the in-memory package; otherwise load it (via ZooKeeper) on demand.
        return applicationPackage.isPresent()
                ? applicationPackage.get().getMetaData()
                : sessionZooKeeperClient.loadApplicationPackage().getMetaData();
    }

    public ApplicationPackage getApplicationPackage() {
        return applicationPackage.orElseThrow(() -> new RuntimeException("No application package found for " + this));
    }

    public ApplicationFile getApplicationFile(Path relativePath, LocalSession.Mode mode) {
        // Opening a file for writing implies the session content will change, so reset its status.
        if (mode.equals(Session.Mode.WRITE)) {
            markSessionEdited();
        }
        return getApplicationPackage().getFile(relativePath);
    }

    private void markSessionEdited() { setStatus(Session.Status.NEW); }

    void setStatus(Session.Status newStatus) { sessionZooKeeperClient.writeStatus(newStatus); }

    @Override
    public int compareTo(Session rhs) {
        Long lhsId = getSessionId();
        Long rhsId = rhs.getSessionId();
        return lhsId.compareTo(rhsId);
    }

    public enum Mode {
        READ, WRITE
    }

}
/**
 * A deployment session for a tenant's application. Most state (status, application id,
 * package reference, versions) is read from and written to ZooKeeper through the
 * {@link SessionZooKeeperClient}; the application package itself is optionally held in memory.
 * Sessions are ordered by their numeric session id.
 */
class Session implements Comparable<Session> {

    private final long sessionId;
    protected final TenantName tenant;
    protected final SessionZooKeeperClient sessionZooKeeperClient;
    // Present for locally created sessions; when absent, the package is loaded via ZooKeeper.
    protected final Optional<ApplicationPackage> applicationPackage;

    protected Session(TenantName tenant, long sessionId, SessionZooKeeperClient sessionZooKeeperClient) {
        this(tenant, sessionId, sessionZooKeeperClient, Optional.empty());
    }

    protected Session(TenantName tenant, long sessionId, SessionZooKeeperClient sessionZooKeeperClient,
                      ApplicationPackage applicationPackage) {
        this(tenant, sessionId, sessionZooKeeperClient, Optional.of(applicationPackage));
    }

    private Session(TenantName tenant, long sessionId, SessionZooKeeperClient sessionZooKeeperClient,
                    Optional<ApplicationPackage> applicationPackage) {
        this.tenant = tenant;
        this.sessionId = sessionId;
        this.sessionZooKeeperClient = sessionZooKeeperClient;
        this.applicationPackage = applicationPackage;
    }

    public final long getSessionId() { return sessionId; }

    /** Returns the current status of this session, read from ZooKeeper. */
    public Session.Status getStatus() { return sessionZooKeeperClient.readStatus(); }

    public SessionZooKeeperClient getSessionZooKeeperClient() { return sessionZooKeeperClient; }

    @Override
    public String toString() { return "Session,id=" + sessionId; }

    /** Returns the id of the session that was active when this session was created. */
    public long getActiveSessionAtCreate() { return getMetaData().getPreviousActiveGeneration(); }

    /**
     * The status of this session.
     */
    public enum Status {
        NEW, PREPARE, ACTIVATE, DEACTIVATE, DELETE, NONE;

        // Falls back to NEW for unrecognized input rather than failing.
        public static Status parse(String data) {
            for (Status status : Status.values()) {
                if (status.name().equals(data)) {
                    return status;
                }
            }
            return Status.NEW;
        }
    }

    public TenantName getTenantName() { return tenant; }

    /**
     * Helper to provide a log message preamble for code dealing with sessions
     * @return log preamble
     */
    public String logPre() {
        // Any failure to read the application id falls back to the tenant-based preamble.
        Optional<ApplicationId> applicationId;
        try {
            applicationId = Optional.of(getApplicationId());
        } catch (Exception e) {
            applicationId = Optional.empty();
        }
        return applicationId
                .filter(appId -> ! appId.equals(ApplicationId.defaultId()))
                .map(TenantRepository::logPre)
                .orElse(TenantRepository.logPre(getTenantName()));
    }

    public Instant getCreateTime() { return sessionZooKeeperClient.readCreateTime(); }

    public void setApplicationId(ApplicationId applicationId) {
        sessionZooKeeperClient.writeApplicationId(applicationId);
    }

    void setApplicationPackageReference(FileReference applicationPackageReference) {
        if (applicationPackageReference == null) throw new IllegalArgumentException(String.format(
                "Null application package file reference for tenant %s, session id %d", tenant, sessionId));
        sessionZooKeeperClient.writeApplicationPackageReference(applicationPackageReference);
    }

    public void setVespaVersion(Version version) {
        sessionZooKeeperClient.writeVespaVersion(version);
    }

    public void setDockerImageRepository(Optional<DockerImage> dockerImageRepository) {
        sessionZooKeeperClient.writeDockerImageRepository(dockerImageRepository);
    }

    public void setAthenzDomain(Optional<AthenzDomain> athenzDomain) {
        sessionZooKeeperClient.writeAthenzDomain(athenzDomain);
    }

    /** Returns application id read from ZooKeeper. Will throw RuntimeException if not found */
    public ApplicationId getApplicationId() {
        return sessionZooKeeperClient.readApplicationId()
                .orElseThrow(() -> new RuntimeException("Unable to read application id for session " + sessionId));
    }

    /** Returns the application package file reference read from ZooKeeper. */
    public FileReference getApplicationPackageReference() {
        return sessionZooKeeperClient.readApplicationPackageReference();
    }

    public Optional<DockerImage> getDockerImageRepository() {
        return sessionZooKeeperClient.readDockerImageRepository();
    }

    public Version getVespaVersion() { return sessionZooKeeperClient.readVespaVersion(); }

    public Optional<AthenzDomain> getAthenzDomain() { return sessionZooKeeperClient.readAthenzDomain(); }

    public AllocatedHosts getAllocatedHosts() { return sessionZooKeeperClient.getAllocatedHosts(); }

    public Transaction createDeactivateTransaction() { return createSetStatusTransaction(Status.DEACTIVATE); }

    private Transaction createSetStatusTransaction(Status status) {
        return sessionZooKeeperClient.createWriteStatusTransaction(status);
    }

    public boolean isNewerThan(long sessionId) { return getSessionId() > sessionId; }

    public ApplicationMetaData getMetaData() {
        // Prefer the in-memory package; otherwise load it (via ZooKeeper) on demand.
        return applicationPackage.isPresent()
                ? applicationPackage.get().getMetaData()
                : sessionZooKeeperClient.loadApplicationPackage().getMetaData();
    }

    public ApplicationPackage getApplicationPackage() {
        return applicationPackage.orElseThrow(() -> new RuntimeException("No application package found for " + this));
    }

    public ApplicationFile getApplicationFile(Path relativePath, LocalSession.Mode mode) {
        // Opening a file for writing implies the session content will change, so reset its status.
        if (mode.equals(Session.Mode.WRITE)) {
            markSessionEdited();
        }
        return getApplicationPackage().getFile(relativePath);
    }

    private void markSessionEdited() { setStatus(Session.Status.NEW); }

    void setStatus(Session.Status newStatus) { sessionZooKeeperClient.writeStatus(newStatus); }

    @Override
    public int compareTo(Session rhs) {
        Long lhsId = getSessionId();
        Long rhsId = rhs.getSessionId();
        return lhsId.compareTo(rhsId);
    }

    public enum Mode {
        READ, WRITE
    }

}
`SlimeUtils` lets you easily convert JSON/bytes/Strings, for later :)
/**
 * Parses tenant metadata from its JSON representation.
 *
 * @throws IllegalArgumentException if the input cannot be parsed
 */
public static TenantMetaData fromJsonString(String jsonString) {
    try {
        Slime data = new Slime();
        new JsonDecoder().decode(data, Utf8.toBytes(jsonString));
        long epochMilli = data.get().field("lastDeployTimestamp").asLong();
        return new TenantMetaData(Instant.ofEpochMilli(epochMilli));
    } catch (Exception e) {
        throw new IllegalArgumentException("Error parsing json metadata", e);
    }
}
Inspector root = data.get();
/**
 * Parses tenant metadata from its JSON representation (the inverse of asJsonString).
 *
 * @throws IllegalArgumentException if the input cannot be parsed
 */
public static TenantMetaData fromJsonString(String jsonString) {
    try {
        Slime data = new Slime();
        new JsonDecoder().decode(data, Utf8.toBytes(jsonString));
        Inspector root = data.get();
        Inspector lastDeployTimestamp = root.field("lastDeployTimestamp");
        return new TenantMetaData(Instant.ofEpochMilli(lastDeployTimestamp.asLong()));
    } catch (Exception e) {
        throw new IllegalArgumentException("Error parsing json metadata", e);
    }
}
/** Metadata about a tenant; currently just the time of the most recent deployment. */
class TenantMetaData {

    private final Instant lastDeployTimestamp;

    public TenantMetaData(Instant instant) {
        this.lastDeployTimestamp = instant;
    }

    public Instant lastDeployTimestamp() { return lastDeployTimestamp; }

    /** Serializes this to a compact JSON string. */
    public String asJsonString() {
        ByteArrayOutputStream buffer = new ByteArrayOutputStream();
        try {
            new JsonFormat(false).encode(buffer, getSlime());
            return buffer.toString(StandardCharsets.UTF_8);
        } catch (IOException e) {
            throw new RuntimeException("Unable to encode metadata", e);
        }
    }

    private Slime getSlime() {
        Slime slime = new Slime();
        Cursor root = slime.setObject();
        root.setLong("lastDeployTimestamp", lastDeployTimestamp.toEpochMilli());
        return slime;
    }

}
/** Metadata about a tenant; currently just the time of the most recent deployment. */
class TenantMetaData {

    private final Instant lastDeployTimestamp;

    public TenantMetaData(Instant instant) {
        this.lastDeployTimestamp = instant;
    }

    public Instant lastDeployTimestamp() { return lastDeployTimestamp; }

    /** Serializes this to a compact (non-pretty-printed) JSON string. */
    public String asJsonString() {
        Slime slime = getSlime();
        ByteArrayOutputStream baos = new ByteArrayOutputStream();
        try {
            // false = compact output, no indentation
            new JsonFormat(false).encode(baos, slime);
            return baos.toString(StandardCharsets.UTF_8);
        } catch (IOException e) {
            throw new RuntimeException("Unable to encode metadata", e);
        }
    }

    // Builds the Slime tree: a single object with the epoch-millis timestamp.
    private Slime getSlime() {
        Slime slime = new Slime();
        Cursor meta = slime.setObject();
        meta.setLong("lastDeployTimestamp", lastDeployTimestamp.toEpochMilli());
        return slime;
    }
}
Thanks, forgot about `SlimeUtils`, I'll do a pass and convert some other code that should use SlimeUtils as well
/**
 * Parses tenant metadata from its JSON representation.
 *
 * @throws IllegalArgumentException if the input cannot be parsed
 */
public static TenantMetaData fromJsonString(String jsonString) {
    try {
        Slime slime = new Slime();
        new JsonDecoder().decode(slime, Utf8.toBytes(jsonString));
        Inspector timestampField = slime.get().field("lastDeployTimestamp");
        return new TenantMetaData(Instant.ofEpochMilli(timestampField.asLong()));
    } catch (Exception e) {
        throw new IllegalArgumentException("Error parsing json metadata", e);
    }
}
Inspector root = data.get();
/**
 * Parses tenant metadata from its JSON representation (the inverse of asJsonString).
 *
 * @throws IllegalArgumentException if the input cannot be parsed
 */
public static TenantMetaData fromJsonString(String jsonString) {
    try {
        Slime data = new Slime();
        new JsonDecoder().decode(data, Utf8.toBytes(jsonString));
        Inspector root = data.get();
        Inspector lastDeployTimestamp = root.field("lastDeployTimestamp");
        return new TenantMetaData(Instant.ofEpochMilli(lastDeployTimestamp.asLong()));
    } catch (Exception e) {
        throw new IllegalArgumentException("Error parsing json metadata", e);
    }
}
/** Metadata about a tenant; currently just the time of the most recent deployment. */
class TenantMetaData {

    private final Instant lastDeployTimestamp;

    public TenantMetaData(Instant instant) {
        this.lastDeployTimestamp = instant;
    }

    public Instant lastDeployTimestamp() { return lastDeployTimestamp; }

    /** Serializes this to a compact (non-pretty-printed) JSON string. */
    public String asJsonString() {
        Slime slime = getSlime();
        ByteArrayOutputStream baos = new ByteArrayOutputStream();
        try {
            // false = compact output, no indentation
            new JsonFormat(false).encode(baos, slime);
            return baos.toString(StandardCharsets.UTF_8);
        } catch (IOException e) {
            throw new RuntimeException("Unable to encode metadata", e);
        }
    }

    // Builds the Slime tree: a single object with the epoch-millis timestamp.
    private Slime getSlime() {
        Slime slime = new Slime();
        Cursor meta = slime.setObject();
        meta.setLong("lastDeployTimestamp", lastDeployTimestamp.toEpochMilli());
        return slime;
    }
}
/** Metadata about a tenant — at present only the last deployment time is tracked. */
class TenantMetaData {

    private final Instant lastDeployTimestamp;

    public TenantMetaData(Instant instant) {
        this.lastDeployTimestamp = instant;
    }

    public Instant lastDeployTimestamp() {
        return lastDeployTimestamp;
    }

    /** Returns this metadata rendered as a compact JSON string. */
    public String asJsonString() {
        ByteArrayOutputStream out = new ByteArrayOutputStream();
        try {
            new JsonFormat(false).encode(out, toSlime());
            return out.toString(StandardCharsets.UTF_8);
        } catch (IOException e) {
            throw new RuntimeException("Unable to encode metadata", e);
        }
    }

    private Slime toSlime() {
        Slime slime = new Slime();
        slime.setObject().setLong("lastDeployTimestamp", lastDeployTimestamp.toEpochMilli());
        return slime;
    }

}
So this turns off all deletion for zones where the metadata is not used. I guess that's OK?
/**
 * Deletes tenants that have no active applications and have not been deployed to
 * within the given TTL. The default tenant and the hosted-vespa tenant are never deleted.
 *
 * @param ttlForUnusedTenant how long a tenant may stay undeployed before it is deleted
 * @param now the current time, used to evaluate the TTL
 * @return the names of the tenants that were deleted
 */
public Set<TenantName> deleteUnusedTenants(Duration ttlForUnusedTenant, Instant now) {
    // Feature-flagged: without tenant metadata we cannot know the last deploy time,
    // so deletion is disabled entirely.
    if ( ! useTenantMetaData.value()) return Set.of();
    Set<TenantName> unused = tenantRepository.getAllTenantNames().stream()
            .filter(tenantName -> activeApplications(tenantName).isEmpty())
            .filter(tenantName -> !tenantName.equals(TenantName.defaultName()))
            .filter(tenantName -> !tenantName.equals(HOSTED_VESPA_TENANT))
            .filter(tenantName -> getTenantMetaData(tenantRepository.getTenant(tenantName))
                    .lastDeployTimestamp().isBefore(now.minus(ttlForUnusedTenant)))
            .collect(Collectors.toSet());
    // Perform the deletion outside the stream: peek() is intended for debugging and is
    // not guaranteed to be invoked for every element, so side effects belong in a loop.
    unused.forEach(tenantRepository::deleteTenant);
    return unused;
}
if ( ! useTenantMetaData.value()) return Set.of();
/**
 * Deletes tenants that have no active applications and have not been deployed to
 * within the given TTL. The default tenant and the hosted-vespa tenant are never deleted.
 *
 * @param ttlForUnusedTenant how long a tenant may stay undeployed before it is deleted
 * @param now the current time, used to evaluate the TTL
 * @return the names of the tenants that were deleted
 */
public Set<TenantName> deleteUnusedTenants(Duration ttlForUnusedTenant, Instant now) {
    // Feature-flagged: without tenant metadata we cannot know the last deploy time,
    // so deletion is disabled entirely.
    if ( ! useTenantMetaData.value()) return Set.of();
    Set<TenantName> unused = tenantRepository.getAllTenantNames().stream()
            .filter(tenantName -> activeApplications(tenantName).isEmpty())
            .filter(tenantName -> !tenantName.equals(TenantName.defaultName()))
            .filter(tenantName -> !tenantName.equals(HOSTED_VESPA_TENANT))
            .filter(tenantName -> getTenantMetaData(tenantRepository.getTenant(tenantName))
                    .lastDeployTimestamp().isBefore(now.minus(ttlForUnusedTenant)))
            .collect(Collectors.toSet());
    // Perform the deletion outside the stream: peek() is intended for debugging and is
    // not guaranteed to be invoked for every element, so side effects belong in a loop.
    unused.forEach(tenantRepository::deleteTenant);
    return unused;
}
class ApplicationRepository implements com.yahoo.config.provision.Deployer { private static final Logger log = Logger.getLogger(ApplicationRepository.class.getName()); private final TenantRepository tenantRepository; private final Optional<Provisioner> hostProvisioner; private final Optional<InfraDeployer> infraDeployer; private final ConfigConvergenceChecker convergeChecker; private final HttpProxy httpProxy; private final Clock clock; private final DeployLogger logger = new SilentDeployLogger(); private final ConfigserverConfig configserverConfig; private final FileDistributionStatus fileDistributionStatus; private final Orchestrator orchestrator; private final LogRetriever logRetriever; private final TesterClient testerClient; private final Metric metric; private final BooleanFlag useTenantMetaData; @Inject public ApplicationRepository(TenantRepository tenantRepository, HostProvisionerProvider hostProvisionerProvider, InfraDeployerProvider infraDeployerProvider, ConfigConvergenceChecker configConvergenceChecker, HttpProxy httpProxy, ConfigserverConfig configserverConfig, Orchestrator orchestrator, TesterClient testerClient, Metric metric, FlagSource flagSource) { this(tenantRepository, hostProvisionerProvider.getHostProvisioner(), infraDeployerProvider.getInfraDeployer(), configConvergenceChecker, httpProxy, configserverConfig, orchestrator, new LogRetriever(), new FileDistributionStatus(), Clock.systemUTC(), testerClient, metric, flagSource); } public ApplicationRepository(TenantRepository tenantRepository, Provisioner hostProvisioner, Orchestrator orchestrator, Clock clock) { this(tenantRepository, hostProvisioner, orchestrator, new ConfigserverConfig(new ConfigserverConfig.Builder()), new LogRetriever(), clock, new TesterClient(), new NullMetric(), new InMemoryFlagSource()); } public ApplicationRepository(TenantRepository tenantRepository, Provisioner hostProvisioner, Orchestrator orchestrator, ConfigserverConfig configserverConfig, LogRetriever logRetriever, 
Clock clock, TesterClient testerClient, Metric metric, FlagSource flagSource) { this(tenantRepository, Optional.of(hostProvisioner), Optional.empty(), new ConfigConvergenceChecker(), new HttpProxy(new SimpleHttpFetcher()), configserverConfig, orchestrator, logRetriever, new FileDistributionStatus(), clock, testerClient, metric, flagSource); } private ApplicationRepository(TenantRepository tenantRepository, Optional<Provisioner> hostProvisioner, Optional<InfraDeployer> infraDeployer, ConfigConvergenceChecker configConvergenceChecker, HttpProxy httpProxy, ConfigserverConfig configserverConfig, Orchestrator orchestrator, LogRetriever logRetriever, FileDistributionStatus fileDistributionStatus, Clock clock, TesterClient testerClient, Metric metric, FlagSource flagSource) { this.tenantRepository = tenantRepository; this.hostProvisioner = hostProvisioner; this.infraDeployer = infraDeployer; this.convergeChecker = configConvergenceChecker; this.httpProxy = httpProxy; this.configserverConfig = configserverConfig; this.orchestrator = orchestrator; this.logRetriever = logRetriever; this.fileDistributionStatus = fileDistributionStatus; this.clock = clock; this.testerClient = testerClient; this.metric = metric; this.useTenantMetaData = Flags.USE_TENANT_META_DATA.bindTo(flagSource); } public Metric metric() { return metric; } public PrepareResult prepare(Tenant tenant, long sessionId, PrepareParams prepareParams, Instant now) { validateThatLocalSessionIsNotActive(tenant, sessionId); LocalSession session = getLocalSession(tenant, sessionId); ApplicationId applicationId = prepareParams.getApplicationId(); Optional<ApplicationSet> currentActiveApplicationSet = getCurrentActiveApplicationSet(tenant, applicationId); Slime deployLog = createDeployLog(); DeployLogger logger = new DeployHandlerLogger(deployLog.get().setArray("log"), prepareParams.isVerbose(), applicationId); try (ActionTimer timer = timerFor(applicationId, "deployment.prepareMillis")) { SessionRepository 
sessionRepository = tenant.getSessionRepository(); ConfigChangeActions actions = sessionRepository.prepareLocalSession(session, logger, prepareParams, currentActiveApplicationSet, tenant.getPath(), now); logConfigChangeActions(actions, logger); log.log(Level.INFO, TenantRepository.logPre(applicationId) + "Session " + sessionId + " prepared successfully. "); return new PrepareResult(sessionId, actions, deployLog); } } public PrepareResult deploy(CompressedApplicationInputStream in, PrepareParams prepareParams, boolean ignoreSessionStaleFailure, Instant now) { File tempDir = uncheck(() -> Files.createTempDirectory("deploy")).toFile(); PrepareResult prepareResult; try { prepareResult = deploy(decompressApplication(in, tempDir), prepareParams, ignoreSessionStaleFailure, now); } finally { cleanupTempDirectory(tempDir); } return prepareResult; } public PrepareResult deploy(File applicationPackage, PrepareParams prepareParams) { return deploy(applicationPackage, prepareParams, false, Instant.now()); } public PrepareResult deploy(File applicationPackage, PrepareParams prepareParams, boolean ignoreSessionStaleFailure, Instant now) { ApplicationId applicationId = prepareParams.getApplicationId(); long sessionId = createSession(applicationId, prepareParams.getTimeoutBudget(), applicationPackage); Tenant tenant = getTenant(applicationId); PrepareResult result = prepare(tenant, sessionId, prepareParams, now); activate(tenant, sessionId, prepareParams.getTimeoutBudget(), ignoreSessionStaleFailure); return result; } /** * Creates a new deployment from the active application, if available. * This is used for system internal redeployments, not on application package changes. 
* * @param application the active application to be redeployed * @return a new deployment from the local active, or empty if a local active application * was not present for this id (meaning it either is not active or active on another * node in the config server cluster) */ @Override public Optional<com.yahoo.config.provision.Deployment> deployFromLocalActive(ApplicationId application) { return deployFromLocalActive(application, false); } /** * Creates a new deployment from the active application, if available. * This is used for system internal redeployments, not on application package changes. * * @param application the active application to be redeployed * @param bootstrap the deployment is done when bootstrapping * @return a new deployment from the local active, or empty if a local active application * was not present for this id (meaning it either is not active or active on another * node in the config server cluster) */ @Override public Optional<com.yahoo.config.provision.Deployment> deployFromLocalActive(ApplicationId application, boolean bootstrap) { return deployFromLocalActive(application, Duration.ofSeconds(configserverConfig.zookeeper().barrierTimeout()).plus(Duration.ofSeconds(5)), bootstrap); } /** * Creates a new deployment from the active application, if available. * This is used for system internal redeployments, not on application package changes. 
*
 * @param application the active application to be redeployed
 * @param timeout the timeout to use for each individual deployment operation
 * @param bootstrap the deployment is done when bootstrapping
 * @return a new deployment from the local active, or empty if a local active application
 *         was not present for this id (meaning it either is not active or active on another
 *         node in the config server cluster)
 */
@Override
public Optional<com.yahoo.config.provision.Deployment> deployFromLocalActive(ApplicationId application,
                                                                             Duration timeout,
                                                                             boolean bootstrap) {
    // Infrastructure applications are deployed through the infra deployer, not through local sessions.
    Optional<com.yahoo.config.provision.Deployment> infraDeployment = infraDeployer.flatMap(d -> d.getDeployment(application));
    if (infraDeployment.isPresent()) return infraDeployment;

    Tenant tenant = tenantRepository.getTenant(application.tenant());
    if (tenant == null) return Optional.empty();
    LocalSession activeSession = getActiveLocalSession(tenant, application);
    if (activeSession == null) return Optional.empty();
    TimeoutBudget timeoutBudget = new TimeoutBudget(clock, timeout);
    SessionRepository sessionRepository = tenant.getSessionRepository();
    // Clone the active session: redeployment always activates a fresh session id.
    LocalSession newSession = sessionRepository.createSessionFromExisting(activeSession, logger, true, timeoutBudget);
    sessionRepository.addLocalSession(newSession);
    return Optional.of(Deployment.unprepared(newSession, this, hostProvisioner, tenant, timeout, clock,
                                             false /* don't validate as this is already deployed */, bootstrap));
}

/** Returns the creation time of the active session for this application, or empty if none is active here. */
@Override
public Optional<Instant> lastDeployTime(ApplicationId application) {
    Tenant tenant = tenantRepository.getTenant(application.tenant());
    if (tenant == null) return Optional.empty();
    RemoteSession activeSession = getActiveSession(tenant, application);
    if (activeSession == null) return Optional.empty();
    return Optional.of(activeSession.getCreateTime());
}

/** Activates the given (already prepared) session and returns the application id it belongs to. */
public ApplicationId activate(Tenant tenant,
                              long sessionId,
                              TimeoutBudget timeoutBudget,
                              boolean ignoreSessionStaleFailure) {
    LocalSession localSession = getLocalSession(tenant, sessionId);
    Deployment deployment = deployFromPreparedSession(localSession, tenant, timeoutBudget.timeLeft());
    deployment.setIgnoreSessionStaleFailure(ignoreSessionStaleFailure);
    deployment.activate();
    return localSession.getApplicationId();
}

/** Creates a Deployment wrapper for a session that has already been prepared (no re-validation). */
private Deployment deployFromPreparedSession(LocalSession session, Tenant tenant, Duration timeout) {
    return Deployment.prepared(session, this, hostProvisioner, tenant, timeout, clock, false);
}

/**
 * Builds a transaction that deactivates the currently active session (if any) and activates the
 * prepared one. Staleness checks run first so a concurrent activation is detected before commit.
 */
public Transaction deactivateCurrentActivateNew(Session active, LocalSession prepared, boolean ignoreStaleSessionFailure) {
    Tenant tenant = tenantRepository.getTenant(prepared.getTenantName());
    Transaction transaction = tenant.getSessionRepository().createActivateTransaction(prepared);
    if (active != null) {
        checkIfActiveHasChanged(prepared, active, ignoreStaleSessionFailure);
        checkIfActiveIsNewerThanSessionToBeActivated(prepared.getSessionId(), active.getSessionId());
        transaction.add(active.createDeactivateTransaction().operations());
    }
    // Feature-flagged: also persist tenant metadata as part of the same transaction.
    if (useTenantMetaData.value())
        transaction.add(writeTenantMetaData(tenant).operations());
    return transaction;
}

/** Serializes current tenant metadata (last-deploy timestamp) to JSON bytes. */
private byte[] createMetaData(Tenant tenant) {
    return new TenantMetaData(tenant.getSessionRepository().clock().instant()).asJsonBytes();
}

/** Reads tenant metadata from ZooKeeper, falling back to the tenant's creation time if none is stored. */
TenantMetaData getTenantMetaData(Tenant tenant) {
    Optional<byte[]> data = tenantRepository.getCurator().getData(TenantRepository.getTenantPath(tenant.getName()));
    return data.map(bytes -> TenantMetaData.fromJsonString(Utf8.toString(bytes)))
               .orElse(new TenantMetaData(tenant.getCreatedTime()));
}

/** Returns a curator transaction that writes this tenant's metadata to its ZooKeeper path. */
private Transaction writeTenantMetaData(Tenant tenant) {
    return new CuratorTransaction(tenantRepository.getCurator())
            .add(CuratorOperations.setData(TenantRepository.getTenantPath(tenant.getName()).getAbsolute(),
                                           createMetaData(tenant)));
}

/**
 * Fails (or warns, if forced) when the active session has changed since the session to be
 * activated was created — i.e. someone else activated in between.
 */
static void checkIfActiveHasChanged(LocalSession session, Session currentActiveSession, boolean ignoreStaleSessionFailure) {
    long activeSessionAtCreate = session.getActiveSessionAtCreate();
    log.log(Level.FINE, currentActiveSession.logPre() + "active session id at create time=" + activeSessionAtCreate);
    // 0 means no session was active when this session was created; nothing to compare against.
    if (activeSessionAtCreate == 0) return;
    long sessionId = session.getSessionId();
    long currentActiveSessionSessionId = currentActiveSession.getSessionId();
    log.log(Level.FINE, currentActiveSession.logPre() + "sessionId=" + sessionId +
                        ", current active session=" + currentActiveSessionSessionId);
    if (currentActiveSession.isNewerThan(activeSessionAtCreate) &&
        currentActiveSessionSessionId != sessionId) {
        String errMsg = currentActiveSession.logPre() + "Cannot activate session " + sessionId +
                        " because the currently active session (" + currentActiveSessionSessionId +
                        ") has changed since session " + sessionId + " was created (was " +
                        activeSessionAtCreate + " at creation time)";
        if (ignoreStaleSessionFailure) {
            log.warning(errMsg + " (Continuing because of force.)");
        } else {
            throw new ActivationConflictException(errMsg);
        }
    }
}

/** Rejects activation of a session older than the currently active one. */
static void checkIfActiveIsNewerThanSessionToBeActivated(long sessionId, long currentActiveSessionId) {
    if (sessionId < currentActiveSessionId) {
        throw new ActivationConflictException("It is not possible to activate session " + sessionId +
                                              ", because it is older than current active session (" +
                                              currentActiveSessionId + ")");
    }
}

/**
 * Deletes an application
 *
 * @return true if the application was found and deleted, false if it was not present
 * @throws RuntimeException if the delete transaction fails. This method is exception safe.
 */
boolean delete(ApplicationId applicationId) {
    return delete(applicationId, Duration.ofSeconds(60));
}

/**
 * Deletes an application
 *
 * @param waitTime how long to wait for the remote session to disappear before rolling back
 * @return true if the application was found and deleted, false if it was not present
 * @throws RuntimeException if the delete transaction fails. This method is exception safe.
 */
public boolean delete(ApplicationId applicationId, Duration waitTime) {
    Tenant tenant = getTenant(applicationId);
    if (tenant == null) return false;

    TenantApplications tenantApplications = tenant.getApplicationRepo();
    // Lock per application so concurrent deletes/deploys of the same app serialize.
    try (Lock lock = tenantApplications.lock(applicationId)) {
        if ( ! tenantApplications.exists(applicationId)) return false;

        Optional<Long> activeSession = tenantApplications.activeSessionOf(applicationId);
        if (activeSession.isEmpty()) return false;

        long sessionId = activeSession.get();
        RemoteSession remoteSession;
        try {
            remoteSession = getRemoteSession(tenant, sessionId);
            Transaction deleteTransaction = remoteSession.createDeleteTransaction();
            deleteTransaction.commit();
            log.log(Level.INFO, TenantRepository.logPre(applicationId) + "Waiting for session " + sessionId + " to be deleted");
            if ( ! waitTime.isZero() && localSessionHasBeenDeleted(applicationId, sessionId, waitTime)) {
                log.log(Level.INFO, TenantRepository.logPre(applicationId) + "Session " + sessionId + " deleted");
            } else {
                // Session did not disappear in time: undo the delete so we don't leave half-deleted state.
                deleteTransaction.rollbackOrLog();
                throw new InternalServerException(applicationId + " was not deleted (waited " + waitTime + "), session " + sessionId);
            }
        } catch (NotFoundException e) {
            // Stale bookkeeping: active session id exists but session is gone; continue with cleanup below.
            log.log(Level.INFO, TenantRepository.logPre(applicationId) + "Active session exists, but has not been deleted properly. Trying to cleanup");
        }

        // Remove all per-application state (endpoints, roles, certificates, app entry, provisioned hosts)
        // in one nested transaction.
        NestedTransaction transaction = new NestedTransaction();
        Curator curator = tenantRepository.getCurator();
        transaction.add(new ContainerEndpointsCache(tenant.getPath(), curator).delete(applicationId));
        transaction.add(new ApplicationRolesStore(curator, tenant.getPath()).delete(applicationId));
        transaction.add(new EndpointCertificateMetadataStore(curator, tenant.getPath()).delete(applicationId));
        transaction.add(tenantApplications.createDeleteTransaction(applicationId));
        hostProvisioner.ifPresent(provisioner -> provisioner.remove(transaction, applicationId));
        transaction.onCommitted(() -> log.log(Level.INFO, "Deleted " + applicationId));
        transaction.commit();
        return true;
    }
}

/** Proxies a cluster controller status page request to the given host. */
public HttpResponse clusterControllerStatusPage(ApplicationId applicationId, String hostName, String pathSuffix) {
    String relativePath = "clustercontroller-status/" + pathSuffix;
    return httpProxy.get(getApplication(applicationId), hostName,
                         CLUSTERCONTROLLER_CONTAINER.serviceName, relativePath);
}

/** Returns the config generation of the given application. */
public Long getApplicationGeneration(ApplicationId applicationId) {
    return getApplication(applicationId).getApplicationGeneration();
}

/** Requests a restart of the application's services matching the filter (no-op without a provisioner). */
public void restart(ApplicationId applicationId, HostFilter hostFilter) {
    hostProvisioner.ifPresent(provisioner -> provisioner.restart(applicationId, hostFilter));
}

/** Returns whether the orchestrator currently lists this application as suspended. */
public boolean isSuspended(ApplicationId application) {
    return orchestrator.getAllSuspendedApplications().contains(application);
}

/** Returns file distribution status for the application's hosts. */
public HttpResponse filedistributionStatus(ApplicationId applicationId, Duration timeout) {
    return fileDistributionStatus.status(getApplication(applicationId), timeout);
}

/**
 * Deletes file references on disk that are not referenced by any active application and are
 * older than {@code keepFileReferences}.
 *
 * @return the set of file references that were selected for deletion
 */
public Set<String> deleteUnusedFiledistributionReferences(File fileReferencesPath, Duration keepFileReferences) {
    if (!fileReferencesPath.isDirectory()) throw new RuntimeException(fileReferencesPath + " is not a directory");

    // Collect every file reference still used by some active application; a failure for one
    // application is logged and skipped (better to keep too much than delete something in use).
    Set<String> fileReferencesInUse = new HashSet<>();
    for (var applicationId : listApplications()) {
        try {
            Optional<Application> app = getOptionalApplication(applicationId);
            if (app.isEmpty()) continue;
            fileReferencesInUse.addAll(app.get().getModel().fileReferences().stream()
                                               .map(FileReference::value)
                                               .collect(Collectors.toSet()));
        } catch (Exception e) {
            log.log(Level.WARNING, "Getting file references in use for '" + applicationId + "' failed", e);
        }
    }
    log.log(Level.FINE, "File references in use : " + fileReferencesInUse);

    Set<String> fileReferencesOnDisk = getFileReferencesOnDisk(fileReferencesPath);
    log.log(Level.FINE, "File references on disk (in " + fileReferencesPath + "): " + fileReferencesOnDisk);

    // Delete only references that are both unused and untouched for longer than keepFileReferences.
    Instant instant = Instant.now().minus(keepFileReferences);
    Set<String> fileReferencesToDelete = fileReferencesOnDisk
            .stream()
            .filter(fileReference -> ! fileReferencesInUse.contains(fileReference))
            .filter(fileReference -> isFileLastModifiedBefore(new File(fileReferencesPath, fileReference), instant))
            .collect(Collectors.toSet());
    if (fileReferencesToDelete.size() > 0) {
        log.log(Level.FINE, "Will delete file references not in use: " + fileReferencesToDelete);
        fileReferencesToDelete.forEach(fileReference -> {
            File file = new File(fileReferencesPath, fileReference);
            if ( ! IOUtils.recursiveDeleteDir(file))
                log.log(Level.WARNING, "Could not delete " + file.getAbsolutePath());
        });
    }
    return fileReferencesToDelete;
}

/** Returns the file references of the application's model, or empty set if the application cannot be loaded. */
public Set<FileReference> getFileReferences(ApplicationId applicationId) {
    return getOptionalApplication(applicationId).map(app -> app.getModel().fileReferences()).orElse(Set.of());
}

/** Returns a file from the application package of the given session. */
public ApplicationFile getApplicationFileFromSession(TenantName tenantName, long sessionId, String path, LocalSession.Mode mode) {
    Tenant tenant = tenantRepository.getTenant(tenantName);
    return getLocalSession(tenant, sessionId).getApplicationFile(Path.fromString(path), mode);
}

/** Returns the tenant owning this application, or null if unknown. */
public Tenant getTenant(ApplicationId applicationId) {
    return tenantRepository.getTenant(applicationId.tenant());
}

private Application getApplication(ApplicationId applicationId) {
    return getApplication(applicationId, Optional.empty());
}

/**
 * Loads the active application for the given id, for the given (or latest) Vespa version.
 *
 * @throws NotFoundException if the tenant or active session does not exist
 */
private Application getApplication(ApplicationId applicationId, Optional<Version> version) {
    try {
        Tenant tenant = getTenant(applicationId);
        if (tenant == null) throw new NotFoundException("Tenant '" + applicationId.tenant() + "' not found");
        long sessionId = getSessionIdForApplication(tenant, applicationId);
        RemoteSession session = getRemoteSession(tenant, sessionId);
        return session.ensureApplicationLoaded().getForVersionOrLatest(version, clock.instant());
    } catch (NotFoundException e) {
        log.log(Level.WARNING, "Failed getting application for '" + applicationId + "': " + e.getMessage());
        throw e;
    } catch (Exception e) {
        log.log(Level.WARNING, "Failed getting application for '" + applicationId + "'", e);
        throw e;
    }
}

/** Like {@link #getApplication(ApplicationId)}, but mapping any failure to empty instead of throwing. */
private Optional<Application> getOptionalApplication(ApplicationId applicationId) {
    try {
        return Optional.of(getApplication(applicationId));
    } catch (Exception e) {
        return Optional.empty();
    }
}

/** Returns all active applications across all tenants. */
public Set<ApplicationId> listApplications() {
    return tenantRepository.getAllTenants().stream()
            .flatMap(tenant -> tenant.getApplicationRepo().activeApplications().stream())
            .collect(Collectors.toSet());
}

/** Returns whether the file was last modified before the given instant. */
private boolean isFileLastModifiedBefore(File fileReference, Instant instant) {
    BasicFileAttributes fileAttributes;
    try {
        fileAttributes = readAttributes(fileReference.toPath(), BasicFileAttributes.class);
        return fileAttributes.lastModifiedTime().toInstant().isBefore(instant);
    } catch (IOException e) {
        throw new UncheckedIOException(e);
    }
}

/** Polls (10 ms interval) until the remote session is gone or waitTime has elapsed. */
private boolean localSessionHasBeenDeleted(ApplicationId applicationId, long sessionId, Duration waitTime) {
    SessionRepository sessionRepository = getTenant(applicationId).getSessionRepository();
    Instant end = Instant.now().plus(waitTime);
    do {
        if (sessionRepository.getRemoteSession(sessionId) == null) return true;
        try { Thread.sleep(10); } catch (InterruptedException e) { /* ignored */}
    } while (Instant.now().isBefore(end));

    return false;
}

/**
 * Returns the application package reference of the active session, but only when the referenced
 * package is NOT yet present in the local download directory (i.e. it still needs to be fetched).
 */
public Optional<String> getApplicationPackageReference(ApplicationId applicationId) {
    Optional<String> applicationPackage = Optional.empty();
    RemoteSession session = getActiveSession(applicationId);
    if (session != null) {
        FileReference applicationPackageReference = session.getApplicationPackageReference();
        File downloadDirectory = new File(Defaults.getDefaults().underVespaHome(configserverConfig().fileReferencesDir()));
        if (applicationPackageReference != null && ! fileReferenceExistsOnDisk(downloadDirectory, applicationPackageReference))
            applicationPackage = Optional.of(applicationPackageReference.value());
    }
    return applicationPackage;
}

/** Returns all Vespa versions the active application set is built for, or empty list if none. */
public List<Version> getAllVersions(ApplicationId applicationId) {
    Optional<ApplicationSet> applicationSet = getCurrentActiveApplicationSet(getTenant(applicationId), applicationId);
    if (applicationSet.isEmpty())
        return List.of();
    else
        return applicationSet.get().getAllVersions(applicationId);
}

/** Checks config convergence for a single service of the application. */
public HttpResponse checkServiceForConfigConvergence(ApplicationId applicationId, String hostAndPort, URI uri,
                                                     Duration timeout, Optional<Version> vespaVersion) {
    return convergeChecker.checkService(getApplication(applicationId, vespaVersion), hostAndPort, uri, timeout);
}

/** Lists the services whose config convergence should be checked for the application. */
public HttpResponse servicesToCheckForConfigConvergence(ApplicationId applicationId, URI uri,
                                                        Duration timeoutPerService, Optional<Version> vespaVersion) {
    return convergeChecker.servicesToCheck(getApplication(applicationId, vespaVersion), uri, timeoutPerService);
}

/** Fetches logs from the application's log server, optionally restricted to one hostname. */
public HttpResponse getLogs(ApplicationId applicationId, Optional<String> hostname, String apiParams) {
    String logServerURI = getLogServerURI(applicationId, hostname) + apiParams;
    return logRetriever.getLogs(logServerURI);
}

public HttpResponse getTesterStatus(ApplicationId applicationId) {
    return testerClient.getStatus(getTesterHostname(applicationId), getTesterPort(applicationId));
}

public HttpResponse getTesterLog(ApplicationId applicationId, Long after) {
    return testerClient.getLog(getTesterHostname(applicationId), getTesterPort(applicationId), after);
}

public HttpResponse startTests(ApplicationId applicationId, String suite, byte[] config) {
    return testerClient.startTests(getTesterHostname(applicationId), getTesterPort(applicationId), suite, config);
}

public HttpResponse isTesterReady(ApplicationId applicationId) {
    return testerClient.isTesterReady(getTesterHostname(applicationId), getTesterPort(applicationId));
}

private String getTesterHostname(ApplicationId applicationId) {
    return getTesterServiceInfo(applicationId).getHostName();
}

/** Returns the port tagged "http" of the tester container service. */
private int getTesterPort(ApplicationId applicationId) {
    ServiceInfo serviceInfo = getTesterServiceInfo(applicationId);
    return serviceInfo.getPorts().stream().filter(portInfo -> portInfo.getTags().contains("http")).findFirst().get().getPort();
}

/** Finds the tester container's ServiceInfo on the first host of the tester application's model. */
private ServiceInfo getTesterServiceInfo(ApplicationId applicationId) {
    Application application = getApplication(applicationId);
    return application.getModel().getHosts().stream()
            .findFirst().orElseThrow(() -> new InternalServerException("Could not find any host for tester app " + applicationId.toFullString()))
            .getServices().stream()
            .filter(service -> CONTAINER.serviceName.equals(service.getServiceType()))
            .findFirst()
            .orElseThrow(() -> new InternalServerException("Could not find any tester container for tester app " + applicationId.toFullString()));
}

/**
 * Activates the prepared session (deactivating the previous active one) and, when a provisioner
 * exists, activates the allocated hosts in the same transaction. Returns a waiter that completes
 * when all config servers have seen the new session.
 */
public CompletionWaiter activate(LocalSession session, Session previousActiveSession, ApplicationId applicationId,
                                 boolean ignoreSessionStaleFailure) {
    CompletionWaiter waiter = session.getSessionZooKeeperClient().createActiveWaiter();
    NestedTransaction transaction = new NestedTransaction();
    transaction.add(deactivateCurrentActivateNew(previousActiveSession, session, ignoreSessionStaleFailure));
    hostProvisioner.ifPresent(provisioner -> provisioner.activate(transaction, applicationId, session.getAllocatedHosts().getHosts()));
    transaction.commit();
    return waiter;
}

/**
 * Gets the active Session for the given application id.
 *
 * @return the active session, or null if there is no active session for the given application id.
 */
public RemoteSession getActiveSession(ApplicationId applicationId) {
    return getActiveSession(getTenant(applicationId), applicationId);
}

public long getSessionIdForApplication(ApplicationId applicationId) {
    Tenant tenant = getTenant(applicationId);
    if (tenant == null) throw new NotFoundException("Tenant '" + applicationId.tenant() + "' not found");
    return getSessionIdForApplication(tenant, applicationId);
}

private long getSessionIdForApplication(Tenant tenant, ApplicationId applicationId) {
    TenantApplications applicationRepo = tenant.getApplicationRepo();
    if (! applicationRepo.exists(applicationId))
        throw new NotFoundException("Unknown application id '" + applicationId + "'");
    return applicationRepo.requireActiveSessionOf(applicationId);
}

/** Fails if the given (remote) session is already active. */
public void validateThatSessionIsNotActive(Tenant tenant, long sessionId) {
    Session session = getRemoteSession(tenant, sessionId);
    if (Session.Status.ACTIVATE.equals(session.getStatus())) {
        throw new IllegalStateException("Session is active: " + sessionId);
    }
}

/** Fails unless the given session has been prepared. */
public void validateThatSessionIsPrepared(Tenant tenant, long sessionId) {
    Session session = getRemoteSession(tenant, sessionId);
    if ( ! Session.Status.PREPARE.equals(session.getStatus()))
        throw new IllegalStateException("Session not prepared: " + sessionId);
}

/** Creates a new local session as a copy of the application's currently active session. */
public long createSessionFromExisting(ApplicationId applicationId, DeployLogger logger,
                                      boolean internalRedeploy, TimeoutBudget timeoutBudget) {
    Tenant tenant = getTenant(applicationId);
    SessionRepository sessionRepository = tenant.getSessionRepository();
    RemoteSession fromSession = getExistingSession(tenant, applicationId);
    LocalSession session = sessionRepository.createSessionFromExisting(fromSession, logger, internalRedeploy, timeoutBudget);
    sessionRepository.addLocalSession(session);
    return session.getSessionId();
}

/** Creates a session from a compressed application package stream; the temp dir is always cleaned up. */
public long createSession(ApplicationId applicationId, TimeoutBudget timeoutBudget, InputStream in, String contentType) {
    File tempDir = uncheck(() -> Files.createTempDirectory("deploy")).toFile();
    long sessionId;
    try {
        sessionId = createSession(applicationId, timeoutBudget, decompressApplication(in, contentType, tempDir));
    } finally {
        cleanupTempDirectory(tempDir);
    }
    return sessionId;
}

/** Creates a session from an application package directory on disk. */
public long createSession(ApplicationId applicationId, TimeoutBudget timeoutBudget, File applicationDirectory) {
    Tenant tenant = getTenant(applicationId);
    tenant.getApplicationRepo().createApplication(applicationId);
    Optional<Long> activeSessionId = tenant.getApplicationRepo().activeSessionOf(applicationId);
    LocalSession session = tenant.getSessionRepository().createSession(applicationDirectory,
                                                                       applicationId,
                                                                       timeoutBudget,
                                                                       activeSessionId);
    tenant.getSessionRepository().addLocalSession(session);
    return session.getSessionId();
}

/** Deletes expired local sessions across all tenants, sparing each application's active session. */
public void deleteExpiredLocalSessions() {
    Map<Tenant, List<LocalSession>> sessionsPerTenant = new HashMap<>();
    tenantRepository.getAllTenants().forEach(tenant -> sessionsPerTenant.put(tenant, tenant.getSessionRepository().getLocalSessions()));

    // All application ids that have at least one local session.
    Set<ApplicationId> applicationIds = new HashSet<>();
    sessionsPerTenant.values()
            .forEach(sessionList -> sessionList.stream()
                    .map(Session::getOptionalApplicationId)
                    .filter(Optional::isPresent)
                    .forEach(appId -> applicationIds.add(appId.get())));

    // Map each application to its active session id, so expiry skips active sessions.
    Map<ApplicationId, Long> activeSessions = new HashMap<>();
    applicationIds.forEach(applicationId -> {
        RemoteSession activeSession = getActiveSession(applicationId);
        if (activeSession != null)
            activeSessions.put(applicationId, activeSession.getSessionId());
    });
    sessionsPerTenant.keySet().forEach(tenant -> tenant.getSessionRepository().deleteExpiredSessions(activeSessions));
}

/** Deletes expired session locks across all tenants; returns the number deleted. */
public int deleteExpiredSessionLocks(Duration expiryTime) {
    return tenantRepository.getAllTenants()
            .stream()
            .map(tenant -> tenant.getSessionRepository().deleteExpiredLocks(clock, expiryTime))
            .mapToInt(i -> i)
            .sum();
}

public int deleteExpiredRemoteSessions(Duration expiryTime) {
    return deleteExpiredRemoteSessions(clock, expiryTime);
}

/** Deletes expired remote sessions across all tenants; returns the number deleted. */
public int deleteExpiredRemoteSessions(Clock clock, Duration expiryTime) {
    return tenantRepository.getAllTenants()
            .stream()
            .map(tenant -> tenant.getSessionRepository().deleteExpiredRemoteSessions(clock, expiryTime))
            .mapToInt(i -> i)
            .sum();
}

public TenantRepository tenantRepository() {
    return tenantRepository;
}

/** Deletes a tenant; refuses if it still has active applications. */
public void deleteTenant(TenantName tenantName) {
    List<ApplicationId> activeApplications = activeApplications(tenantName);
    if (activeApplications.isEmpty())
        tenantRepository.deleteTenant(tenantName);
    else
        throw new IllegalArgumentException("Cannot delete tenant '" + tenantName + "', it has active applications: " + activeApplications);
}

private List<ApplicationId> activeApplications(TenantName tenantName) {
    return tenantRepository.getTenant(tenantName).getApplicationRepo().activeApplications();
}

/** Retrieves proton (content node) metrics for the application. */
public ProtonMetricsResponse getProtonMetrics(ApplicationId applicationId) {
    Application application = getApplication(applicationId);
    ProtonMetricsRetriever protonMetricsRetriever = new ProtonMetricsRetriever();
    return protonMetricsRetriever.getMetrics(application);
}

/** Retrieves deployment metrics for the application. */
public DeploymentMetricsResponse getDeploymentMetrics(ApplicationId applicationId) {
    Application application = getApplication(applicationId);
    DeploymentMetricsRetriever deploymentMetricsRetriever = new DeploymentMetricsRetriever();
    return deploymentMetricsRetriever.getMetrics(application);
}

public ApplicationMetaData getMetadataFromLocalSession(Tenant tenant, long sessionId) {
    return getLocalSession(tenant, sessionId).getMetaData();
}

public ConfigserverConfig configserverConfig() {
    return configserverConfig;
}

/** Looks up which application a hostname belongs to, across all tenants; null if none. */
public ApplicationId getApplicationIdForHostname(String hostname) {
    Optional<ApplicationId> applicationId = tenantRepository.getAllTenantNames().stream()
            .map(tenantName -> tenantRepository.getTenant(tenantName).getApplicationRepo().getApplicationIdForHostName(hostname))
            .filter(Objects::nonNull)
            .findFirst();
    return applicationId.orElse(null);
}

/** Fails if the given (local) session is already active. */
private void validateThatLocalSessionIsNotActive(Tenant tenant, long sessionId) {
    LocalSession session = getLocalSession(tenant, sessionId);
    if (Session.Status.ACTIVATE.equals(session.getStatus())) {
        throw new IllegalStateException("Session is active: " + sessionId);
    }
}

private LocalSession getLocalSession(Tenant tenant, long sessionId) {
    LocalSession session = tenant.getSessionRepository().getLocalSession(sessionId);
    if (session == null) throw new NotFoundException("Session " + sessionId + " was not found");

    return session;
}

private RemoteSession getRemoteSession(Tenant tenant, long sessionId) {
    RemoteSession session = tenant.getSessionRepository().getRemoteSession(sessionId);
    if (session == null) throw new NotFoundException("Session " + sessionId + " was not found");

    return session;
}

/** Returns the currently active application set for the application, or empty if it has no active session. */
public Optional<ApplicationSet> getCurrentActiveApplicationSet(Tenant tenant, ApplicationId appId) {
    Optional<ApplicationSet> currentActiveApplicationSet = Optional.empty();
    TenantApplications applicationRepo = tenant.getApplicationRepo();
    try {
        long currentActiveSessionId = applicationRepo.requireActiveSessionOf(appId);
        RemoteSession currentActiveSession = getRemoteSession(tenant, currentActiveSessionId);
        currentActiveApplicationSet = Optional.ofNullable(currentActiveSession.ensureApplicationLoaded());
    } catch (IllegalArgumentException e) {
        // Intentionally swallowed: presumably requireActiveSessionOf throws when there is no
        // active session, and we then return empty — NOTE(review): confirm the exception source.
    }
    return currentActiveApplicationSet;
}

/** Decompresses a compressed application package stream into tempDir. */
private File decompressApplication(InputStream in, String contentType, File tempDir) {
    try (CompressedApplicationInputStream application =
                 CompressedApplicationInputStream.createFromCompressedStream(in, contentType)) {
        return decompressApplication(application, tempDir);
    } catch (IOException e) {
        throw new IllegalArgumentException("Unable to decompress data in body", e);
    }
}

private File decompressApplication(CompressedApplicationInputStream in, File tempDir) {
    try {
        return in.decompress(tempDir);
    } catch (IOException e) {
        throw new IllegalArgumentException("Unable to decompress stream", e);
    }
}

/** Best-effort removal of a temporary directory; failure is only logged. */
private void cleanupTempDirectory(File tempDir) {
    logger.log(Level.FINE, "Deleting tmp dir '" + tempDir + "'");
    if (!IOUtils.recursiveDeleteDir(tempDir)) {
        logger.log(Level.WARNING, "Not able to delete tmp dir '" + tempDir + "'");
    }
}

private RemoteSession getExistingSession(Tenant tenant, ApplicationId applicationId) {
    TenantApplications applicationRepo = tenant.getApplicationRepo();
    return getRemoteSession(tenant, applicationRepo.requireActiveSessionOf(applicationId));
}

private RemoteSession getActiveSession(Tenant tenant, ApplicationId applicationId) {
    TenantApplications applicationRepo = tenant.getApplicationRepo();
    if (applicationRepo.activeApplications().contains(applicationId)) {
        return tenant.getSessionRepository().getRemoteSession(applicationRepo.requireActiveSessionOf(applicationId));
    }
    return null;
}

/** Returns the local session corresponding to the application's active session, or null if none. */
public LocalSession getActiveLocalSession(Tenant tenant, ApplicationId applicationId) {
    TenantApplications applicationRepo = tenant.getApplicationRepo();
    if (applicationRepo.activeApplications().contains(applicationId)) {
        return tenant.getSessionRepository().getLocalSession(applicationRepo.requireActiveSessionOf(applicationId));
    }
    return null;
}

/** Logs restart/re-feed actions required by the difference between active and new application. */
private static void logConfigChangeActions(ConfigChangeActions actions, DeployLogger logger) {
    RestartActions restartActions = actions.getRestartActions();
    if ( ! restartActions.isEmpty()) {
        logger.log(Level.WARNING, "Change(s) between active and new application that require restart:\n" +
                                  restartActions.format());
    }
    RefeedActions refeedActions = actions.getRefeedActions();
    if ( ! refeedActions.isEmpty()) {
        boolean allAllowed = refeedActions.getEntries().stream().allMatch(RefeedActions.Entry::allowed);
        logger.log(allAllowed ? Level.INFO : Level.WARNING,
                   "Change(s) between active and new application that may require re-feed:\n" +
                   refeedActions.format());
    }
}

/** Resolves the URI of the application's log server (or the given host, for hosted-vespa tenants). */
private String getLogServerURI(ApplicationId applicationId, Optional<String> hostname) {
    if (hostname.isPresent() && HOSTED_VESPA_TENANT.equals(applicationId.tenant())) {
        // NOTE(review): this return statement appears truncated — everything after "http:" was
        // likely eaten by a comment stripper treating the "//" inside the URL literal as a line
        // comment. Restore the full "http://..." URL from version control.
        return "http:
    }

    Application application = getApplication(applicationId);
    Collection<HostInfo> hostInfos = application.getModel().getHosts();

    HostInfo logServerHostInfo = hostInfos.stream()
            .filter(host -> host.getServices().stream()
                    .anyMatch(serviceInfo -> serviceInfo.getServiceType().equalsIgnoreCase("logserver")))
            .findFirst().orElseThrow(() -> new IllegalArgumentException("Could not find host info for logserver"));

    ServiceInfo serviceInfo = logServerHostInfo.getServices().stream()
            .filter(service -> List.of(LOGSERVER_CONTAINER.serviceName, CONTAINER.serviceName).contains(service.getServiceType()))
            .findFirst().orElseThrow(() -> new IllegalArgumentException("No container running on logserver host"));
    int port = servicePort(serviceInfo);
    // NOTE(review): truncated like the return above — restore the full "http://..." URL from
    // version control.
    return "http:
}

/** Returns the port tagged "http" of the given service. */
private int servicePort(ServiceInfo serviceInfo) {
    return serviceInfo.getPorts().stream()
            .filter(portInfo -> portInfo.getTags().stream().anyMatch(tag -> tag.equalsIgnoreCase("http")))
            .findFirst().orElseThrow(() -> new IllegalArgumentException("Could not find HTTP port"))
            .getPort();
}

/** Creates an empty Slime object used as the root of a deploy log. */
public Slime createDeployLog() {
    Slime deployLog = new Slime();
    deployLog.setObject();
    return deployLog;
}

/** Returns the zone this config server runs in, from config. */
public Zone zone() {
    return new Zone(SystemName.from(configserverConfig.system()),
                    Environment.from(configserverConfig.environment()),
                    RegionName.from(configserverConfig.region()));
}

/** Emits as a metric the time in millis spent while holding this timer, with deployment ID as dimensions. */
public ActionTimer timerFor(ApplicationId id, String metricName) {
    return new ActionTimer(metric, clock, id, configserverConfig.environment(), configserverConfig.region(), metricName);
}

/**
 * Measures wall-clock time from construction to {@link #close()} and reports it as a metric
 * with application/tenant/zone dimensions. Intended for use with try-with-resources.
 */
public static class ActionTimer implements AutoCloseable {

    private final Metric metric;
    private final Clock clock;
    private final ApplicationId id;
    private final String environment;
    private final String region;
    private final String name;
    private final Instant start;  // captured at construction

    private ActionTimer(Metric metric, Clock clock, ApplicationId id, String environment, String region, String name) {
        this.metric = metric;
        this.clock = clock;
        this.id = id;
        this.environment = environment;
        this.region = region;
        this.name = name;
        this.start = clock.instant();
    }

    @Override
    public void close() {
        metric.set(name,
                   Duration.between(start, clock.instant()).toMillis(),
                   metric.createContext(Map.of("applicationId", id.toFullString(),
                                               "tenantName", id.tenant().value(),
                                               "app", id.application().value() + "." + id.instance().value(),
                                               "zone", environment + "." + region)));
    }

}

}
/**
 * The config server's entry point for operating on tenants' applications: creating, preparing,
 * activating and deleting deployment sessions, and answering queries (logs, metrics, convergence,
 * file-distribution status) about deployed applications.
 *
 * <p>Implements {@link com.yahoo.config.provision.Deployer} so the system can redeploy the locally
 * active application (e.g. during bootstrap) without a new application package.
 *
 * <p>NOTE(review): thread-safety is not established by this class itself; it appears to rely on
 * per-application locks from {@code TenantApplications} and on ZooKeeper/Curator transactions —
 * confirm against the session repository implementations.
 */
class ApplicationRepository implements com.yahoo.config.provision.Deployer {

    private static final Logger log = Logger.getLogger(ApplicationRepository.class.getName());

    private final TenantRepository tenantRepository;
    // Empty when this config server does not do host provisioning (then activation skips provisioner steps).
    private final Optional<Provisioner> hostProvisioner;
    // Empty when infrastructure applications are not deployed through this server.
    private final Optional<InfraDeployer> infraDeployer;
    private final ConfigConvergenceChecker convergeChecker;
    private final HttpProxy httpProxy;
    private final Clock clock;
    // Internal logger used where no deploy-handler logger is available (e.g. temp dir cleanup).
    private final DeployLogger logger = new SilentDeployLogger();
    private final ConfigserverConfig configserverConfig;
    private final FileDistributionStatus fileDistributionStatus;
    private final Orchestrator orchestrator;
    private final LogRetriever logRetriever;
    private final TesterClient testerClient;
    private final Metric metric;
    // Feature flag: when true, tenant metadata is written as part of activation transactions.
    private final BooleanFlag useTenantMetaData;

    /** Injected production constructor; resolves optional provisioner/infra deployer from their providers. */
    @Inject
    public ApplicationRepository(TenantRepository tenantRepository,
                                 HostProvisionerProvider hostProvisionerProvider,
                                 InfraDeployerProvider infraDeployerProvider,
                                 ConfigConvergenceChecker configConvergenceChecker,
                                 HttpProxy httpProxy,
                                 ConfigserverConfig configserverConfig,
                                 Orchestrator orchestrator,
                                 TesterClient testerClient,
                                 Metric metric,
                                 FlagSource flagSource) {
        this(tenantRepository,
             hostProvisionerProvider.getHostProvisioner(),
             infraDeployerProvider.getInfraDeployer(),
             configConvergenceChecker,
             httpProxy,
             configserverConfig,
             orchestrator,
             new LogRetriever(),
             new FileDistributionStatus(),
             Clock.systemUTC(),
             testerClient,
             metric,
             flagSource);
    }

    /** Convenience constructor with default config, log retriever, tester client, metric and flag source (used by tests, presumably). */
    public ApplicationRepository(TenantRepository tenantRepository,
                                 Provisioner hostProvisioner,
                                 Orchestrator orchestrator,
                                 Clock clock) {
        this(tenantRepository, hostProvisioner, orchestrator,
             new ConfigserverConfig(new ConfigserverConfig.Builder()),
             new LogRetriever(), clock, new TesterClient(), new NullMetric(), new InMemoryFlagSource());
    }

    /** Convenience constructor with a required provisioner; wraps it in Optional for the canonical constructor. */
    public ApplicationRepository(TenantRepository tenantRepository,
                                 Provisioner hostProvisioner,
                                 Orchestrator orchestrator,
                                 ConfigserverConfig configserverConfig,
                                 LogRetriever logRetriever,
                                 Clock clock,
                                 TesterClient testerClient,
                                 Metric metric,
                                 FlagSource flagSource) {
        this(tenantRepository,
             Optional.of(hostProvisioner),
             Optional.empty(),
             new ConfigConvergenceChecker(),
             new HttpProxy(new SimpleHttpFetcher()),
             configserverConfig,
             orchestrator,
             logRetriever,
             new FileDistributionStatus(),
             clock,
             testerClient,
             metric,
             flagSource);
    }

    /** Canonical constructor: all collaborators supplied explicitly; only assigns fields and binds the flag. */
    private ApplicationRepository(TenantRepository tenantRepository,
                                  Optional<Provisioner> hostProvisioner,
                                  Optional<InfraDeployer> infraDeployer,
                                  ConfigConvergenceChecker configConvergenceChecker,
                                  HttpProxy httpProxy,
                                  ConfigserverConfig configserverConfig,
                                  Orchestrator orchestrator,
                                  LogRetriever logRetriever,
                                  FileDistributionStatus fileDistributionStatus,
                                  Clock clock,
                                  TesterClient testerClient,
                                  Metric metric,
                                  FlagSource flagSource) {
        this.tenantRepository = tenantRepository;
        this.hostProvisioner = hostProvisioner;
        this.infraDeployer = infraDeployer;
        this.convergeChecker = configConvergenceChecker;
        this.httpProxy = httpProxy;
        this.configserverConfig = configserverConfig;
        this.orchestrator = orchestrator;
        this.logRetriever = logRetriever;
        this.fileDistributionStatus = fileDistributionStatus;
        this.clock = clock;
        this.testerClient = testerClient;
        this.metric = metric;
        this.useTenantMetaData = Flags.USE_TENANT_META_DATA.bindTo(flagSource);
    }

    public Metric metric() {
        return metric;
    }

    /**
     * Prepares an already-created local session: validates it is not active, runs model building /
     * config change analysis via the session repository, and logs restart/re-feed actions.
     * The elapsed time is reported as the "deployment.prepareMillis" metric via {@link ActionTimer}.
     *
     * @throws IllegalStateException if the session is already active
     * @throws NotFoundException     if the session does not exist
     */
    public PrepareResult prepare(Tenant tenant, long sessionId, PrepareParams prepareParams, Instant now) {
        validateThatLocalSessionIsNotActive(tenant, sessionId);
        LocalSession session = getLocalSession(tenant, sessionId);
        ApplicationId applicationId = prepareParams.getApplicationId();
        Optional<ApplicationSet> currentActiveApplicationSet = getCurrentActiveApplicationSet(tenant, applicationId);
        Slime deployLog = createDeployLog();
        DeployLogger logger = new DeployHandlerLogger(deployLog.get().setArray("log"), prepareParams.isVerbose(), applicationId);
        try (ActionTimer timer = timerFor(applicationId, "deployment.prepareMillis")) {
            SessionRepository sessionRepository = tenant.getSessionRepository();
            ConfigChangeActions actions = sessionRepository.prepareLocalSession(session, logger, prepareParams,
                                                                                currentActiveApplicationSet, tenant.getPath(), now);
            logConfigChangeActions(actions, logger);
            log.log(Level.INFO, TenantRepository.logPre(applicationId) + "Session " + sessionId + " prepared successfully. ");
            return new PrepareResult(sessionId, actions, deployLog);
        }
    }

    /** Deploys a compressed application package: decompresses to a temp dir (always cleaned up), then deploys. */
    public PrepareResult deploy(CompressedApplicationInputStream in, PrepareParams prepareParams,
                                boolean ignoreSessionStaleFailure, Instant now) {
        File tempDir = uncheck(() -> Files.createTempDirectory("deploy")).toFile();
        PrepareResult prepareResult;
        try {
            prepareResult = deploy(decompressApplication(in, tempDir), prepareParams, ignoreSessionStaleFailure, now);
        } finally {
            cleanupTempDirectory(tempDir);
        }
        return prepareResult;
    }

    /** Deploys from a package directory with defaults: stale-session failures are not ignored, "now" is the wall clock. */
    public PrepareResult deploy(File applicationPackage, PrepareParams prepareParams) {
        return deploy(applicationPackage, prepareParams, false, Instant.now());
    }

    /** Full deploy from a package directory: create session, prepare, then activate. Returns the prepare result. */
    public PrepareResult deploy(File applicationPackage, PrepareParams prepareParams,
                                boolean ignoreSessionStaleFailure, Instant now) {
        ApplicationId applicationId = prepareParams.getApplicationId();
        long sessionId = createSession(applicationId, prepareParams.getTimeoutBudget(), applicationPackage);
        Tenant tenant = getTenant(applicationId);
        PrepareResult result = prepare(tenant, sessionId, prepareParams, now);
        activate(tenant, sessionId, prepareParams.getTimeoutBudget(), ignoreSessionStaleFailure);
        return result;
    }

    /**
     * Creates a new deployment from the active application, if available.
     * This is used for system internal redeployments, not on application package changes.
     *
     * @param application the active application to be redeployed
     * @return a new deployment from the local active, or empty if a local active application
     *         was not present for this id (meaning it either is not active or active on another
     *         node in the config server cluster)
     */
    @Override
    public Optional<com.yahoo.config.provision.Deployment> deployFromLocalActive(ApplicationId application) {
        return deployFromLocalActive(application, false);
    }

    /**
     * Creates a new deployment from the active application, if available.
     * This is used for system internal redeployments, not on application package changes.
     *
     * @param application the active application to be redeployed
     * @param bootstrap   the deployment is done when bootstrapping
     * @return a new deployment from the local active, or empty if a local active application
     *         was not present for this id (meaning it either is not active or active on another
     *         node in the config server cluster)
     */
    @Override
    public Optional<com.yahoo.config.provision.Deployment> deployFromLocalActive(ApplicationId application,
                                                                                 boolean bootstrap) {
        // Timeout = ZooKeeper barrier timeout plus a 5 s margin.
        return deployFromLocalActive(application,
                                     Duration.ofSeconds(configserverConfig.zookeeper().barrierTimeout()).plus(Duration.ofSeconds(5)),
                                     bootstrap);
    }

    /**
     * Creates a new deployment from the active application, if available.
     * This is used for system internal redeployments, not on application package changes.
     *
     * @param application the active application to be redeployed
     * @param timeout     the timeout to use for each individual deployment operation
     * @param bootstrap   the deployment is done when bootstrapping
     * @return a new deployment from the local active, or empty if a local active application
     *         was not present for this id (meaning it either is not active or active on another
     *         node in the config server cluster)
     */
    @Override
    public Optional<com.yahoo.config.provision.Deployment> deployFromLocalActive(ApplicationId application,
                                                                                 Duration timeout,
                                                                                 boolean bootstrap) {
        // Infrastructure applications are redeployed through the infra deployer instead, when it owns them.
        Optional<com.yahoo.config.provision.Deployment> infraDeployment = infraDeployer.flatMap(d -> d.getDeployment(application));
        if (infraDeployment.isPresent()) return infraDeployment;

        Tenant tenant = tenantRepository.getTenant(application.tenant());
        if (tenant == null) return Optional.empty();
        LocalSession activeSession = getActiveLocalSession(tenant, application);
        if (activeSession == null) return Optional.empty();
        TimeoutBudget timeoutBudget = new TimeoutBudget(clock, timeout);
        SessionRepository sessionRepository = tenant.getSessionRepository();
        // internalRedeploy = true: this session is derived from the active one, not a new package.
        LocalSession newSession = sessionRepository.createSessionFromExisting(activeSession, logger, true, timeoutBudget);
        sessionRepository.addLocalSession(newSession);

        return Optional.of(Deployment.unprepared(newSession, this, hostProvisioner, tenant, timeout, clock,
                                                 false /* don't validate as this is already deployed */, bootstrap));
    }

    /** Returns the creation time of the active session, or empty if the tenant or active session is unknown. */
    @Override
    public Optional<Instant> lastDeployTime(ApplicationId application) {
        Tenant tenant = tenantRepository.getTenant(application.tenant());
        if (tenant == null) return Optional.empty();
        RemoteSession activeSession = getActiveSession(tenant, application);
        if (activeSession == null) return Optional.empty();
        return Optional.of(activeSession.getCreateTime());
    }

    /** Activates a prepared local session and returns the application id it belongs to. */
    public ApplicationId activate(Tenant tenant, long sessionId, TimeoutBudget timeoutBudget,
                                  boolean ignoreSessionStaleFailure) {
        LocalSession localSession = getLocalSession(tenant, sessionId);
        Deployment deployment = deployFromPreparedSession(localSession, tenant, timeoutBudget.timeLeft());
        deployment.setIgnoreSessionStaleFailure(ignoreSessionStaleFailure);
        deployment.activate();
        return localSession.getApplicationId();
    }

    /** Wraps an already-prepared session in a Deployment (validate = false: it was validated at prepare time, presumably). */
    private Deployment deployFromPreparedSession(LocalSession session, Tenant tenant, Duration timeout) {
        return Deployment.prepared(session, this, hostProvisioner, tenant, timeout, clock, false);
    }

    /**
     * Builds the transaction that deactivates the currently active session (if any) and activates the
     * prepared one, after verifying the active session has not changed since the prepared session was
     * created and is not newer than it. Optionally also writes tenant metadata (feature-flagged).
     */
    public Transaction deactivateCurrentActivateNew(Session active, LocalSession prepared, boolean ignoreStaleSessionFailure) {
        Tenant tenant = tenantRepository.getTenant(prepared.getTenantName());
        Transaction transaction = tenant.getSessionRepository().createActivateTransaction(prepared);
        if (active != null) {
            checkIfActiveHasChanged(prepared, active, ignoreStaleSessionFailure);
            checkIfActiveIsNewerThanSessionToBeActivated(prepared.getSessionId(), active.getSessionId());
            transaction.add(active.createDeactivateTransaction().operations());
        }
        if (useTenantMetaData.value())
            transaction.add(writeTenantMetaData(tenant).operations());
        return transaction;
    }

    /** Serializes tenant metadata (current session-repository clock instant) to JSON bytes. */
    private byte[] createMetaData(Tenant tenant) {
        return new TenantMetaData(tenant.getSessionRepository().clock().instant()).asJsonBytes();
    }

    /** Reads tenant metadata from ZooKeeper; falls back to the tenant's creation time when no data is stored. */
    TenantMetaData getTenantMetaData(Tenant tenant) {
        Optional<byte[]> data = tenantRepository.getCurator().getData(TenantRepository.getTenantPath(tenant.getName()));
        return data.map(bytes -> TenantMetaData.fromJsonString(Utf8.toString(bytes)))
                   .orElse(new TenantMetaData(tenant.getCreatedTime()));
    }

    /** Creates a Curator transaction writing this tenant's metadata to its tenant path. */
    private Transaction writeTenantMetaData(Tenant tenant) {
        return new CuratorTransaction(tenantRepository.getCurator())
                .add(CuratorOperations.setData(TenantRepository.getTenantPath(tenant.getName()).getAbsolute(),
                                               createMetaData(tenant)));
    }

    /**
     * Fails activation (ActivationConflictException) if the active session has changed since the session
     * to activate was created — unless ignoreStaleSessionFailure, which downgrades to a warning.
     * A recorded active-session-at-create of 0 means "no active session at create time" and skips the check.
     */
    static void checkIfActiveHasChanged(LocalSession session, Session currentActiveSession, boolean ignoreStaleSessionFailure) {
        long activeSessionAtCreate = session.getActiveSessionAtCreate();
        log.log(Level.FINE, currentActiveSession.logPre() + "active session id at create time=" + activeSessionAtCreate);
        if (activeSessionAtCreate == 0) return;

        long sessionId = session.getSessionId();
        long currentActiveSessionSessionId = currentActiveSession.getSessionId();
        log.log(Level.FINE, currentActiveSession.logPre() + "sessionId=" + sessionId +
                            ", current active session=" + currentActiveSessionSessionId);
        if (currentActiveSession.isNewerThan(activeSessionAtCreate) &&
            currentActiveSessionSessionId != sessionId) {
            String errMsg = currentActiveSession.logPre() + "Cannot activate session " + sessionId +
                            " because the currently active session (" + currentActiveSessionSessionId +
                            ") has changed since session " + sessionId + " was created (was " +
                            activeSessionAtCreate + " at creation time)";
            if (ignoreStaleSessionFailure) {
                log.warning(errMsg + " (Continuing because of force.)");
            } else {
                throw new ActivationConflictException(errMsg);
            }
        }
    }

    /** Rejects activating a session that is older (lower id) than the currently active one. */
    static void checkIfActiveIsNewerThanSessionToBeActivated(long sessionId, long currentActiveSessionId) {
        if (sessionId < currentActiveSessionId) {
            throw new ActivationConflictException("It is not possible to activate session " + sessionId +
                                                  ", because it is older than current active session (" + currentActiveSessionId + ")");
        }
    }

    /**
     * Deletes an application
     *
     * @return true if the application was found and deleted, false if it was not present
     * @throws RuntimeException if the delete transaction fails. This method is exception safe.
     */
    boolean delete(ApplicationId applicationId) {
        return delete(applicationId, Duration.ofSeconds(60));
    }

    /**
     * Deletes an application
     *
     * @return true if the application was found and deleted, false if it was not present
     * @throws RuntimeException if the delete transaction fails. This method is exception safe.
     */
    public boolean delete(ApplicationId applicationId, Duration waitTime) {
        Tenant tenant = getTenant(applicationId);
        if (tenant == null) return false;

        TenantApplications tenantApplications = tenant.getApplicationRepo();
        // Lock per application so concurrent deletes/deploys for the same id are serialized.
        try (Lock lock = tenantApplications.lock(applicationId)) {
            if ( ! tenantApplications.exists(applicationId)) return false;

            Optional<Long> activeSession = tenantApplications.activeSessionOf(applicationId);
            if (activeSession.isEmpty()) return false;

            long sessionId = activeSession.get();
            RemoteSession remoteSession;
            try {
                remoteSession = getRemoteSession(tenant, sessionId);
                Transaction deleteTransaction = remoteSession.createDeleteTransaction();
                deleteTransaction.commit();
                log.log(Level.INFO, TenantRepository.logPre(applicationId) + "Waiting for session " + sessionId + " to be deleted");

                // Wait (bounded by waitTime) for the local session to disappear; roll back on timeout.
                if ( ! waitTime.isZero() && localSessionHasBeenDeleted(applicationId, sessionId, waitTime)) {
                    log.log(Level.INFO, TenantRepository.logPre(applicationId) + "Session " + sessionId + " deleted");
                } else {
                    deleteTransaction.rollbackOrLog();
                    throw new InternalServerException(applicationId + " was not deleted (waited " + waitTime + "), session " + sessionId);
                }
            } catch (NotFoundException e) {
                // Session already gone; fall through and clean up remaining application state below.
                log.log(Level.INFO, TenantRepository.logPre(applicationId) + "Active session exists, but has not been deleted properly. Trying to cleanup");
            }

            NestedTransaction transaction = new NestedTransaction();
            Curator curator = tenantRepository.getCurator();
            // Remove all per-application state: endpoints, roles, certificates, application entry, provisioned hosts.
            transaction.add(new ContainerEndpointsCache(tenant.getPath(), curator).delete(applicationId));
            transaction.add(new ApplicationRolesStore(curator, tenant.getPath()).delete(applicationId));
            transaction.add(new EndpointCertificateMetadataStore(curator, tenant.getPath()).delete(applicationId));
            transaction.add(tenantApplications.createDeleteTransaction(applicationId));
            hostProvisioner.ifPresent(provisioner -> provisioner.remove(transaction, applicationId));
            transaction.onCommitted(() -> log.log(Level.INFO, "Deleted " + applicationId));
            transaction.commit();
            return true;
        }
    }

    /** Proxies a cluster controller status page from the given host through the HTTP proxy. */
    public HttpResponse clusterControllerStatusPage(ApplicationId applicationId, String hostName, String pathSuffix) {
        String relativePath = "clustercontroller-status/" + pathSuffix;
        return httpProxy.get(getApplication(applicationId), hostName, CLUSTERCONTROLLER_CONTAINER.serviceName, relativePath);
    }

    public Long getApplicationGeneration(ApplicationId applicationId) {
        return getApplication(applicationId).getApplicationGeneration();
    }

    /** Restarts services matching the filter, when a host provisioner is present. */
    public void restart(ApplicationId applicationId, HostFilter hostFilter) {
        hostProvisioner.ifPresent(provisioner -> provisioner.restart(applicationId, hostFilter));
    }

    public boolean isSuspended(ApplicationId application) {
        return orchestrator.getAllSuspendedApplications().contains(application);
    }

    public HttpResponse filedistributionStatus(ApplicationId applicationId, Duration timeout) {
        return fileDistributionStatus.status(getApplication(applicationId), timeout);
    }

    /**
     * Deletes file references on disk that are not referenced by any active application and whose
     * last-modified time is older than keepFileReferences ago. Returns the set actually selected
     * for deletion (individual delete failures are only logged).
     */
    public Set<String> deleteUnusedFiledistributionReferences(File fileReferencesPath, Duration keepFileReferences) {
        if (!fileReferencesPath.isDirectory()) throw new RuntimeException(fileReferencesPath + " is not a directory");

        Set<String> fileReferencesInUse = new HashSet<>();
        // Collect file references from every active application; failures for one app do not abort the sweep.
        for (var applicationId : listApplications()) {
            try {
                Optional<Application> app = getOptionalApplication(applicationId);
                if (app.isEmpty()) continue;
                fileReferencesInUse.addAll(app.get().getModel().fileReferences().stream()
                                                   .map(FileReference::value)
                                                   .collect(Collectors.toSet()));
            } catch (Exception e) {
                log.log(Level.WARNING, "Getting file references in use for '" + applicationId + "' failed", e);
            }
        }
        log.log(Level.FINE, "File references in use : " + fileReferencesInUse);

        Set<String> fileReferencesOnDisk = getFileReferencesOnDisk(fileReferencesPath);
        log.log(Level.FINE, "File references on disk (in " + fileReferencesPath + "): " + fileReferencesOnDisk);

        Instant instant = Instant.now().minus(keepFileReferences);
        Set<String> fileReferencesToDelete = fileReferencesOnDisk
                .stream()
                .filter(fileReference -> ! fileReferencesInUse.contains(fileReference))
                .filter(fileReference -> isFileLastModifiedBefore(new File(fileReferencesPath, fileReference), instant))
                .collect(Collectors.toSet());
        if (fileReferencesToDelete.size() > 0) {
            log.log(Level.FINE, "Will delete file references not in use: " + fileReferencesToDelete);
            fileReferencesToDelete.forEach(fileReference -> {
                File file = new File(fileReferencesPath, fileReference);
                if ( ! IOUtils.recursiveDeleteDir(file))
                    log.log(Level.WARNING, "Could not delete " + file.getAbsolutePath());
            });
        }
        return fileReferencesToDelete;
    }

    /** Returns the file references of the application's model, or an empty set if the application cannot be loaded. */
    public Set<FileReference> getFileReferences(ApplicationId applicationId) {
        return getOptionalApplication(applicationId).map(app -> app.getModel().fileReferences()).orElse(Set.of());
    }

    public ApplicationFile getApplicationFileFromSession(TenantName tenantName, long sessionId, String path, LocalSession.Mode mode) {
        Tenant tenant = tenantRepository.getTenant(tenantName);
        return getLocalSession(tenant, sessionId).getApplicationFile(Path.fromString(path), mode);
    }

    /** Returns the tenant owning this application, or null if the tenant is unknown. */
    public Tenant getTenant(ApplicationId applicationId) {
        return tenantRepository.getTenant(applicationId.tenant());
    }

    private Application getApplication(ApplicationId applicationId) {
        return getApplication(applicationId, Optional.empty());
    }

    /**
     * Loads the application model for the active session, for the given Vespa version or latest.
     *
     * @throws NotFoundException if the tenant or application is unknown (logged at WARNING, then rethrown)
     */
    private Application getApplication(ApplicationId applicationId, Optional<Version> version) {
        try {
            Tenant tenant = getTenant(applicationId);
            if (tenant == null) throw new NotFoundException("Tenant '" + applicationId.tenant() + "' not found");
            long sessionId = getSessionIdForApplication(tenant, applicationId);
            RemoteSession session = getRemoteSession(tenant, sessionId);
            return session.ensureApplicationLoaded().getForVersionOrLatest(version, clock.instant());
        } catch (NotFoundException e) {
            log.log(Level.WARNING, "Failed getting application for '" + applicationId + "': " + e.getMessage());
            throw e;
        } catch (Exception e) {
            log.log(Level.WARNING, "Failed getting application for '" + applicationId + "'", e);
            throw e;
        }
    }

    /** Like getApplication, but maps any failure to empty instead of throwing. */
    private Optional<Application> getOptionalApplication(ApplicationId applicationId) {
        try {
            return Optional.of(getApplication(applicationId));
        } catch (Exception e) {
            return Optional.empty();
        }
    }

    /** Returns the active applications across all tenants. */
    public Set<ApplicationId> listApplications() {
        return tenantRepository.getAllTenants().stream()
                .flatMap(tenant -> tenant.getApplicationRepo().activeApplications().stream())
                .collect(Collectors.toSet());
    }

    private boolean isFileLastModifiedBefore(File fileReference, Instant instant) {
        BasicFileAttributes fileAttributes;
        try {
            fileAttributes = readAttributes(fileReference.toPath(), BasicFileAttributes.class);
            return fileAttributes.lastModifiedTime().toInstant().isBefore(instant);
        } catch (IOException e) {
            throw new UncheckedIOException(e);
        }
    }

    /** Polls (10 ms interval, bounded by waitTime) until the session is gone from the session repository. */
    private boolean localSessionHasBeenDeleted(ApplicationId applicationId, long sessionId, Duration waitTime) {
        SessionRepository sessionRepository = getTenant(applicationId).getSessionRepository();
        Instant end = Instant.now().plus(waitTime);
        do {
            if (sessionRepository.getRemoteSession(sessionId) == null) return true;
            try { Thread.sleep(10); } catch (InterruptedException e) { /* ignored */}
        } while (Instant.now().isBefore(end));
        return false;
    }

    /**
     * Returns the active session's application package reference, but only when that reference is
     * NOT yet present in the local file-references download directory (i.e. it still needs download).
     */
    public Optional<String> getApplicationPackageReference(ApplicationId applicationId) {
        Optional<String> applicationPackage = Optional.empty();
        RemoteSession session = getActiveSession(applicationId);
        if (session != null) {
            FileReference applicationPackageReference = session.getApplicationPackageReference();
            File downloadDirectory = new File(Defaults.getDefaults().underVespaHome(configserverConfig().fileReferencesDir()));
            if (applicationPackageReference != null && ! fileReferenceExistsOnDisk(downloadDirectory, applicationPackageReference))
                applicationPackage = Optional.of(applicationPackageReference.value());
        }
        return applicationPackage;
    }

    /** Returns all Vespa versions the active application set is built for, or an empty list if none is active. */
    public List<Version> getAllVersions(ApplicationId applicationId) {
        Optional<ApplicationSet> applicationSet = getCurrentActiveApplicationSet(getTenant(applicationId), applicationId);
        if (applicationSet.isEmpty())
            return List.of();
        else
            return applicationSet.get().getAllVersions(applicationId);
    }

    public HttpResponse checkServiceForConfigConvergence(ApplicationId applicationId, String hostAndPort, URI uri,
                                                         Duration timeout, Optional<Version> vespaVersion) {
        return convergeChecker.checkService(getApplication(applicationId, vespaVersion), hostAndPort, uri, timeout);
    }

    public HttpResponse servicesToCheckForConfigConvergence(ApplicationId applicationId, URI uri,
                                                            Duration timeoutPerService, Optional<Version> vespaVersion) {
        return convergeChecker.servicesToCheck(getApplication(applicationId, vespaVersion), uri, timeoutPerService);
    }

    /** Fetches logs from the application's log server; apiParams is appended verbatim to the log server URI. */
    public HttpResponse getLogs(ApplicationId applicationId, Optional<String> hostname, String apiParams) {
        String logServerURI = getLogServerURI(applicationId, hostname) + apiParams;
        return logRetriever.getLogs(logServerURI);
    }

    public HttpResponse getTesterStatus(ApplicationId applicationId) {
        return testerClient.getStatus(getTesterHostname(applicationId), getTesterPort(applicationId));
    }

    public HttpResponse getTesterLog(ApplicationId applicationId, Long after) {
        return testerClient.getLog(getTesterHostname(applicationId), getTesterPort(applicationId), after);
    }

    public HttpResponse startTests(ApplicationId applicationId, String suite, byte[] config) {
        return testerClient.startTests(getTesterHostname(applicationId), getTesterPort(applicationId), suite, config);
    }

    public HttpResponse isTesterReady(ApplicationId applicationId) {
        return testerClient.isTesterReady(getTesterHostname(applicationId), getTesterPort(applicationId));
    }

    private String getTesterHostname(ApplicationId applicationId) {
        return getTesterServiceInfo(applicationId).getHostName();
    }

    // NOTE(review): Optional.get() without isPresent(); throws NoSuchElementException if no "http" port — presumably guaranteed by the tester model.
    private int getTesterPort(ApplicationId applicationId) {
        ServiceInfo serviceInfo = getTesterServiceInfo(applicationId);
        return serviceInfo.getPorts().stream().filter(portInfo -> portInfo.getTags().contains("http")).findFirst().get().getPort();
    }

    /** Finds the tester container's ServiceInfo on the first host of the tester application's model. */
    private ServiceInfo getTesterServiceInfo(ApplicationId applicationId) {
        Application application = getApplication(applicationId);
        return application.getModel().getHosts().stream()
                .findFirst().orElseThrow(() -> new InternalServerException("Could not find any host for tester app " + applicationId.toFullString()))
                .getServices().stream()
                .filter(service -> CONTAINER.serviceName.equals(service.getServiceType()))
                .findFirst()
                .orElseThrow(() -> new InternalServerException("Could not find any tester container for tester app " + applicationId.toFullString()));
    }

    /**
     * Activates a prepared session: deactivates the previous active session, activates the new one
     * and (when present) activates provisioned hosts — all in one nested transaction.
     * Returns a waiter that completes when all config servers have activated.
     */
    public CompletionWaiter activate(LocalSession session, Session previousActiveSession, ApplicationId applicationId,
                                     boolean ignoreSessionStaleFailure) {
        CompletionWaiter waiter = session.getSessionZooKeeperClient().createActiveWaiter();
        NestedTransaction transaction = new NestedTransaction();
        transaction.add(deactivateCurrentActivateNew(previousActiveSession, session, ignoreSessionStaleFailure));
        hostProvisioner.ifPresent(provisioner -> provisioner.activate(transaction, applicationId, session.getAllocatedHosts().getHosts()));
        transaction.commit();
        return waiter;
    }

    /**
     * Gets the active Session for the given application id.
     *
     * @return the active session, or null if there is no active session for the given application id.
     */
    public RemoteSession getActiveSession(ApplicationId applicationId) {
        return getActiveSession(getTenant(applicationId), applicationId);
    }

    public long getSessionIdForApplication(ApplicationId applicationId) {
        Tenant tenant = getTenant(applicationId);
        if (tenant == null) throw new NotFoundException("Tenant '" + applicationId.tenant() + "' not found");
        return getSessionIdForApplication(tenant, applicationId);
    }

    private long getSessionIdForApplication(Tenant tenant, ApplicationId applicationId) {
        TenantApplications applicationRepo = tenant.getApplicationRepo();
        if (! applicationRepo.exists(applicationId))
            throw new NotFoundException("Unknown application id '" + applicationId + "'");
        return applicationRepo.requireActiveSessionOf(applicationId);
    }

    /** @throws IllegalStateException if the remote session is in ACTIVATE state */
    public void validateThatSessionIsNotActive(Tenant tenant, long sessionId) {
        Session session = getRemoteSession(tenant, sessionId);
        if (Session.Status.ACTIVATE.equals(session.getStatus())) {
            throw new IllegalStateException("Session is active: " + sessionId);
        }
    }

    /** @throws IllegalStateException if the remote session is not in PREPARE state */
    public void validateThatSessionIsPrepared(Tenant tenant, long sessionId) {
        Session session = getRemoteSession(tenant, sessionId);
        if ( ! Session.Status.PREPARE.equals(session.getStatus()))
            throw new IllegalStateException("Session not prepared: " + sessionId);
    }

    /** Creates a new local session derived from the currently active session, and registers it. Returns its id. */
    public long createSessionFromExisting(ApplicationId applicationId, DeployLogger logger,
                                          boolean internalRedeploy, TimeoutBudget timeoutBudget) {
        Tenant tenant = getTenant(applicationId);
        SessionRepository sessionRepository = tenant.getSessionRepository();
        RemoteSession fromSession = getExistingSession(tenant, applicationId);
        LocalSession session = sessionRepository.createSessionFromExisting(fromSession, logger, internalRedeploy, timeoutBudget);
        sessionRepository.addLocalSession(session);
        return session.getSessionId();
    }

    /** Creates a session from a compressed application stream, via a temp dir that is always cleaned up. */
    public long createSession(ApplicationId applicationId, TimeoutBudget timeoutBudget, InputStream in, String contentType) {
        File tempDir = uncheck(() -> Files.createTempDirectory("deploy")).toFile();
        long sessionId;
        try {
            sessionId = createSession(applicationId, timeoutBudget, decompressApplication(in, contentType, tempDir));
        } finally {
            cleanupTempDirectory(tempDir);
        }
        return sessionId;
    }

    /** Creates a session from an application package directory and registers it. Returns its id. */
    public long createSession(ApplicationId applicationId, TimeoutBudget timeoutBudget, File applicationDirectory) {
        Tenant tenant = getTenant(applicationId);
        tenant.getApplicationRepo().createApplication(applicationId);
        Optional<Long> activeSessionId = tenant.getApplicationRepo().activeSessionOf(applicationId);
        LocalSession session = tenant.getSessionRepository().createSession(applicationDirectory, applicationId, timeoutBudget, activeSessionId);
        tenant.getSessionRepository().addLocalSession(session);
        return session.getSessionId();
    }

    /** Deletes expired local sessions across all tenants, sparing each application's currently active session. */
    public void deleteExpiredLocalSessions() {
        Map<Tenant, List<LocalSession>> sessionsPerTenant = new HashMap<>();
        tenantRepository.getAllTenants().forEach(tenant -> sessionsPerTenant.put(tenant, tenant.getSessionRepository().getLocalSessions()));

        // Collect every application id that owns at least one local session.
        Set<ApplicationId> applicationIds = new HashSet<>();
        sessionsPerTenant.values()
                .forEach(sessionList -> sessionList.stream()
                        .map(Session::getOptionalApplicationId)
                        .filter(Optional::isPresent)
                        .forEach(appId -> applicationIds.add(appId.get())));

        // Map each application to its active session id, so expiry can skip active sessions.
        Map<ApplicationId, Long> activeSessions = new HashMap<>();
        applicationIds.forEach(applicationId -> {
            RemoteSession activeSession = getActiveSession(applicationId);
            if (activeSession != null)
                activeSessions.put(applicationId, activeSession.getSessionId());
        });
        sessionsPerTenant.keySet().forEach(tenant -> tenant.getSessionRepository().deleteExpiredSessions(activeSessions));
    }

    /** Deletes expired session locks for all tenants; returns the total number deleted. */
    public int deleteExpiredSessionLocks(Duration expiryTime) {
        return tenantRepository.getAllTenants()
                .stream()
                .map(tenant -> tenant.getSessionRepository().deleteExpiredLocks(clock, expiryTime))
                .mapToInt(i -> i)
                .sum();
    }

    public int deleteExpiredRemoteSessions(Duration expiryTime) {
        return deleteExpiredRemoteSessions(clock, expiryTime);
    }

    /** Deletes expired remote sessions for all tenants; returns the total number deleted. */
    public int deleteExpiredRemoteSessions(Clock clock, Duration expiryTime) {
        return tenantRepository.getAllTenants()
                .stream()
                .map(tenant -> tenant.getSessionRepository().deleteExpiredRemoteSessions(clock, expiryTime))
                .mapToInt(i -> i)
                .sum();
    }

    public TenantRepository tenantRepository() {
        return tenantRepository;
    }

    /** Deletes a tenant; rejected with IllegalArgumentException while the tenant still has active applications. */
    public void deleteTenant(TenantName tenantName) {
        List<ApplicationId> activeApplications = activeApplications(tenantName);
        if (activeApplications.isEmpty())
            tenantRepository.deleteTenant(tenantName);
        else
            throw new IllegalArgumentException("Cannot delete tenant '" + tenantName + "', it has active applications: " + activeApplications);
    }

    private List<ApplicationId> activeApplications(TenantName tenantName) {
        return tenantRepository.getTenant(tenantName).getApplicationRepo().activeApplications();
    }

    public ProtonMetricsResponse getProtonMetrics(ApplicationId applicationId) {
        Application application = getApplication(applicationId);
        ProtonMetricsRetriever protonMetricsRetriever = new ProtonMetricsRetriever();
        return protonMetricsRetriever.getMetrics(application);
    }

    public DeploymentMetricsResponse getDeploymentMetrics(ApplicationId applicationId) {
        Application application = getApplication(applicationId);
        DeploymentMetricsRetriever deploymentMetricsRetriever = new DeploymentMetricsRetriever();
        return deploymentMetricsRetriever.getMetrics(application);
    }

    public ApplicationMetaData getMetadataFromLocalSession(Tenant tenant, long sessionId) {
        return getLocalSession(tenant, sessionId).getMetaData();
    }

    public ConfigserverConfig configserverConfig() {
        return configserverConfig;
    }

    /** Returns the application an active host belongs to, searching all tenants; null if the host is unknown. */
    public ApplicationId getApplicationIdForHostname(String hostname) {
        Optional<ApplicationId> applicationId = tenantRepository.getAllTenantNames().stream()
                .map(tenantName -> tenantRepository.getTenant(tenantName).getApplicationRepo().getApplicationIdForHostName(hostname))
                .filter(Objects::nonNull)
                .findFirst();
        return applicationId.orElse(null);
    }

    /** @throws IllegalStateException if the local session is in ACTIVATE state */
    private void validateThatLocalSessionIsNotActive(Tenant tenant, long sessionId) {
        LocalSession session = getLocalSession(tenant, sessionId);
        if (Session.Status.ACTIVATE.equals(session.getStatus())) {
            throw new IllegalStateException("Session is active: " + sessionId);
        }
    }

    /** @throws NotFoundException if the local session does not exist */
    private LocalSession getLocalSession(Tenant tenant, long sessionId) {
        LocalSession session = tenant.getSessionRepository().getLocalSession(sessionId);
        if (session == null) throw new NotFoundException("Session " + sessionId + " was not found");
        return session;
    }

    /** @throws NotFoundException if the remote session does not exist */
    private RemoteSession getRemoteSession(Tenant tenant, long sessionId) {
        RemoteSession session = tenant.getSessionRepository().getRemoteSession(sessionId);
        if (session == null) throw new NotFoundException("Session " + sessionId + " was not found");
        return session;
    }

    /** Returns the loaded application set of the currently active session, or empty if none is active. */
    public Optional<ApplicationSet> getCurrentActiveApplicationSet(Tenant tenant, ApplicationId appId) {
        Optional<ApplicationSet> currentActiveApplicationSet = Optional.empty();
        TenantApplications applicationRepo = tenant.getApplicationRepo();
        try {
            long currentActiveSessionId = applicationRepo.requireActiveSessionOf(appId);
            RemoteSession currentActiveSession = getRemoteSession(tenant, currentActiveSessionId);
            currentActiveApplicationSet = Optional.ofNullable(currentActiveSession.ensureApplicationLoaded());
        } catch (IllegalArgumentException e) {
            // Presumably thrown by requireActiveSessionOf when no session is active — treated as "no active application set".
        }
        return currentActiveApplicationSet;
    }

    private File decompressApplication(InputStream in, String contentType, File tempDir) {
        try (CompressedApplicationInputStream application =
                     CompressedApplicationInputStream.createFromCompressedStream(in, contentType)) {
            return decompressApplication(application, tempDir);
        } catch (IOException e) {
            throw new IllegalArgumentException("Unable to decompress data in body", e);
        }
    }

    private File decompressApplication(CompressedApplicationInputStream in, File tempDir) {
        try {
            return in.decompress(tempDir);
        } catch (IOException e) {
            throw new IllegalArgumentException("Unable to decompress stream", e);
        }
    }

    /** Best-effort recursive delete of a temp dir; failure is only logged. */
    private void cleanupTempDirectory(File tempDir) {
        logger.log(Level.FINE, "Deleting tmp dir '" + tempDir + "'");
        if (!IOUtils.recursiveDeleteDir(tempDir)) {
            logger.log(Level.WARNING, "Not able to delete tmp dir '" + tempDir + "'");
        }
    }

    // Unlike getActiveSession(), this throws (via requireActiveSessionOf/getRemoteSession) if no session is active.
    private RemoteSession getExistingSession(Tenant tenant, ApplicationId applicationId) {
        TenantApplications applicationRepo = tenant.getApplicationRepo();
        return getRemoteSession(tenant, applicationRepo.requireActiveSessionOf(applicationId));
    }

    /** Returns the active remote session, or null if the application is not active. */
    private RemoteSession getActiveSession(Tenant tenant, ApplicationId applicationId) {
        TenantApplications applicationRepo = tenant.getApplicationRepo();
        if (applicationRepo.activeApplications().contains(applicationId)) {
            return tenant.getSessionRepository().getRemoteSession(applicationRepo.requireActiveSessionOf(applicationId));
        }
        return null;
    }

    /** Returns the active local session, or null if the application is not active. */
    public LocalSession getActiveLocalSession(Tenant tenant, ApplicationId applicationId) {
        TenantApplications applicationRepo = tenant.getApplicationRepo();
        if (applicationRepo.activeApplications().contains(applicationId)) {
            return tenant.getSessionRepository().getLocalSession(applicationRepo.requireActiveSessionOf(applicationId));
        }
        return null;
    }

    /** Logs config change actions (restart at WARNING; re-feed at INFO when all allowed, else WARNING). */
    private static void logConfigChangeActions(ConfigChangeActions actions, DeployLogger logger) {
        RestartActions restartActions = actions.getRestartActions();
        if ( ! restartActions.isEmpty()) {
            logger.log(Level.WARNING, "Change(s) between active and new application that require restart:\n" +
                                      restartActions.format());
        }
        RefeedActions refeedActions = actions.getRefeedActions();
        if ( ! refeedActions.isEmpty()) {
            boolean allAllowed = refeedActions.getEntries().stream().allMatch(RefeedActions.Entry::allowed);
            logger.log(allAllowed ? Level.INFO : Level.WARNING,
                       "Change(s) between active and new application that may require re-feed:\n" +
                       refeedActions.format());
        }
    }

    /**
     * Resolves the URI of the application's log server: directly from the given hostname for hosted-Vespa
     * tenant applications, otherwise by locating the logserver host and its container in the model.
     *
     * NOTE(review): both string literals below are visibly truncated in this source ("http: with no
     * closing quote) — the original URL text after "http:" (scheme separator, host, port, path) was
     * lost, apparently by comment-stripping of "//" inside the strings. Restore from upstream history
     * before building; left byte-identical here.
     */
    private String getLogServerURI(ApplicationId applicationId, Optional<String> hostname) {
        // Allow to get logs from a given hostname only for hosted-Vespa tenant applications.
        if (hostname.isPresent() && HOSTED_VESPA_TENANT.equals(applicationId.tenant())) {
            return "http:
        }

        Application application = getApplication(applicationId);
        Collection<HostInfo> hostInfos = application.getModel().getHosts();

        HostInfo logServerHostInfo = hostInfos.stream()
                .filter(host -> host.getServices().stream()
                        .anyMatch(serviceInfo -> serviceInfo.getServiceType().equalsIgnoreCase("logserver")))
                .findFirst().orElseThrow(() -> new IllegalArgumentException("Could not find host info for logserver"));

        ServiceInfo serviceInfo = logServerHostInfo.getServices().stream().filter(service -> List.of(LOGSERVER_CONTAINER.serviceName, CONTAINER.serviceName).contains(service.getServiceType()))
                .findFirst().orElseThrow(() -> new IllegalArgumentException("No container running on logserver host"));
        int port = servicePort(serviceInfo);
        return "http:
    }

    /** Returns the first port of the service tagged "http". */
    private int servicePort(ServiceInfo serviceInfo) {
        return serviceInfo.getPorts().stream()
                .filter(portInfo -> portInfo.getTags().stream().anyMatch(tag -> tag.equalsIgnoreCase("http")))
                .findFirst().orElseThrow(() -> new IllegalArgumentException("Could not find HTTP port"))
                .getPort();
    }

    /** Creates an empty Slime object used as the root of a deploy log. */
    public Slime createDeployLog() {
        Slime deployLog = new Slime();
        deployLog.setObject();
        return deployLog;
    }

    /** Returns the zone (system, environment, region) this config server serves, from its config. */
    public Zone zone() {
        return new Zone(SystemName.from(configserverConfig.system()),
                        Environment.from(configserverConfig.environment()),
                        RegionName.from(configserverConfig.region()));
    }

    /** Emits as a metric the time in millis spent while holding this timer, with deployment ID as dimensions. */
    public ActionTimer timerFor(ApplicationId id, String metricName) {
        return new ActionTimer(metric, clock, id, configserverConfig.environment(), configserverConfig.region(), metricName);
    }

    /** Measures wall-clock time from construction to close() and reports it as a metric with deployment dimensions. */
    public static class ActionTimer implements AutoCloseable {

        private final Metric metric;
        private final Clock clock;
        private final ApplicationId id;
        private final String environment;
        private final String region;
        private final String name;
        private final Instant start;

        private ActionTimer(Metric metric, Clock clock, ApplicationId id, String environment, String region, String name) {
            this.metric = metric;
            this.clock = clock;
            this.id = id;
            this.environment = environment;
            this.region = region;
            this.name = name;
            this.start = clock.instant();
        }

        @Override
        public void close() {
            metric.set(name,
                       Duration.between(start, clock.instant()).toMillis(),
                       metric.createContext(Map.of("applicationId", id.toFullString(),
                                                   "tenantName", id.tenant().value(),
                                                   "app", id.application().value() + "." + id.instance().value(),
                                                   "zone", environment + "." + region)));
        }

    }

}
Yes — the maintainer has been disabled for a long time, since it turned out that we need more metadata to be able to delete unused tenants safely
/**
 * Deletes tenants that have no active applications and whose last deployment
 * happened before {@code now} minus the given TTL. Does nothing (returns an
 * empty set) unless the tenant-metadata feature flag is enabled, and never
 * deletes the built-in default tenant or the hosted Vespa tenant.
 *
 * @param ttlForUnusedTenant how long a tenant may stay unused before it is eligible for deletion
 * @param now the instant the TTL is measured against
 * @return the names of the tenants that were deleted
 */
public Set<TenantName> deleteUnusedTenants(Duration ttlForUnusedTenant, Instant now) {
    // Feature-flag guard: tenant metadata is required to decide deletion safely.
    if ( ! useTenantMetaData.value()) return Set.of();
    Instant unusedSince = now.minus(ttlForUnusedTenant); // hoisted: loop-invariant
    // Collect the candidates first, then delete. The original performed the
    // deletion as a side effect inside Stream.peek(), but peek() exists mainly
    // for debugging and the JDK permits eliding its action in some pipelines,
    // so deletion must not depend on it.
    Set<TenantName> tenantsToDelete = tenantRepository.getAllTenantNames().stream()
            .filter(tenantName -> activeApplications(tenantName).isEmpty())
            .filter(tenantName -> !tenantName.equals(TenantName.defaultName()))
            .filter(tenantName -> !tenantName.equals(HOSTED_VESPA_TENANT))
            .filter(tenantName -> getTenantMetaData(tenantRepository.getTenant(tenantName))
                                          .lastDeployTimestamp().isBefore(unusedSince))
            .collect(Collectors.toSet());
    tenantsToDelete.forEach(tenantRepository::deleteTenant);
    return tenantsToDelete;
}
// Feature-flag guard: deleting unused tenants requires tenant metadata, so
// bail out with an empty result set when the flag is off.
if ( ! useTenantMetaData.value()) return Set.of();
/**
 * Deletes tenants that have no active applications and whose last deployment
 * happened before {@code now} minus the given TTL. Does nothing (returns an
 * empty set) unless the tenant-metadata feature flag is enabled.
 *
 * @param ttlForUnusedTenant how long a tenant may stay unused before it is eligible for deletion
 * @param now the instant the TTL is measured against
 * @return the names of the tenants that were deleted
 */
public Set<TenantName> deleteUnusedTenants(Duration ttlForUnusedTenant, Instant now) {
    // Feature-flag guard: tenant metadata is required to decide deletion safely.
    if ( ! useTenantMetaData.value()) return Set.of();
    return tenantRepository.getAllTenantNames().stream()
            .filter(tenantName -> activeApplications(tenantName).isEmpty())
            // Never delete the built-in default tenant or the hosted Vespa tenant.
            .filter(tenantName -> !tenantName.equals(TenantName.defaultName()))
            .filter(tenantName -> !tenantName.equals(HOSTED_VESPA_TENANT))
            .filter(tenantName -> getTenantMetaData(tenantRepository.getTenant(tenantName)).lastDeployTimestamp().isBefore(now.minus(ttlForUnusedTenant)))
            // NOTE(review): deletion as a side effect in peek() — Stream.peek is
            // documented as a debugging aid and its action may be elided in some
            // pipelines; consider collecting first and deleting afterwards.
            .peek(tenantRepository::deleteTenant)
            .collect(Collectors.toSet());
}
class ApplicationRepository implements com.yahoo.config.provision.Deployer { private static final Logger log = Logger.getLogger(ApplicationRepository.class.getName()); private final TenantRepository tenantRepository; private final Optional<Provisioner> hostProvisioner; private final Optional<InfraDeployer> infraDeployer; private final ConfigConvergenceChecker convergeChecker; private final HttpProxy httpProxy; private final Clock clock; private final DeployLogger logger = new SilentDeployLogger(); private final ConfigserverConfig configserverConfig; private final FileDistributionStatus fileDistributionStatus; private final Orchestrator orchestrator; private final LogRetriever logRetriever; private final TesterClient testerClient; private final Metric metric; private final BooleanFlag useTenantMetaData; @Inject public ApplicationRepository(TenantRepository tenantRepository, HostProvisionerProvider hostProvisionerProvider, InfraDeployerProvider infraDeployerProvider, ConfigConvergenceChecker configConvergenceChecker, HttpProxy httpProxy, ConfigserverConfig configserverConfig, Orchestrator orchestrator, TesterClient testerClient, Metric metric, FlagSource flagSource) { this(tenantRepository, hostProvisionerProvider.getHostProvisioner(), infraDeployerProvider.getInfraDeployer(), configConvergenceChecker, httpProxy, configserverConfig, orchestrator, new LogRetriever(), new FileDistributionStatus(), Clock.systemUTC(), testerClient, metric, flagSource); } public ApplicationRepository(TenantRepository tenantRepository, Provisioner hostProvisioner, Orchestrator orchestrator, Clock clock) { this(tenantRepository, hostProvisioner, orchestrator, new ConfigserverConfig(new ConfigserverConfig.Builder()), new LogRetriever(), clock, new TesterClient(), new NullMetric(), new InMemoryFlagSource()); } public ApplicationRepository(TenantRepository tenantRepository, Provisioner hostProvisioner, Orchestrator orchestrator, ConfigserverConfig configserverConfig, LogRetriever logRetriever, 
Clock clock, TesterClient testerClient, Metric metric, FlagSource flagSource) { this(tenantRepository, Optional.of(hostProvisioner), Optional.empty(), new ConfigConvergenceChecker(), new HttpProxy(new SimpleHttpFetcher()), configserverConfig, orchestrator, logRetriever, new FileDistributionStatus(), clock, testerClient, metric, flagSource); } private ApplicationRepository(TenantRepository tenantRepository, Optional<Provisioner> hostProvisioner, Optional<InfraDeployer> infraDeployer, ConfigConvergenceChecker configConvergenceChecker, HttpProxy httpProxy, ConfigserverConfig configserverConfig, Orchestrator orchestrator, LogRetriever logRetriever, FileDistributionStatus fileDistributionStatus, Clock clock, TesterClient testerClient, Metric metric, FlagSource flagSource) { this.tenantRepository = tenantRepository; this.hostProvisioner = hostProvisioner; this.infraDeployer = infraDeployer; this.convergeChecker = configConvergenceChecker; this.httpProxy = httpProxy; this.configserverConfig = configserverConfig; this.orchestrator = orchestrator; this.logRetriever = logRetriever; this.fileDistributionStatus = fileDistributionStatus; this.clock = clock; this.testerClient = testerClient; this.metric = metric; this.useTenantMetaData = Flags.USE_TENANT_META_DATA.bindTo(flagSource); } public Metric metric() { return metric; } public PrepareResult prepare(Tenant tenant, long sessionId, PrepareParams prepareParams, Instant now) { validateThatLocalSessionIsNotActive(tenant, sessionId); LocalSession session = getLocalSession(tenant, sessionId); ApplicationId applicationId = prepareParams.getApplicationId(); Optional<ApplicationSet> currentActiveApplicationSet = getCurrentActiveApplicationSet(tenant, applicationId); Slime deployLog = createDeployLog(); DeployLogger logger = new DeployHandlerLogger(deployLog.get().setArray("log"), prepareParams.isVerbose(), applicationId); try (ActionTimer timer = timerFor(applicationId, "deployment.prepareMillis")) { SessionRepository 
sessionRepository = tenant.getSessionRepository(); ConfigChangeActions actions = sessionRepository.prepareLocalSession(session, logger, prepareParams, currentActiveApplicationSet, tenant.getPath(), now); logConfigChangeActions(actions, logger); log.log(Level.INFO, TenantRepository.logPre(applicationId) + "Session " + sessionId + " prepared successfully. "); return new PrepareResult(sessionId, actions, deployLog); } } public PrepareResult deploy(CompressedApplicationInputStream in, PrepareParams prepareParams, boolean ignoreSessionStaleFailure, Instant now) { File tempDir = uncheck(() -> Files.createTempDirectory("deploy")).toFile(); PrepareResult prepareResult; try { prepareResult = deploy(decompressApplication(in, tempDir), prepareParams, ignoreSessionStaleFailure, now); } finally { cleanupTempDirectory(tempDir); } return prepareResult; } public PrepareResult deploy(File applicationPackage, PrepareParams prepareParams) { return deploy(applicationPackage, prepareParams, false, Instant.now()); } public PrepareResult deploy(File applicationPackage, PrepareParams prepareParams, boolean ignoreSessionStaleFailure, Instant now) { ApplicationId applicationId = prepareParams.getApplicationId(); long sessionId = createSession(applicationId, prepareParams.getTimeoutBudget(), applicationPackage); Tenant tenant = getTenant(applicationId); PrepareResult result = prepare(tenant, sessionId, prepareParams, now); activate(tenant, sessionId, prepareParams.getTimeoutBudget(), ignoreSessionStaleFailure); return result; } /** * Creates a new deployment from the active application, if available. * This is used for system internal redeployments, not on application package changes. 
* * @param application the active application to be redeployed * @return a new deployment from the local active, or empty if a local active application * was not present for this id (meaning it either is not active or active on another * node in the config server cluster) */ @Override public Optional<com.yahoo.config.provision.Deployment> deployFromLocalActive(ApplicationId application) { return deployFromLocalActive(application, false); } /** * Creates a new deployment from the active application, if available. * This is used for system internal redeployments, not on application package changes. * * @param application the active application to be redeployed * @param bootstrap the deployment is done when bootstrapping * @return a new deployment from the local active, or empty if a local active application * was not present for this id (meaning it either is not active or active on another * node in the config server cluster) */ @Override public Optional<com.yahoo.config.provision.Deployment> deployFromLocalActive(ApplicationId application, boolean bootstrap) { return deployFromLocalActive(application, Duration.ofSeconds(configserverConfig.zookeeper().barrierTimeout()).plus(Duration.ofSeconds(5)), bootstrap); } /** * Creates a new deployment from the active application, if available. * This is used for system internal redeployments, not on application package changes. 
* * @param application the active application to be redeployed * @param timeout the timeout to use for each individual deployment operation * @param bootstrap the deployment is done when bootstrapping * @return a new deployment from the local active, or empty if a local active application * was not present for this id (meaning it either is not active or active on another * node in the config server cluster) */ @Override public Optional<com.yahoo.config.provision.Deployment> deployFromLocalActive(ApplicationId application, Duration timeout, boolean bootstrap) { Optional<com.yahoo.config.provision.Deployment> infraDeployment = infraDeployer.flatMap(d -> d.getDeployment(application)); if (infraDeployment.isPresent()) return infraDeployment; Tenant tenant = tenantRepository.getTenant(application.tenant()); if (tenant == null) return Optional.empty(); LocalSession activeSession = getActiveLocalSession(tenant, application); if (activeSession == null) return Optional.empty(); TimeoutBudget timeoutBudget = new TimeoutBudget(clock, timeout); SessionRepository sessionRepository = tenant.getSessionRepository(); LocalSession newSession = sessionRepository.createSessionFromExisting(activeSession, logger, true, timeoutBudget); sessionRepository.addLocalSession(newSession); return Optional.of(Deployment.unprepared(newSession, this, hostProvisioner, tenant, timeout, clock, false /* don't validate as this is already deployed */, bootstrap)); } @Override public Optional<Instant> lastDeployTime(ApplicationId application) { Tenant tenant = tenantRepository.getTenant(application.tenant()); if (tenant == null) return Optional.empty(); RemoteSession activeSession = getActiveSession(tenant, application); if (activeSession == null) return Optional.empty(); return Optional.of(activeSession.getCreateTime()); } public ApplicationId activate(Tenant tenant, long sessionId, TimeoutBudget timeoutBudget, boolean ignoreSessionStaleFailure) { LocalSession localSession = getLocalSession(tenant, 
sessionId); Deployment deployment = deployFromPreparedSession(localSession, tenant, timeoutBudget.timeLeft()); deployment.setIgnoreSessionStaleFailure(ignoreSessionStaleFailure); deployment.activate(); return localSession.getApplicationId(); } private Deployment deployFromPreparedSession(LocalSession session, Tenant tenant, Duration timeout) { return Deployment.prepared(session, this, hostProvisioner, tenant, timeout, clock, false); } public Transaction deactivateCurrentActivateNew(Session active, LocalSession prepared, boolean ignoreStaleSessionFailure) { Tenant tenant = tenantRepository.getTenant(prepared.getTenantName()); Transaction transaction = tenant.getSessionRepository().createActivateTransaction(prepared); if (active != null) { checkIfActiveHasChanged(prepared, active, ignoreStaleSessionFailure); checkIfActiveIsNewerThanSessionToBeActivated(prepared.getSessionId(), active.getSessionId()); transaction.add(active.createDeactivateTransaction().operations()); } if (useTenantMetaData.value()) transaction.add(writeTenantMetaData(tenant).operations()); return transaction; } private byte[] createMetaData(Tenant tenant) { return new TenantMetaData(tenant.getSessionRepository().clock().instant()).asJsonBytes(); } TenantMetaData getTenantMetaData(Tenant tenant) { Optional<byte[]> data = tenantRepository.getCurator().getData(TenantRepository.getTenantPath(tenant.getName())); return data.map(bytes -> TenantMetaData.fromJsonString(Utf8.toString(bytes))).orElse(new TenantMetaData(tenant.getCreatedTime())); } private Transaction writeTenantMetaData(Tenant tenant) { return new CuratorTransaction(tenantRepository.getCurator()) .add(CuratorOperations.setData(TenantRepository.getTenantPath(tenant.getName()).getAbsolute(), createMetaData(tenant))); } static void checkIfActiveHasChanged(LocalSession session, Session currentActiveSession, boolean ignoreStaleSessionFailure) { long activeSessionAtCreate = session.getActiveSessionAtCreate(); log.log(Level.FINE, 
currentActiveSession.logPre() + "active session id at create time=" + activeSessionAtCreate); if (activeSessionAtCreate == 0) return; long sessionId = session.getSessionId(); long currentActiveSessionSessionId = currentActiveSession.getSessionId(); log.log(Level.FINE, currentActiveSession.logPre() + "sessionId=" + sessionId + ", current active session=" + currentActiveSessionSessionId); if (currentActiveSession.isNewerThan(activeSessionAtCreate) && currentActiveSessionSessionId != sessionId) { String errMsg = currentActiveSession.logPre() + "Cannot activate session " + sessionId + " because the currently active session (" + currentActiveSessionSessionId + ") has changed since session " + sessionId + " was created (was " + activeSessionAtCreate + " at creation time)"; if (ignoreStaleSessionFailure) { log.warning(errMsg + " (Continuing because of force.)"); } else { throw new ActivationConflictException(errMsg); } } } static void checkIfActiveIsNewerThanSessionToBeActivated(long sessionId, long currentActiveSessionId) { if (sessionId < currentActiveSessionId) { throw new ActivationConflictException("It is not possible to activate session " + sessionId + ", because it is older than current active session (" + currentActiveSessionId + ")"); } } /** * Deletes an application * * @return true if the application was found and deleted, false if it was not present * @throws RuntimeException if the delete transaction fails. This method is exception safe. */ boolean delete(ApplicationId applicationId) { return delete(applicationId, Duration.ofSeconds(60)); } /** * Deletes an application * * @return true if the application was found and deleted, false if it was not present * @throws RuntimeException if the delete transaction fails. This method is exception safe. 
*/ public boolean delete(ApplicationId applicationId, Duration waitTime) { Tenant tenant = getTenant(applicationId); if (tenant == null) return false; TenantApplications tenantApplications = tenant.getApplicationRepo(); try (Lock lock = tenantApplications.lock(applicationId)) { if ( ! tenantApplications.exists(applicationId)) return false; Optional<Long> activeSession = tenantApplications.activeSessionOf(applicationId); if (activeSession.isEmpty()) return false; long sessionId = activeSession.get(); RemoteSession remoteSession; try { remoteSession = getRemoteSession(tenant, sessionId); Transaction deleteTransaction = remoteSession.createDeleteTransaction(); deleteTransaction.commit(); log.log(Level.INFO, TenantRepository.logPre(applicationId) + "Waiting for session " + sessionId + " to be deleted"); if ( ! waitTime.isZero() && localSessionHasBeenDeleted(applicationId, sessionId, waitTime)) { log.log(Level.INFO, TenantRepository.logPre(applicationId) + "Session " + sessionId + " deleted"); } else { deleteTransaction.rollbackOrLog(); throw new InternalServerException(applicationId + " was not deleted (waited " + waitTime + "), session " + sessionId); } } catch (NotFoundException e) { log.log(Level.INFO, TenantRepository.logPre(applicationId) + "Active session exists, but has not been deleted properly. 
Trying to cleanup"); } NestedTransaction transaction = new NestedTransaction(); Curator curator = tenantRepository.getCurator(); transaction.add(new ContainerEndpointsCache(tenant.getPath(), curator).delete(applicationId)); transaction.add(new ApplicationRolesStore(curator, tenant.getPath()).delete(applicationId)); transaction.add(new EndpointCertificateMetadataStore(curator, tenant.getPath()).delete(applicationId)); transaction.add(tenantApplications.createDeleteTransaction(applicationId)); hostProvisioner.ifPresent(provisioner -> provisioner.remove(transaction, applicationId)); transaction.onCommitted(() -> log.log(Level.INFO, "Deleted " + applicationId)); transaction.commit(); return true; } } public HttpResponse clusterControllerStatusPage(ApplicationId applicationId, String hostName, String pathSuffix) { String relativePath = "clustercontroller-status/" + pathSuffix; return httpProxy.get(getApplication(applicationId), hostName, CLUSTERCONTROLLER_CONTAINER.serviceName, relativePath); } public Long getApplicationGeneration(ApplicationId applicationId) { return getApplication(applicationId).getApplicationGeneration(); } public void restart(ApplicationId applicationId, HostFilter hostFilter) { hostProvisioner.ifPresent(provisioner -> provisioner.restart(applicationId, hostFilter)); } public boolean isSuspended(ApplicationId application) { return orchestrator.getAllSuspendedApplications().contains(application); } public HttpResponse filedistributionStatus(ApplicationId applicationId, Duration timeout) { return fileDistributionStatus.status(getApplication(applicationId), timeout); } public Set<String> deleteUnusedFiledistributionReferences(File fileReferencesPath, Duration keepFileReferences) { if (!fileReferencesPath.isDirectory()) throw new RuntimeException(fileReferencesPath + " is not a directory"); Set<String> fileReferencesInUse = new HashSet<>(); for (var applicationId : listApplications()) { try { Optional<Application> app = 
getOptionalApplication(applicationId); if (app.isEmpty()) continue; fileReferencesInUse.addAll(app.get().getModel().fileReferences().stream() .map(FileReference::value) .collect(Collectors.toSet())); } catch (Exception e) { log.log(Level.WARNING, "Getting file references in use for '" + applicationId + "' failed", e); } } log.log(Level.FINE, "File references in use : " + fileReferencesInUse); Set<String> fileReferencesOnDisk = getFileReferencesOnDisk(fileReferencesPath); log.log(Level.FINE, "File references on disk (in " + fileReferencesPath + "): " + fileReferencesOnDisk); Instant instant = Instant.now().minus(keepFileReferences); Set<String> fileReferencesToDelete = fileReferencesOnDisk .stream() .filter(fileReference -> ! fileReferencesInUse.contains(fileReference)) .filter(fileReference -> isFileLastModifiedBefore(new File(fileReferencesPath, fileReference), instant)) .collect(Collectors.toSet()); if (fileReferencesToDelete.size() > 0) { log.log(Level.FINE, "Will delete file references not in use: " + fileReferencesToDelete); fileReferencesToDelete.forEach(fileReference -> { File file = new File(fileReferencesPath, fileReference); if ( ! 
IOUtils.recursiveDeleteDir(file)) log.log(Level.WARNING, "Could not delete " + file.getAbsolutePath()); }); } return fileReferencesToDelete; } public Set<FileReference> getFileReferences(ApplicationId applicationId) { return getOptionalApplication(applicationId).map(app -> app.getModel().fileReferences()).orElse(Set.of()); } public ApplicationFile getApplicationFileFromSession(TenantName tenantName, long sessionId, String path, LocalSession.Mode mode) { Tenant tenant = tenantRepository.getTenant(tenantName); return getLocalSession(tenant, sessionId).getApplicationFile(Path.fromString(path), mode); } public Tenant getTenant(ApplicationId applicationId) { return tenantRepository.getTenant(applicationId.tenant()); } private Application getApplication(ApplicationId applicationId) { return getApplication(applicationId, Optional.empty()); } private Application getApplication(ApplicationId applicationId, Optional<Version> version) { try { Tenant tenant = getTenant(applicationId); if (tenant == null) throw new NotFoundException("Tenant '" + applicationId.tenant() + "' not found"); long sessionId = getSessionIdForApplication(tenant, applicationId); RemoteSession session = getRemoteSession(tenant, sessionId); return session.ensureApplicationLoaded().getForVersionOrLatest(version, clock.instant()); } catch (NotFoundException e) { log.log(Level.WARNING, "Failed getting application for '" + applicationId + "': " + e.getMessage()); throw e; } catch (Exception e) { log.log(Level.WARNING, "Failed getting application for '" + applicationId + "'", e); throw e; } } private Optional<Application> getOptionalApplication(ApplicationId applicationId) { try { return Optional.of(getApplication(applicationId)); } catch (Exception e) { return Optional.empty(); } } public Set<ApplicationId> listApplications() { return tenantRepository.getAllTenants().stream() .flatMap(tenant -> tenant.getApplicationRepo().activeApplications().stream()) .collect(Collectors.toSet()); } private boolean 
isFileLastModifiedBefore(File fileReference, Instant instant) { BasicFileAttributes fileAttributes; try { fileAttributes = readAttributes(fileReference.toPath(), BasicFileAttributes.class); return fileAttributes.lastModifiedTime().toInstant().isBefore(instant); } catch (IOException e) { throw new UncheckedIOException(e); } } private boolean localSessionHasBeenDeleted(ApplicationId applicationId, long sessionId, Duration waitTime) { SessionRepository sessionRepository = getTenant(applicationId).getSessionRepository(); Instant end = Instant.now().plus(waitTime); do { if (sessionRepository.getRemoteSession(sessionId) == null) return true; try { Thread.sleep(10); } catch (InterruptedException e) { /* ignored */} } while (Instant.now().isBefore(end)); return false; } public Optional<String> getApplicationPackageReference(ApplicationId applicationId) { Optional<String> applicationPackage = Optional.empty(); RemoteSession session = getActiveSession(applicationId); if (session != null) { FileReference applicationPackageReference = session.getApplicationPackageReference(); File downloadDirectory = new File(Defaults.getDefaults().underVespaHome(configserverConfig().fileReferencesDir())); if (applicationPackageReference != null && ! 
fileReferenceExistsOnDisk(downloadDirectory, applicationPackageReference)) applicationPackage = Optional.of(applicationPackageReference.value()); } return applicationPackage; } public List<Version> getAllVersions(ApplicationId applicationId) { Optional<ApplicationSet> applicationSet = getCurrentActiveApplicationSet(getTenant(applicationId), applicationId); if (applicationSet.isEmpty()) return List.of(); else return applicationSet.get().getAllVersions(applicationId); } public HttpResponse checkServiceForConfigConvergence(ApplicationId applicationId, String hostAndPort, URI uri, Duration timeout, Optional<Version> vespaVersion) { return convergeChecker.checkService(getApplication(applicationId, vespaVersion), hostAndPort, uri, timeout); } public HttpResponse servicesToCheckForConfigConvergence(ApplicationId applicationId, URI uri, Duration timeoutPerService, Optional<Version> vespaVersion) { return convergeChecker.servicesToCheck(getApplication(applicationId, vespaVersion), uri, timeoutPerService); } public HttpResponse getLogs(ApplicationId applicationId, Optional<String> hostname, String apiParams) { String logServerURI = getLogServerURI(applicationId, hostname) + apiParams; return logRetriever.getLogs(logServerURI); } public HttpResponse getTesterStatus(ApplicationId applicationId) { return testerClient.getStatus(getTesterHostname(applicationId), getTesterPort(applicationId)); } public HttpResponse getTesterLog(ApplicationId applicationId, Long after) { return testerClient.getLog(getTesterHostname(applicationId), getTesterPort(applicationId), after); } public HttpResponse startTests(ApplicationId applicationId, String suite, byte[] config) { return testerClient.startTests(getTesterHostname(applicationId), getTesterPort(applicationId), suite, config); } public HttpResponse isTesterReady(ApplicationId applicationId) { return testerClient.isTesterReady(getTesterHostname(applicationId), getTesterPort(applicationId)); } private String getTesterHostname(ApplicationId 
applicationId) { return getTesterServiceInfo(applicationId).getHostName(); } private int getTesterPort(ApplicationId applicationId) { ServiceInfo serviceInfo = getTesterServiceInfo(applicationId); return serviceInfo.getPorts().stream().filter(portInfo -> portInfo.getTags().contains("http")).findFirst().get().getPort(); } private ServiceInfo getTesterServiceInfo(ApplicationId applicationId) { Application application = getApplication(applicationId); return application.getModel().getHosts().stream() .findFirst().orElseThrow(() -> new InternalServerException("Could not find any host for tester app " + applicationId.toFullString())) .getServices().stream() .filter(service -> CONTAINER.serviceName.equals(service.getServiceType())) .findFirst() .orElseThrow(() -> new InternalServerException("Could not find any tester container for tester app " + applicationId.toFullString())); } public CompletionWaiter activate(LocalSession session, Session previousActiveSession, ApplicationId applicationId, boolean ignoreSessionStaleFailure) { CompletionWaiter waiter = session.getSessionZooKeeperClient().createActiveWaiter(); NestedTransaction transaction = new NestedTransaction(); transaction.add(deactivateCurrentActivateNew(previousActiveSession, session, ignoreSessionStaleFailure)); hostProvisioner.ifPresent(provisioner -> provisioner.activate(transaction, applicationId, session.getAllocatedHosts().getHosts())); transaction.commit(); return waiter; } /** * Gets the active Session for the given application id. * * @return the active session, or null if there is no active session for the given application id. 
*/ public RemoteSession getActiveSession(ApplicationId applicationId) { return getActiveSession(getTenant(applicationId), applicationId); } public long getSessionIdForApplication(ApplicationId applicationId) { Tenant tenant = getTenant(applicationId); if (tenant == null) throw new NotFoundException("Tenant '" + applicationId.tenant() + "' not found"); return getSessionIdForApplication(tenant, applicationId); } private long getSessionIdForApplication(Tenant tenant, ApplicationId applicationId) { TenantApplications applicationRepo = tenant.getApplicationRepo(); if (! applicationRepo.exists(applicationId)) throw new NotFoundException("Unknown application id '" + applicationId + "'"); return applicationRepo.requireActiveSessionOf(applicationId); } public void validateThatSessionIsNotActive(Tenant tenant, long sessionId) { Session session = getRemoteSession(tenant, sessionId); if (Session.Status.ACTIVATE.equals(session.getStatus())) { throw new IllegalStateException("Session is active: " + sessionId); } } public void validateThatSessionIsPrepared(Tenant tenant, long sessionId) { Session session = getRemoteSession(tenant, sessionId); if ( ! 
Session.Status.PREPARE.equals(session.getStatus())) throw new IllegalStateException("Session not prepared: " + sessionId); } public long createSessionFromExisting(ApplicationId applicationId, DeployLogger logger, boolean internalRedeploy, TimeoutBudget timeoutBudget) { Tenant tenant = getTenant(applicationId); SessionRepository sessionRepository = tenant.getSessionRepository(); RemoteSession fromSession = getExistingSession(tenant, applicationId); LocalSession session = sessionRepository.createSessionFromExisting(fromSession, logger, internalRedeploy, timeoutBudget); sessionRepository.addLocalSession(session); return session.getSessionId(); } public long createSession(ApplicationId applicationId, TimeoutBudget timeoutBudget, InputStream in, String contentType) { File tempDir = uncheck(() -> Files.createTempDirectory("deploy")).toFile(); long sessionId; try { sessionId = createSession(applicationId, timeoutBudget, decompressApplication(in, contentType, tempDir)); } finally { cleanupTempDirectory(tempDir); } return sessionId; } public long createSession(ApplicationId applicationId, TimeoutBudget timeoutBudget, File applicationDirectory) { Tenant tenant = getTenant(applicationId); tenant.getApplicationRepo().createApplication(applicationId); Optional<Long> activeSessionId = tenant.getApplicationRepo().activeSessionOf(applicationId); LocalSession session = tenant.getSessionRepository().createSession(applicationDirectory, applicationId, timeoutBudget, activeSessionId); tenant.getSessionRepository().addLocalSession(session); return session.getSessionId(); } public void deleteExpiredLocalSessions() { Map<Tenant, List<LocalSession>> sessionsPerTenant = new HashMap<>(); tenantRepository.getAllTenants().forEach(tenant -> sessionsPerTenant.put(tenant, tenant.getSessionRepository().getLocalSessions())); Set<ApplicationId> applicationIds = new HashSet<>(); sessionsPerTenant.values() .forEach(sessionList -> sessionList.stream() .map(Session::getOptionalApplicationId) 
.filter(Optional::isPresent) .forEach(appId -> applicationIds.add(appId.get()))); Map<ApplicationId, Long> activeSessions = new HashMap<>(); applicationIds.forEach(applicationId -> { RemoteSession activeSession = getActiveSession(applicationId); if (activeSession != null) activeSessions.put(applicationId, activeSession.getSessionId()); }); sessionsPerTenant.keySet().forEach(tenant -> tenant.getSessionRepository().deleteExpiredSessions(activeSessions)); } public int deleteExpiredSessionLocks(Duration expiryTime) { return tenantRepository.getAllTenants() .stream() .map(tenant -> tenant.getSessionRepository().deleteExpiredLocks(clock, expiryTime)) .mapToInt(i -> i) .sum(); } public int deleteExpiredRemoteSessions(Duration expiryTime) { return deleteExpiredRemoteSessions(clock, expiryTime); } public int deleteExpiredRemoteSessions(Clock clock, Duration expiryTime) { return tenantRepository.getAllTenants() .stream() .map(tenant -> tenant.getSessionRepository().deleteExpiredRemoteSessions(clock, expiryTime)) .mapToInt(i -> i) .sum(); } public TenantRepository tenantRepository() { return tenantRepository; } public void deleteTenant(TenantName tenantName) { List<ApplicationId> activeApplications = activeApplications(tenantName); if (activeApplications.isEmpty()) tenantRepository.deleteTenant(tenantName); else throw new IllegalArgumentException("Cannot delete tenant '" + tenantName + "', it has active applications: " + activeApplications); } private List<ApplicationId> activeApplications(TenantName tenantName) { return tenantRepository.getTenant(tenantName).getApplicationRepo().activeApplications(); } public ProtonMetricsResponse getProtonMetrics(ApplicationId applicationId) { Application application = getApplication(applicationId); ProtonMetricsRetriever protonMetricsRetriever = new ProtonMetricsRetriever(); return protonMetricsRetriever.getMetrics(application); } public DeploymentMetricsResponse getDeploymentMetrics(ApplicationId applicationId) { Application application = 
getApplication(applicationId); DeploymentMetricsRetriever deploymentMetricsRetriever = new DeploymentMetricsRetriever(); return deploymentMetricsRetriever.getMetrics(application); } public ApplicationMetaData getMetadataFromLocalSession(Tenant tenant, long sessionId) { return getLocalSession(tenant, sessionId).getMetaData(); } public ConfigserverConfig configserverConfig() { return configserverConfig; } public ApplicationId getApplicationIdForHostname(String hostname) { Optional<ApplicationId> applicationId = tenantRepository.getAllTenantNames().stream() .map(tenantName -> tenantRepository.getTenant(tenantName).getApplicationRepo().getApplicationIdForHostName(hostname)) .filter(Objects::nonNull) .findFirst(); return applicationId.orElse(null); } private void validateThatLocalSessionIsNotActive(Tenant tenant, long sessionId) { LocalSession session = getLocalSession(tenant, sessionId); if (Session.Status.ACTIVATE.equals(session.getStatus())) { throw new IllegalStateException("Session is active: " + sessionId); } } private LocalSession getLocalSession(Tenant tenant, long sessionId) { LocalSession session = tenant.getSessionRepository().getLocalSession(sessionId); if (session == null) throw new NotFoundException("Session " + sessionId + " was not found"); return session; } private RemoteSession getRemoteSession(Tenant tenant, long sessionId) { RemoteSession session = tenant.getSessionRepository().getRemoteSession(sessionId); if (session == null) throw new NotFoundException("Session " + sessionId + " was not found"); return session; } public Optional<ApplicationSet> getCurrentActiveApplicationSet(Tenant tenant, ApplicationId appId) { Optional<ApplicationSet> currentActiveApplicationSet = Optional.empty(); TenantApplications applicationRepo = tenant.getApplicationRepo(); try { long currentActiveSessionId = applicationRepo.requireActiveSessionOf(appId); RemoteSession currentActiveSession = getRemoteSession(tenant, currentActiveSessionId); currentActiveApplicationSet = 
Optional.ofNullable(currentActiveSession.ensureApplicationLoaded());
        } catch (IllegalArgumentException e) {
            // Intentionally ignored: requireActiveSessionOf (called above) throws when the
            // application has no active session, and the contract of this method is to
            // return Optional.empty() in that case rather than propagate.
        }
        return currentActiveApplicationSet;
    }

    // Decompresses an application package read from the given stream into tempDir.
    // The compressed stream is closed by try-with-resources; decompression failures
    // are reported as IllegalArgumentException (bad client input), not IOException.
    private File decompressApplication(InputStream in, String contentType, File tempDir) {
        try (CompressedApplicationInputStream application =
                CompressedApplicationInputStream.createFromCompressedStream(in, contentType)) {
            return decompressApplication(application, tempDir);
        } catch (IOException e) {
            throw new IllegalArgumentException("Unable to decompress data in body", e);
        }
    }

    // Decompresses an already-opened compressed application stream into tempDir.
    private File decompressApplication(CompressedApplicationInputStream in, File tempDir) {
        try {
            return in.decompress(tempDir);
        } catch (IOException e) {
            throw new IllegalArgumentException("Unable to decompress stream", e);
        }
    }

    // Best-effort removal of a temporary deploy directory; failure is logged, not thrown,
    // since leftover temp data must not fail the deployment itself.
    private void cleanupTempDirectory(File tempDir) {
        logger.log(Level.FINE, "Deleting tmp dir '" + tempDir + "'");
        if (!IOUtils.recursiveDeleteDir(tempDir)) {
            logger.log(Level.WARNING, "Not able to delete tmp dir '" + tempDir + "'");
        }
    }

    // Returns the remote session backing the application's active session.
    // Throws (via requireActiveSessionOf) if the application has no active session.
    private RemoteSession getExistingSession(Tenant tenant, ApplicationId applicationId) {
        TenantApplications applicationRepo = tenant.getApplicationRepo();
        return getRemoteSession(tenant, applicationRepo.requireActiveSessionOf(applicationId));
    }

    // Returns the active remote session for the application, or null if the application
    // is not in the tenant's active set. Null return (not Optional) is relied on by callers.
    private RemoteSession getActiveSession(Tenant tenant, ApplicationId applicationId) {
        TenantApplications applicationRepo = tenant.getApplicationRepo();
        if (applicationRepo.activeApplications().contains(applicationId)) {
            return tenant.getSessionRepository().getRemoteSession(applicationRepo.requireActiveSessionOf(applicationId));
        }
        return null;
    }

    // Local-session twin of getActiveSession: returns the active LocalSession, or null if
    // the application is not active (or active on another config server node).
    public LocalSession getActiveLocalSession(Tenant tenant, ApplicationId applicationId) {
        TenantApplications applicationRepo = tenant.getApplicationRepo();
        if (applicationRepo.activeApplications().contains(applicationId)) {
            return tenant.getSessionRepository().getLocalSession(applicationRepo.requireActiveSessionOf(applicationId));
        }
        return null;
    }

    private static void logConfigChangeActions(ConfigChangeActions actions,
DeployLogger logger) { RestartActions restartActions = actions.getRestartActions(); if ( ! restartActions.isEmpty()) { logger.log(Level.WARNING, "Change(s) between active and new application that require restart:\n" + restartActions.format()); } RefeedActions refeedActions = actions.getRefeedActions(); if ( ! refeedActions.isEmpty()) { boolean allAllowed = refeedActions.getEntries().stream().allMatch(RefeedActions.Entry::allowed); logger.log(allAllowed ? Level.INFO : Level.WARNING, "Change(s) between active and new application that may require re-feed:\n" + refeedActions.format()); } } private String getLogServerURI(ApplicationId applicationId, Optional<String> hostname) { if (hostname.isPresent() && HOSTED_VESPA_TENANT.equals(applicationId.tenant())) { return "http: } Application application = getApplication(applicationId); Collection<HostInfo> hostInfos = application.getModel().getHosts(); HostInfo logServerHostInfo = hostInfos.stream() .filter(host -> host.getServices().stream() .anyMatch(serviceInfo -> serviceInfo.getServiceType().equalsIgnoreCase("logserver"))) .findFirst().orElseThrow(() -> new IllegalArgumentException("Could not find host info for logserver")); ServiceInfo serviceInfo = logServerHostInfo.getServices().stream().filter(service -> List.of(LOGSERVER_CONTAINER.serviceName, CONTAINER.serviceName).contains(service.getServiceType())) .findFirst().orElseThrow(() -> new IllegalArgumentException("No container running on logserver host")); int port = servicePort(serviceInfo); return "http: } private int servicePort(ServiceInfo serviceInfo) { return serviceInfo.getPorts().stream() .filter(portInfo -> portInfo.getTags().stream().anyMatch(tag -> tag.equalsIgnoreCase("http"))) .findFirst().orElseThrow(() -> new IllegalArgumentException("Could not find HTTP port")) .getPort(); } public Slime createDeployLog() { Slime deployLog = new Slime(); deployLog.setObject(); return deployLog; } public Zone zone() { return new 
Zone(SystemName.from(configserverConfig.system()), Environment.from(configserverConfig.environment()), RegionName.from(configserverConfig.region())); } /** Emits as a metric the time in millis spent while holding this timer, with deployment ID as dimensions. */ public ActionTimer timerFor(ApplicationId id, String metricName) { return new ActionTimer(metric, clock, id, configserverConfig.environment(), configserverConfig.region(), metricName); } public static class ActionTimer implements AutoCloseable { private final Metric metric; private final Clock clock; private final ApplicationId id; private final String environment; private final String region; private final String name; private final Instant start; private ActionTimer(Metric metric, Clock clock, ApplicationId id, String environment, String region, String name) { this.metric = metric; this.clock = clock; this.id = id; this.environment = environment; this.region = region; this.name = name; this.start = clock.instant(); } @Override public void close() { metric.set(name, Duration.between(start, clock.instant()).toMillis(), metric.createContext(Map.of("applicationId", id.toFullString(), "tenantName", id.tenant().value(), "app", id.application().value() + "." + id.instance().value(), "zone", environment + "." + region))); } } }
class ApplicationRepository implements com.yahoo.config.provision.Deployer { private static final Logger log = Logger.getLogger(ApplicationRepository.class.getName()); private final TenantRepository tenantRepository; private final Optional<Provisioner> hostProvisioner; private final Optional<InfraDeployer> infraDeployer; private final ConfigConvergenceChecker convergeChecker; private final HttpProxy httpProxy; private final Clock clock; private final DeployLogger logger = new SilentDeployLogger(); private final ConfigserverConfig configserverConfig; private final FileDistributionStatus fileDistributionStatus; private final Orchestrator orchestrator; private final LogRetriever logRetriever; private final TesterClient testerClient; private final Metric metric; private final BooleanFlag useTenantMetaData; @Inject public ApplicationRepository(TenantRepository tenantRepository, HostProvisionerProvider hostProvisionerProvider, InfraDeployerProvider infraDeployerProvider, ConfigConvergenceChecker configConvergenceChecker, HttpProxy httpProxy, ConfigserverConfig configserverConfig, Orchestrator orchestrator, TesterClient testerClient, Metric metric, FlagSource flagSource) { this(tenantRepository, hostProvisionerProvider.getHostProvisioner(), infraDeployerProvider.getInfraDeployer(), configConvergenceChecker, httpProxy, configserverConfig, orchestrator, new LogRetriever(), new FileDistributionStatus(), Clock.systemUTC(), testerClient, metric, flagSource); } public ApplicationRepository(TenantRepository tenantRepository, Provisioner hostProvisioner, Orchestrator orchestrator, Clock clock) { this(tenantRepository, hostProvisioner, orchestrator, new ConfigserverConfig(new ConfigserverConfig.Builder()), new LogRetriever(), clock, new TesterClient(), new NullMetric(), new InMemoryFlagSource()); } public ApplicationRepository(TenantRepository tenantRepository, Provisioner hostProvisioner, Orchestrator orchestrator, ConfigserverConfig configserverConfig, LogRetriever logRetriever, 
Clock clock, TesterClient testerClient, Metric metric, FlagSource flagSource) { this(tenantRepository, Optional.of(hostProvisioner), Optional.empty(), new ConfigConvergenceChecker(), new HttpProxy(new SimpleHttpFetcher()), configserverConfig, orchestrator, logRetriever, new FileDistributionStatus(), clock, testerClient, metric, flagSource); } private ApplicationRepository(TenantRepository tenantRepository, Optional<Provisioner> hostProvisioner, Optional<InfraDeployer> infraDeployer, ConfigConvergenceChecker configConvergenceChecker, HttpProxy httpProxy, ConfigserverConfig configserverConfig, Orchestrator orchestrator, LogRetriever logRetriever, FileDistributionStatus fileDistributionStatus, Clock clock, TesterClient testerClient, Metric metric, FlagSource flagSource) { this.tenantRepository = tenantRepository; this.hostProvisioner = hostProvisioner; this.infraDeployer = infraDeployer; this.convergeChecker = configConvergenceChecker; this.httpProxy = httpProxy; this.configserverConfig = configserverConfig; this.orchestrator = orchestrator; this.logRetriever = logRetriever; this.fileDistributionStatus = fileDistributionStatus; this.clock = clock; this.testerClient = testerClient; this.metric = metric; this.useTenantMetaData = Flags.USE_TENANT_META_DATA.bindTo(flagSource); } public Metric metric() { return metric; } public PrepareResult prepare(Tenant tenant, long sessionId, PrepareParams prepareParams, Instant now) { validateThatLocalSessionIsNotActive(tenant, sessionId); LocalSession session = getLocalSession(tenant, sessionId); ApplicationId applicationId = prepareParams.getApplicationId(); Optional<ApplicationSet> currentActiveApplicationSet = getCurrentActiveApplicationSet(tenant, applicationId); Slime deployLog = createDeployLog(); DeployLogger logger = new DeployHandlerLogger(deployLog.get().setArray("log"), prepareParams.isVerbose(), applicationId); try (ActionTimer timer = timerFor(applicationId, "deployment.prepareMillis")) { SessionRepository 
sessionRepository = tenant.getSessionRepository(); ConfigChangeActions actions = sessionRepository.prepareLocalSession(session, logger, prepareParams, currentActiveApplicationSet, tenant.getPath(), now); logConfigChangeActions(actions, logger); log.log(Level.INFO, TenantRepository.logPre(applicationId) + "Session " + sessionId + " prepared successfully. "); return new PrepareResult(sessionId, actions, deployLog); } } public PrepareResult deploy(CompressedApplicationInputStream in, PrepareParams prepareParams, boolean ignoreSessionStaleFailure, Instant now) { File tempDir = uncheck(() -> Files.createTempDirectory("deploy")).toFile(); PrepareResult prepareResult; try { prepareResult = deploy(decompressApplication(in, tempDir), prepareParams, ignoreSessionStaleFailure, now); } finally { cleanupTempDirectory(tempDir); } return prepareResult; } public PrepareResult deploy(File applicationPackage, PrepareParams prepareParams) { return deploy(applicationPackage, prepareParams, false, Instant.now()); } public PrepareResult deploy(File applicationPackage, PrepareParams prepareParams, boolean ignoreSessionStaleFailure, Instant now) { ApplicationId applicationId = prepareParams.getApplicationId(); long sessionId = createSession(applicationId, prepareParams.getTimeoutBudget(), applicationPackage); Tenant tenant = getTenant(applicationId); PrepareResult result = prepare(tenant, sessionId, prepareParams, now); activate(tenant, sessionId, prepareParams.getTimeoutBudget(), ignoreSessionStaleFailure); return result; } /** * Creates a new deployment from the active application, if available. * This is used for system internal redeployments, not on application package changes. 
* * @param application the active application to be redeployed * @return a new deployment from the local active, or empty if a local active application * was not present for this id (meaning it either is not active or active on another * node in the config server cluster) */ @Override public Optional<com.yahoo.config.provision.Deployment> deployFromLocalActive(ApplicationId application) { return deployFromLocalActive(application, false); } /** * Creates a new deployment from the active application, if available. * This is used for system internal redeployments, not on application package changes. * * @param application the active application to be redeployed * @param bootstrap the deployment is done when bootstrapping * @return a new deployment from the local active, or empty if a local active application * was not present for this id (meaning it either is not active or active on another * node in the config server cluster) */ @Override public Optional<com.yahoo.config.provision.Deployment> deployFromLocalActive(ApplicationId application, boolean bootstrap) { return deployFromLocalActive(application, Duration.ofSeconds(configserverConfig.zookeeper().barrierTimeout()).plus(Duration.ofSeconds(5)), bootstrap); } /** * Creates a new deployment from the active application, if available. * This is used for system internal redeployments, not on application package changes. 
* * @param application the active application to be redeployed * @param timeout the timeout to use for each individual deployment operation * @param bootstrap the deployment is done when bootstrapping * @return a new deployment from the local active, or empty if a local active application * was not present for this id (meaning it either is not active or active on another * node in the config server cluster) */ @Override public Optional<com.yahoo.config.provision.Deployment> deployFromLocalActive(ApplicationId application, Duration timeout, boolean bootstrap) { Optional<com.yahoo.config.provision.Deployment> infraDeployment = infraDeployer.flatMap(d -> d.getDeployment(application)); if (infraDeployment.isPresent()) return infraDeployment; Tenant tenant = tenantRepository.getTenant(application.tenant()); if (tenant == null) return Optional.empty(); LocalSession activeSession = getActiveLocalSession(tenant, application); if (activeSession == null) return Optional.empty(); TimeoutBudget timeoutBudget = new TimeoutBudget(clock, timeout); SessionRepository sessionRepository = tenant.getSessionRepository(); LocalSession newSession = sessionRepository.createSessionFromExisting(activeSession, logger, true, timeoutBudget); sessionRepository.addLocalSession(newSession); return Optional.of(Deployment.unprepared(newSession, this, hostProvisioner, tenant, timeout, clock, false /* don't validate as this is already deployed */, bootstrap)); } @Override public Optional<Instant> lastDeployTime(ApplicationId application) { Tenant tenant = tenantRepository.getTenant(application.tenant()); if (tenant == null) return Optional.empty(); RemoteSession activeSession = getActiveSession(tenant, application); if (activeSession == null) return Optional.empty(); return Optional.of(activeSession.getCreateTime()); } public ApplicationId activate(Tenant tenant, long sessionId, TimeoutBudget timeoutBudget, boolean ignoreSessionStaleFailure) { LocalSession localSession = getLocalSession(tenant, 
sessionId); Deployment deployment = deployFromPreparedSession(localSession, tenant, timeoutBudget.timeLeft()); deployment.setIgnoreSessionStaleFailure(ignoreSessionStaleFailure); deployment.activate(); return localSession.getApplicationId(); } private Deployment deployFromPreparedSession(LocalSession session, Tenant tenant, Duration timeout) { return Deployment.prepared(session, this, hostProvisioner, tenant, timeout, clock, false); } public Transaction deactivateCurrentActivateNew(Session active, LocalSession prepared, boolean ignoreStaleSessionFailure) { Tenant tenant = tenantRepository.getTenant(prepared.getTenantName()); Transaction transaction = tenant.getSessionRepository().createActivateTransaction(prepared); if (active != null) { checkIfActiveHasChanged(prepared, active, ignoreStaleSessionFailure); checkIfActiveIsNewerThanSessionToBeActivated(prepared.getSessionId(), active.getSessionId()); transaction.add(active.createDeactivateTransaction().operations()); } if (useTenantMetaData.value()) transaction.add(writeTenantMetaData(tenant).operations()); return transaction; } private byte[] createMetaData(Tenant tenant) { return new TenantMetaData(tenant.getSessionRepository().clock().instant()).asJsonBytes(); } TenantMetaData getTenantMetaData(Tenant tenant) { Optional<byte[]> data = tenantRepository.getCurator().getData(TenantRepository.getTenantPath(tenant.getName())); return data.map(bytes -> TenantMetaData.fromJsonString(Utf8.toString(bytes))).orElse(new TenantMetaData(tenant.getCreatedTime())); } private Transaction writeTenantMetaData(Tenant tenant) { return new CuratorTransaction(tenantRepository.getCurator()) .add(CuratorOperations.setData(TenantRepository.getTenantPath(tenant.getName()).getAbsolute(), createMetaData(tenant))); } static void checkIfActiveHasChanged(LocalSession session, Session currentActiveSession, boolean ignoreStaleSessionFailure) { long activeSessionAtCreate = session.getActiveSessionAtCreate(); log.log(Level.FINE, 
currentActiveSession.logPre() + "active session id at create time=" + activeSessionAtCreate); if (activeSessionAtCreate == 0) return; long sessionId = session.getSessionId(); long currentActiveSessionSessionId = currentActiveSession.getSessionId(); log.log(Level.FINE, currentActiveSession.logPre() + "sessionId=" + sessionId + ", current active session=" + currentActiveSessionSessionId); if (currentActiveSession.isNewerThan(activeSessionAtCreate) && currentActiveSessionSessionId != sessionId) { String errMsg = currentActiveSession.logPre() + "Cannot activate session " + sessionId + " because the currently active session (" + currentActiveSessionSessionId + ") has changed since session " + sessionId + " was created (was " + activeSessionAtCreate + " at creation time)"; if (ignoreStaleSessionFailure) { log.warning(errMsg + " (Continuing because of force.)"); } else { throw new ActivationConflictException(errMsg); } } } static void checkIfActiveIsNewerThanSessionToBeActivated(long sessionId, long currentActiveSessionId) { if (sessionId < currentActiveSessionId) { throw new ActivationConflictException("It is not possible to activate session " + sessionId + ", because it is older than current active session (" + currentActiveSessionId + ")"); } } /** * Deletes an application * * @return true if the application was found and deleted, false if it was not present * @throws RuntimeException if the delete transaction fails. This method is exception safe. */ boolean delete(ApplicationId applicationId) { return delete(applicationId, Duration.ofSeconds(60)); } /** * Deletes an application * * @return true if the application was found and deleted, false if it was not present * @throws RuntimeException if the delete transaction fails. This method is exception safe. 
*/ public boolean delete(ApplicationId applicationId, Duration waitTime) { Tenant tenant = getTenant(applicationId); if (tenant == null) return false; TenantApplications tenantApplications = tenant.getApplicationRepo(); try (Lock lock = tenantApplications.lock(applicationId)) { if ( ! tenantApplications.exists(applicationId)) return false; Optional<Long> activeSession = tenantApplications.activeSessionOf(applicationId); if (activeSession.isEmpty()) return false; long sessionId = activeSession.get(); RemoteSession remoteSession; try { remoteSession = getRemoteSession(tenant, sessionId); Transaction deleteTransaction = remoteSession.createDeleteTransaction(); deleteTransaction.commit(); log.log(Level.INFO, TenantRepository.logPre(applicationId) + "Waiting for session " + sessionId + " to be deleted"); if ( ! waitTime.isZero() && localSessionHasBeenDeleted(applicationId, sessionId, waitTime)) { log.log(Level.INFO, TenantRepository.logPre(applicationId) + "Session " + sessionId + " deleted"); } else { deleteTransaction.rollbackOrLog(); throw new InternalServerException(applicationId + " was not deleted (waited " + waitTime + "), session " + sessionId); } } catch (NotFoundException e) { log.log(Level.INFO, TenantRepository.logPre(applicationId) + "Active session exists, but has not been deleted properly. 
Trying to cleanup"); } NestedTransaction transaction = new NestedTransaction(); Curator curator = tenantRepository.getCurator(); transaction.add(new ContainerEndpointsCache(tenant.getPath(), curator).delete(applicationId)); transaction.add(new ApplicationRolesStore(curator, tenant.getPath()).delete(applicationId)); transaction.add(new EndpointCertificateMetadataStore(curator, tenant.getPath()).delete(applicationId)); transaction.add(tenantApplications.createDeleteTransaction(applicationId)); hostProvisioner.ifPresent(provisioner -> provisioner.remove(transaction, applicationId)); transaction.onCommitted(() -> log.log(Level.INFO, "Deleted " + applicationId)); transaction.commit(); return true; } } public HttpResponse clusterControllerStatusPage(ApplicationId applicationId, String hostName, String pathSuffix) { String relativePath = "clustercontroller-status/" + pathSuffix; return httpProxy.get(getApplication(applicationId), hostName, CLUSTERCONTROLLER_CONTAINER.serviceName, relativePath); } public Long getApplicationGeneration(ApplicationId applicationId) { return getApplication(applicationId).getApplicationGeneration(); } public void restart(ApplicationId applicationId, HostFilter hostFilter) { hostProvisioner.ifPresent(provisioner -> provisioner.restart(applicationId, hostFilter)); } public boolean isSuspended(ApplicationId application) { return orchestrator.getAllSuspendedApplications().contains(application); } public HttpResponse filedistributionStatus(ApplicationId applicationId, Duration timeout) { return fileDistributionStatus.status(getApplication(applicationId), timeout); } public Set<String> deleteUnusedFiledistributionReferences(File fileReferencesPath, Duration keepFileReferences) { if (!fileReferencesPath.isDirectory()) throw new RuntimeException(fileReferencesPath + " is not a directory"); Set<String> fileReferencesInUse = new HashSet<>(); for (var applicationId : listApplications()) { try { Optional<Application> app = 
getOptionalApplication(applicationId); if (app.isEmpty()) continue; fileReferencesInUse.addAll(app.get().getModel().fileReferences().stream() .map(FileReference::value) .collect(Collectors.toSet())); } catch (Exception e) { log.log(Level.WARNING, "Getting file references in use for '" + applicationId + "' failed", e); } } log.log(Level.FINE, "File references in use : " + fileReferencesInUse); Set<String> fileReferencesOnDisk = getFileReferencesOnDisk(fileReferencesPath); log.log(Level.FINE, "File references on disk (in " + fileReferencesPath + "): " + fileReferencesOnDisk); Instant instant = Instant.now().minus(keepFileReferences); Set<String> fileReferencesToDelete = fileReferencesOnDisk .stream() .filter(fileReference -> ! fileReferencesInUse.contains(fileReference)) .filter(fileReference -> isFileLastModifiedBefore(new File(fileReferencesPath, fileReference), instant)) .collect(Collectors.toSet()); if (fileReferencesToDelete.size() > 0) { log.log(Level.FINE, "Will delete file references not in use: " + fileReferencesToDelete); fileReferencesToDelete.forEach(fileReference -> { File file = new File(fileReferencesPath, fileReference); if ( ! 
IOUtils.recursiveDeleteDir(file)) log.log(Level.WARNING, "Could not delete " + file.getAbsolutePath()); }); } return fileReferencesToDelete; } public Set<FileReference> getFileReferences(ApplicationId applicationId) { return getOptionalApplication(applicationId).map(app -> app.getModel().fileReferences()).orElse(Set.of()); } public ApplicationFile getApplicationFileFromSession(TenantName tenantName, long sessionId, String path, LocalSession.Mode mode) { Tenant tenant = tenantRepository.getTenant(tenantName); return getLocalSession(tenant, sessionId).getApplicationFile(Path.fromString(path), mode); } public Tenant getTenant(ApplicationId applicationId) { return tenantRepository.getTenant(applicationId.tenant()); } private Application getApplication(ApplicationId applicationId) { return getApplication(applicationId, Optional.empty()); } private Application getApplication(ApplicationId applicationId, Optional<Version> version) { try { Tenant tenant = getTenant(applicationId); if (tenant == null) throw new NotFoundException("Tenant '" + applicationId.tenant() + "' not found"); long sessionId = getSessionIdForApplication(tenant, applicationId); RemoteSession session = getRemoteSession(tenant, sessionId); return session.ensureApplicationLoaded().getForVersionOrLatest(version, clock.instant()); } catch (NotFoundException e) { log.log(Level.WARNING, "Failed getting application for '" + applicationId + "': " + e.getMessage()); throw e; } catch (Exception e) { log.log(Level.WARNING, "Failed getting application for '" + applicationId + "'", e); throw e; } } private Optional<Application> getOptionalApplication(ApplicationId applicationId) { try { return Optional.of(getApplication(applicationId)); } catch (Exception e) { return Optional.empty(); } } public Set<ApplicationId> listApplications() { return tenantRepository.getAllTenants().stream() .flatMap(tenant -> tenant.getApplicationRepo().activeApplications().stream()) .collect(Collectors.toSet()); } private boolean 
isFileLastModifiedBefore(File fileReference, Instant instant) { BasicFileAttributes fileAttributes; try { fileAttributes = readAttributes(fileReference.toPath(), BasicFileAttributes.class); return fileAttributes.lastModifiedTime().toInstant().isBefore(instant); } catch (IOException e) { throw new UncheckedIOException(e); } } private boolean localSessionHasBeenDeleted(ApplicationId applicationId, long sessionId, Duration waitTime) { SessionRepository sessionRepository = getTenant(applicationId).getSessionRepository(); Instant end = Instant.now().plus(waitTime); do { if (sessionRepository.getRemoteSession(sessionId) == null) return true; try { Thread.sleep(10); } catch (InterruptedException e) { /* ignored */} } while (Instant.now().isBefore(end)); return false; } public Optional<String> getApplicationPackageReference(ApplicationId applicationId) { Optional<String> applicationPackage = Optional.empty(); RemoteSession session = getActiveSession(applicationId); if (session != null) { FileReference applicationPackageReference = session.getApplicationPackageReference(); File downloadDirectory = new File(Defaults.getDefaults().underVespaHome(configserverConfig().fileReferencesDir())); if (applicationPackageReference != null && ! 
fileReferenceExistsOnDisk(downloadDirectory, applicationPackageReference)) applicationPackage = Optional.of(applicationPackageReference.value()); } return applicationPackage; } public List<Version> getAllVersions(ApplicationId applicationId) { Optional<ApplicationSet> applicationSet = getCurrentActiveApplicationSet(getTenant(applicationId), applicationId); if (applicationSet.isEmpty()) return List.of(); else return applicationSet.get().getAllVersions(applicationId); } public HttpResponse checkServiceForConfigConvergence(ApplicationId applicationId, String hostAndPort, URI uri, Duration timeout, Optional<Version> vespaVersion) { return convergeChecker.checkService(getApplication(applicationId, vespaVersion), hostAndPort, uri, timeout); } public HttpResponse servicesToCheckForConfigConvergence(ApplicationId applicationId, URI uri, Duration timeoutPerService, Optional<Version> vespaVersion) { return convergeChecker.servicesToCheck(getApplication(applicationId, vespaVersion), uri, timeoutPerService); } public HttpResponse getLogs(ApplicationId applicationId, Optional<String> hostname, String apiParams) { String logServerURI = getLogServerURI(applicationId, hostname) + apiParams; return logRetriever.getLogs(logServerURI); } public HttpResponse getTesterStatus(ApplicationId applicationId) { return testerClient.getStatus(getTesterHostname(applicationId), getTesterPort(applicationId)); } public HttpResponse getTesterLog(ApplicationId applicationId, Long after) { return testerClient.getLog(getTesterHostname(applicationId), getTesterPort(applicationId), after); } public HttpResponse startTests(ApplicationId applicationId, String suite, byte[] config) { return testerClient.startTests(getTesterHostname(applicationId), getTesterPort(applicationId), suite, config); } public HttpResponse isTesterReady(ApplicationId applicationId) { return testerClient.isTesterReady(getTesterHostname(applicationId), getTesterPort(applicationId)); } private String getTesterHostname(ApplicationId 
applicationId) { return getTesterServiceInfo(applicationId).getHostName(); } private int getTesterPort(ApplicationId applicationId) { ServiceInfo serviceInfo = getTesterServiceInfo(applicationId); return serviceInfo.getPorts().stream().filter(portInfo -> portInfo.getTags().contains("http")).findFirst().get().getPort(); } private ServiceInfo getTesterServiceInfo(ApplicationId applicationId) { Application application = getApplication(applicationId); return application.getModel().getHosts().stream() .findFirst().orElseThrow(() -> new InternalServerException("Could not find any host for tester app " + applicationId.toFullString())) .getServices().stream() .filter(service -> CONTAINER.serviceName.equals(service.getServiceType())) .findFirst() .orElseThrow(() -> new InternalServerException("Could not find any tester container for tester app " + applicationId.toFullString())); } public CompletionWaiter activate(LocalSession session, Session previousActiveSession, ApplicationId applicationId, boolean ignoreSessionStaleFailure) { CompletionWaiter waiter = session.getSessionZooKeeperClient().createActiveWaiter(); NestedTransaction transaction = new NestedTransaction(); transaction.add(deactivateCurrentActivateNew(previousActiveSession, session, ignoreSessionStaleFailure)); hostProvisioner.ifPresent(provisioner -> provisioner.activate(transaction, applicationId, session.getAllocatedHosts().getHosts())); transaction.commit(); return waiter; } /** * Gets the active Session for the given application id. * * @return the active session, or null if there is no active session for the given application id. 
*/ public RemoteSession getActiveSession(ApplicationId applicationId) { return getActiveSession(getTenant(applicationId), applicationId); } public long getSessionIdForApplication(ApplicationId applicationId) { Tenant tenant = getTenant(applicationId); if (tenant == null) throw new NotFoundException("Tenant '" + applicationId.tenant() + "' not found"); return getSessionIdForApplication(tenant, applicationId); } private long getSessionIdForApplication(Tenant tenant, ApplicationId applicationId) { TenantApplications applicationRepo = tenant.getApplicationRepo(); if (! applicationRepo.exists(applicationId)) throw new NotFoundException("Unknown application id '" + applicationId + "'"); return applicationRepo.requireActiveSessionOf(applicationId); } public void validateThatSessionIsNotActive(Tenant tenant, long sessionId) { Session session = getRemoteSession(tenant, sessionId); if (Session.Status.ACTIVATE.equals(session.getStatus())) { throw new IllegalStateException("Session is active: " + sessionId); } } public void validateThatSessionIsPrepared(Tenant tenant, long sessionId) { Session session = getRemoteSession(tenant, sessionId); if ( ! 
Session.Status.PREPARE.equals(session.getStatus())) throw new IllegalStateException("Session not prepared: " + sessionId); } public long createSessionFromExisting(ApplicationId applicationId, DeployLogger logger, boolean internalRedeploy, TimeoutBudget timeoutBudget) { Tenant tenant = getTenant(applicationId); SessionRepository sessionRepository = tenant.getSessionRepository(); RemoteSession fromSession = getExistingSession(tenant, applicationId); LocalSession session = sessionRepository.createSessionFromExisting(fromSession, logger, internalRedeploy, timeoutBudget); sessionRepository.addLocalSession(session); return session.getSessionId(); } public long createSession(ApplicationId applicationId, TimeoutBudget timeoutBudget, InputStream in, String contentType) { File tempDir = uncheck(() -> Files.createTempDirectory("deploy")).toFile(); long sessionId; try { sessionId = createSession(applicationId, timeoutBudget, decompressApplication(in, contentType, tempDir)); } finally { cleanupTempDirectory(tempDir); } return sessionId; } public long createSession(ApplicationId applicationId, TimeoutBudget timeoutBudget, File applicationDirectory) { Tenant tenant = getTenant(applicationId); tenant.getApplicationRepo().createApplication(applicationId); Optional<Long> activeSessionId = tenant.getApplicationRepo().activeSessionOf(applicationId); LocalSession session = tenant.getSessionRepository().createSession(applicationDirectory, applicationId, timeoutBudget, activeSessionId); tenant.getSessionRepository().addLocalSession(session); return session.getSessionId(); } public void deleteExpiredLocalSessions() { Map<Tenant, List<LocalSession>> sessionsPerTenant = new HashMap<>(); tenantRepository.getAllTenants().forEach(tenant -> sessionsPerTenant.put(tenant, tenant.getSessionRepository().getLocalSessions())); Set<ApplicationId> applicationIds = new HashSet<>(); sessionsPerTenant.values() .forEach(sessionList -> sessionList.stream() .map(Session::getOptionalApplicationId) 
.filter(Optional::isPresent) .forEach(appId -> applicationIds.add(appId.get()))); Map<ApplicationId, Long> activeSessions = new HashMap<>(); applicationIds.forEach(applicationId -> { RemoteSession activeSession = getActiveSession(applicationId); if (activeSession != null) activeSessions.put(applicationId, activeSession.getSessionId()); }); sessionsPerTenant.keySet().forEach(tenant -> tenant.getSessionRepository().deleteExpiredSessions(activeSessions)); } public int deleteExpiredSessionLocks(Duration expiryTime) { return tenantRepository.getAllTenants() .stream() .map(tenant -> tenant.getSessionRepository().deleteExpiredLocks(clock, expiryTime)) .mapToInt(i -> i) .sum(); } public int deleteExpiredRemoteSessions(Duration expiryTime) { return deleteExpiredRemoteSessions(clock, expiryTime); } public int deleteExpiredRemoteSessions(Clock clock, Duration expiryTime) { return tenantRepository.getAllTenants() .stream() .map(tenant -> tenant.getSessionRepository().deleteExpiredRemoteSessions(clock, expiryTime)) .mapToInt(i -> i) .sum(); } public TenantRepository tenantRepository() { return tenantRepository; } public void deleteTenant(TenantName tenantName) { List<ApplicationId> activeApplications = activeApplications(tenantName); if (activeApplications.isEmpty()) tenantRepository.deleteTenant(tenantName); else throw new IllegalArgumentException("Cannot delete tenant '" + tenantName + "', it has active applications: " + activeApplications); } private List<ApplicationId> activeApplications(TenantName tenantName) { return tenantRepository.getTenant(tenantName).getApplicationRepo().activeApplications(); } public ProtonMetricsResponse getProtonMetrics(ApplicationId applicationId) { Application application = getApplication(applicationId); ProtonMetricsRetriever protonMetricsRetriever = new ProtonMetricsRetriever(); return protonMetricsRetriever.getMetrics(application); } public DeploymentMetricsResponse getDeploymentMetrics(ApplicationId applicationId) { Application application = 
getApplication(applicationId); DeploymentMetricsRetriever deploymentMetricsRetriever = new DeploymentMetricsRetriever(); return deploymentMetricsRetriever.getMetrics(application); } public ApplicationMetaData getMetadataFromLocalSession(Tenant tenant, long sessionId) { return getLocalSession(tenant, sessionId).getMetaData(); } public ConfigserverConfig configserverConfig() { return configserverConfig; } public ApplicationId getApplicationIdForHostname(String hostname) { Optional<ApplicationId> applicationId = tenantRepository.getAllTenantNames().stream() .map(tenantName -> tenantRepository.getTenant(tenantName).getApplicationRepo().getApplicationIdForHostName(hostname)) .filter(Objects::nonNull) .findFirst(); return applicationId.orElse(null); } private void validateThatLocalSessionIsNotActive(Tenant tenant, long sessionId) { LocalSession session = getLocalSession(tenant, sessionId); if (Session.Status.ACTIVATE.equals(session.getStatus())) { throw new IllegalStateException("Session is active: " + sessionId); } } private LocalSession getLocalSession(Tenant tenant, long sessionId) { LocalSession session = tenant.getSessionRepository().getLocalSession(sessionId); if (session == null) throw new NotFoundException("Session " + sessionId + " was not found"); return session; } private RemoteSession getRemoteSession(Tenant tenant, long sessionId) { RemoteSession session = tenant.getSessionRepository().getRemoteSession(sessionId); if (session == null) throw new NotFoundException("Session " + sessionId + " was not found"); return session; } public Optional<ApplicationSet> getCurrentActiveApplicationSet(Tenant tenant, ApplicationId appId) { Optional<ApplicationSet> currentActiveApplicationSet = Optional.empty(); TenantApplications applicationRepo = tenant.getApplicationRepo(); try { long currentActiveSessionId = applicationRepo.requireActiveSessionOf(appId); RemoteSession currentActiveSession = getRemoteSession(tenant, currentActiveSessionId); currentActiveApplicationSet = 
Optional.ofNullable(currentActiveSession.ensureApplicationLoaded()); } catch (IllegalArgumentException e) { } return currentActiveApplicationSet; } private File decompressApplication(InputStream in, String contentType, File tempDir) { try (CompressedApplicationInputStream application = CompressedApplicationInputStream.createFromCompressedStream(in, contentType)) { return decompressApplication(application, tempDir); } catch (IOException e) { throw new IllegalArgumentException("Unable to decompress data in body", e); } } private File decompressApplication(CompressedApplicationInputStream in, File tempDir) { try { return in.decompress(tempDir); } catch (IOException e) { throw new IllegalArgumentException("Unable to decompress stream", e); } } private void cleanupTempDirectory(File tempDir) { logger.log(Level.FINE, "Deleting tmp dir '" + tempDir + "'"); if (!IOUtils.recursiveDeleteDir(tempDir)) { logger.log(Level.WARNING, "Not able to delete tmp dir '" + tempDir + "'"); } } private RemoteSession getExistingSession(Tenant tenant, ApplicationId applicationId) { TenantApplications applicationRepo = tenant.getApplicationRepo(); return getRemoteSession(tenant, applicationRepo.requireActiveSessionOf(applicationId)); } private RemoteSession getActiveSession(Tenant tenant, ApplicationId applicationId) { TenantApplications applicationRepo = tenant.getApplicationRepo(); if (applicationRepo.activeApplications().contains(applicationId)) { return tenant.getSessionRepository().getRemoteSession(applicationRepo.requireActiveSessionOf(applicationId)); } return null; } public LocalSession getActiveLocalSession(Tenant tenant, ApplicationId applicationId) { TenantApplications applicationRepo = tenant.getApplicationRepo(); if (applicationRepo.activeApplications().contains(applicationId)) { return tenant.getSessionRepository().getLocalSession(applicationRepo.requireActiveSessionOf(applicationId)); } return null; } private static void logConfigChangeActions(ConfigChangeActions actions, 
DeployLogger logger) { RestartActions restartActions = actions.getRestartActions(); if ( ! restartActions.isEmpty()) { logger.log(Level.WARNING, "Change(s) between active and new application that require restart:\n" + restartActions.format()); } RefeedActions refeedActions = actions.getRefeedActions(); if ( ! refeedActions.isEmpty()) { boolean allAllowed = refeedActions.getEntries().stream().allMatch(RefeedActions.Entry::allowed); logger.log(allAllowed ? Level.INFO : Level.WARNING, "Change(s) between active and new application that may require re-feed:\n" + refeedActions.format()); } } private String getLogServerURI(ApplicationId applicationId, Optional<String> hostname) { if (hostname.isPresent() && HOSTED_VESPA_TENANT.equals(applicationId.tenant())) { return "http: } Application application = getApplication(applicationId); Collection<HostInfo> hostInfos = application.getModel().getHosts(); HostInfo logServerHostInfo = hostInfos.stream() .filter(host -> host.getServices().stream() .anyMatch(serviceInfo -> serviceInfo.getServiceType().equalsIgnoreCase("logserver"))) .findFirst().orElseThrow(() -> new IllegalArgumentException("Could not find host info for logserver")); ServiceInfo serviceInfo = logServerHostInfo.getServices().stream().filter(service -> List.of(LOGSERVER_CONTAINER.serviceName, CONTAINER.serviceName).contains(service.getServiceType())) .findFirst().orElseThrow(() -> new IllegalArgumentException("No container running on logserver host")); int port = servicePort(serviceInfo); return "http: } private int servicePort(ServiceInfo serviceInfo) { return serviceInfo.getPorts().stream() .filter(portInfo -> portInfo.getTags().stream().anyMatch(tag -> tag.equalsIgnoreCase("http"))) .findFirst().orElseThrow(() -> new IllegalArgumentException("Could not find HTTP port")) .getPort(); } public Slime createDeployLog() { Slime deployLog = new Slime(); deployLog.setObject(); return deployLog; } public Zone zone() { return new 
Zone(SystemName.from(configserverConfig.system()), Environment.from(configserverConfig.environment()), RegionName.from(configserverConfig.region())); } /** Emits as a metric the time in millis spent while holding this timer, with deployment ID as dimensions. */ public ActionTimer timerFor(ApplicationId id, String metricName) { return new ActionTimer(metric, clock, id, configserverConfig.environment(), configserverConfig.region(), metricName); } public static class ActionTimer implements AutoCloseable { private final Metric metric; private final Clock clock; private final ApplicationId id; private final String environment; private final String region; private final String name; private final Instant start; private ActionTimer(Metric metric, Clock clock, ApplicationId id, String environment, String region, String name) { this.metric = metric; this.clock = clock; this.id = id; this.environment = environment; this.region = region; this.name = name; this.start = clock.instant(); } @Override public void close() { metric.set(name, Duration.between(start, clock.instant()).toMillis(), metric.createContext(Map.of("applicationId", id.toFullString(), "tenantName", id.tenant().value(), "app", id.application().value() + "." + id.instance().value(), "zone", environment + "." + region))); } } }
// NOTE(review): this method appears to be called only by an external maintainer job
// (it has no callers within this file) — verify callers before changing its signature.
/**
 * Deletes tenants that have no active applications and no deployment activity within the
 * given TTL. The default tenant and the hosted-vespa tenant are never deleted.
 *
 * @param ttlForUnusedTenant how long a tenant may be idle (no deploys) before it is deleted
 * @param now the instant idle time is measured against
 * @return the names of the tenants that were deleted (empty if the tenant-metadata flag is off)
 */
public Set<TenantName> deleteUnusedTenants(Duration ttlForUnusedTenant, Instant now) {
    // Tenant metadata is the source of the last-deploy timestamp; without it we cannot
    // safely decide that a tenant is unused.
    if ( ! useTenantMetaData.value()) return Set.of();
    Instant cutoff = now.minus(ttlForUnusedTenant);
    Set<TenantName> unusedTenants = tenantRepository.getAllTenantNames().stream()
            .filter(tenantName -> activeApplications(tenantName).isEmpty())
            .filter(tenantName -> ! tenantName.equals(TenantName.defaultName()))
            .filter(tenantName -> ! tenantName.equals(HOSTED_VESPA_TENANT))
            .filter(tenantName -> getTenantMetaData(tenantRepository.getTenant(tenantName))
                                          .lastDeployTimestamp().isBefore(cutoff))
            .collect(Collectors.toSet());
    // Delete AFTER collecting: the original performed the deletion inside Stream.peek,
    // but peek exists for debugging and its action is not guaranteed to run for every
    // element — required side effects must not live inside the stream pipeline.
    unusedTenants.forEach(tenantRepository::deleteTenant);
    return unusedTenants;
}
if ( ! useTenantMetaData.value()) return Set.of();
/**
 * Deletes tenants with no active applications whose last deployment is older than the
 * given TTL. The default tenant and the hosted-vespa tenant are always kept.
 *
 * @param ttlForUnusedTenant maximum idle time before a tenant is considered unused
 * @param now the reference instant for computing idle time
 * @return the names of the deleted tenants; empty when the tenant-metadata flag is disabled
 */
public Set<TenantName> deleteUnusedTenants(Duration ttlForUnusedTenant, Instant now) {
    // Feature-flag gate: without tenant metadata there is no last-deploy timestamp to consult.
    if ( ! useTenantMetaData.value()) return Set.of();
    Instant cutoff = now.minus(ttlForUnusedTenant);
    Set<TenantName> deleted = new HashSet<>();
    for (TenantName tenantName : tenantRepository.getAllTenantNames()) {
        if ( ! activeApplications(tenantName).isEmpty()) continue;          // still in use
        if (tenantName.equals(TenantName.defaultName())) continue;          // never delete default
        if (tenantName.equals(HOSTED_VESPA_TENANT)) continue;               // never delete hosted-vespa
        Instant lastDeploy = getTenantMetaData(tenantRepository.getTenant(tenantName)).lastDeployTimestamp();
        if ( ! lastDeploy.isBefore(cutoff)) continue;                       // deployed recently enough
        tenantRepository.deleteTenant(tenantName);
        deleted.add(tenantName);
    }
    return deleted;
}
class ApplicationRepository implements com.yahoo.config.provision.Deployer { private static final Logger log = Logger.getLogger(ApplicationRepository.class.getName()); private final TenantRepository tenantRepository; private final Optional<Provisioner> hostProvisioner; private final Optional<InfraDeployer> infraDeployer; private final ConfigConvergenceChecker convergeChecker; private final HttpProxy httpProxy; private final Clock clock; private final DeployLogger logger = new SilentDeployLogger(); private final ConfigserverConfig configserverConfig; private final FileDistributionStatus fileDistributionStatus; private final Orchestrator orchestrator; private final LogRetriever logRetriever; private final TesterClient testerClient; private final Metric metric; private final BooleanFlag useTenantMetaData; @Inject public ApplicationRepository(TenantRepository tenantRepository, HostProvisionerProvider hostProvisionerProvider, InfraDeployerProvider infraDeployerProvider, ConfigConvergenceChecker configConvergenceChecker, HttpProxy httpProxy, ConfigserverConfig configserverConfig, Orchestrator orchestrator, TesterClient testerClient, Metric metric, FlagSource flagSource) { this(tenantRepository, hostProvisionerProvider.getHostProvisioner(), infraDeployerProvider.getInfraDeployer(), configConvergenceChecker, httpProxy, configserverConfig, orchestrator, new LogRetriever(), new FileDistributionStatus(), Clock.systemUTC(), testerClient, metric, flagSource); } public ApplicationRepository(TenantRepository tenantRepository, Provisioner hostProvisioner, Orchestrator orchestrator, Clock clock) { this(tenantRepository, hostProvisioner, orchestrator, new ConfigserverConfig(new ConfigserverConfig.Builder()), new LogRetriever(), clock, new TesterClient(), new NullMetric(), new InMemoryFlagSource()); } public ApplicationRepository(TenantRepository tenantRepository, Provisioner hostProvisioner, Orchestrator orchestrator, ConfigserverConfig configserverConfig, LogRetriever logRetriever, 
Clock clock, TesterClient testerClient, Metric metric, FlagSource flagSource) { this(tenantRepository, Optional.of(hostProvisioner), Optional.empty(), new ConfigConvergenceChecker(), new HttpProxy(new SimpleHttpFetcher()), configserverConfig, orchestrator, logRetriever, new FileDistributionStatus(), clock, testerClient, metric, flagSource); } private ApplicationRepository(TenantRepository tenantRepository, Optional<Provisioner> hostProvisioner, Optional<InfraDeployer> infraDeployer, ConfigConvergenceChecker configConvergenceChecker, HttpProxy httpProxy, ConfigserverConfig configserverConfig, Orchestrator orchestrator, LogRetriever logRetriever, FileDistributionStatus fileDistributionStatus, Clock clock, TesterClient testerClient, Metric metric, FlagSource flagSource) { this.tenantRepository = tenantRepository; this.hostProvisioner = hostProvisioner; this.infraDeployer = infraDeployer; this.convergeChecker = configConvergenceChecker; this.httpProxy = httpProxy; this.configserverConfig = configserverConfig; this.orchestrator = orchestrator; this.logRetriever = logRetriever; this.fileDistributionStatus = fileDistributionStatus; this.clock = clock; this.testerClient = testerClient; this.metric = metric; this.useTenantMetaData = Flags.USE_TENANT_META_DATA.bindTo(flagSource); } public Metric metric() { return metric; } public PrepareResult prepare(Tenant tenant, long sessionId, PrepareParams prepareParams, Instant now) { validateThatLocalSessionIsNotActive(tenant, sessionId); LocalSession session = getLocalSession(tenant, sessionId); ApplicationId applicationId = prepareParams.getApplicationId(); Optional<ApplicationSet> currentActiveApplicationSet = getCurrentActiveApplicationSet(tenant, applicationId); Slime deployLog = createDeployLog(); DeployLogger logger = new DeployHandlerLogger(deployLog.get().setArray("log"), prepareParams.isVerbose(), applicationId); try (ActionTimer timer = timerFor(applicationId, "deployment.prepareMillis")) { SessionRepository 
sessionRepository = tenant.getSessionRepository(); ConfigChangeActions actions = sessionRepository.prepareLocalSession(session, logger, prepareParams, currentActiveApplicationSet, tenant.getPath(), now); logConfigChangeActions(actions, logger); log.log(Level.INFO, TenantRepository.logPre(applicationId) + "Session " + sessionId + " prepared successfully. "); return new PrepareResult(sessionId, actions, deployLog); } } public PrepareResult deploy(CompressedApplicationInputStream in, PrepareParams prepareParams, boolean ignoreSessionStaleFailure, Instant now) { File tempDir = uncheck(() -> Files.createTempDirectory("deploy")).toFile(); PrepareResult prepareResult; try { prepareResult = deploy(decompressApplication(in, tempDir), prepareParams, ignoreSessionStaleFailure, now); } finally { cleanupTempDirectory(tempDir); } return prepareResult; } public PrepareResult deploy(File applicationPackage, PrepareParams prepareParams) { return deploy(applicationPackage, prepareParams, false, Instant.now()); } public PrepareResult deploy(File applicationPackage, PrepareParams prepareParams, boolean ignoreSessionStaleFailure, Instant now) { ApplicationId applicationId = prepareParams.getApplicationId(); long sessionId = createSession(applicationId, prepareParams.getTimeoutBudget(), applicationPackage); Tenant tenant = getTenant(applicationId); PrepareResult result = prepare(tenant, sessionId, prepareParams, now); activate(tenant, sessionId, prepareParams.getTimeoutBudget(), ignoreSessionStaleFailure); return result; } /** * Creates a new deployment from the active application, if available. * This is used for system internal redeployments, not on application package changes. 
* * @param application the active application to be redeployed * @return a new deployment from the local active, or empty if a local active application * was not present for this id (meaning it either is not active or active on another * node in the config server cluster) */ @Override public Optional<com.yahoo.config.provision.Deployment> deployFromLocalActive(ApplicationId application) { return deployFromLocalActive(application, false); } /** * Creates a new deployment from the active application, if available. * This is used for system internal redeployments, not on application package changes. * * @param application the active application to be redeployed * @param bootstrap the deployment is done when bootstrapping * @return a new deployment from the local active, or empty if a local active application * was not present for this id (meaning it either is not active or active on another * node in the config server cluster) */ @Override public Optional<com.yahoo.config.provision.Deployment> deployFromLocalActive(ApplicationId application, boolean bootstrap) { return deployFromLocalActive(application, Duration.ofSeconds(configserverConfig.zookeeper().barrierTimeout()).plus(Duration.ofSeconds(5)), bootstrap); } /** * Creates a new deployment from the active application, if available. * This is used for system internal redeployments, not on application package changes. 
* * @param application the active application to be redeployed * @param timeout the timeout to use for each individual deployment operation * @param bootstrap the deployment is done when bootstrapping * @return a new deployment from the local active, or empty if a local active application * was not present for this id (meaning it either is not active or active on another * node in the config server cluster) */ @Override public Optional<com.yahoo.config.provision.Deployment> deployFromLocalActive(ApplicationId application, Duration timeout, boolean bootstrap) { Optional<com.yahoo.config.provision.Deployment> infraDeployment = infraDeployer.flatMap(d -> d.getDeployment(application)); if (infraDeployment.isPresent()) return infraDeployment; Tenant tenant = tenantRepository.getTenant(application.tenant()); if (tenant == null) return Optional.empty(); LocalSession activeSession = getActiveLocalSession(tenant, application); if (activeSession == null) return Optional.empty(); TimeoutBudget timeoutBudget = new TimeoutBudget(clock, timeout); SessionRepository sessionRepository = tenant.getSessionRepository(); LocalSession newSession = sessionRepository.createSessionFromExisting(activeSession, logger, true, timeoutBudget); sessionRepository.addLocalSession(newSession); return Optional.of(Deployment.unprepared(newSession, this, hostProvisioner, tenant, timeout, clock, false /* don't validate as this is already deployed */, bootstrap)); } @Override public Optional<Instant> lastDeployTime(ApplicationId application) { Tenant tenant = tenantRepository.getTenant(application.tenant()); if (tenant == null) return Optional.empty(); RemoteSession activeSession = getActiveSession(tenant, application); if (activeSession == null) return Optional.empty(); return Optional.of(activeSession.getCreateTime()); } public ApplicationId activate(Tenant tenant, long sessionId, TimeoutBudget timeoutBudget, boolean ignoreSessionStaleFailure) { LocalSession localSession = getLocalSession(tenant, 
sessionId); Deployment deployment = deployFromPreparedSession(localSession, tenant, timeoutBudget.timeLeft()); deployment.setIgnoreSessionStaleFailure(ignoreSessionStaleFailure); deployment.activate(); return localSession.getApplicationId(); } private Deployment deployFromPreparedSession(LocalSession session, Tenant tenant, Duration timeout) { return Deployment.prepared(session, this, hostProvisioner, tenant, timeout, clock, false); } public Transaction deactivateCurrentActivateNew(Session active, LocalSession prepared, boolean ignoreStaleSessionFailure) { Tenant tenant = tenantRepository.getTenant(prepared.getTenantName()); Transaction transaction = tenant.getSessionRepository().createActivateTransaction(prepared); if (active != null) { checkIfActiveHasChanged(prepared, active, ignoreStaleSessionFailure); checkIfActiveIsNewerThanSessionToBeActivated(prepared.getSessionId(), active.getSessionId()); transaction.add(active.createDeactivateTransaction().operations()); } if (useTenantMetaData.value()) transaction.add(writeTenantMetaData(tenant).operations()); return transaction; } private byte[] createMetaData(Tenant tenant) { return new TenantMetaData(tenant.getSessionRepository().clock().instant()).asJsonBytes(); } TenantMetaData getTenantMetaData(Tenant tenant) { Optional<byte[]> data = tenantRepository.getCurator().getData(TenantRepository.getTenantPath(tenant.getName())); return data.map(bytes -> TenantMetaData.fromJsonString(Utf8.toString(bytes))).orElse(new TenantMetaData(tenant.getCreatedTime())); } private Transaction writeTenantMetaData(Tenant tenant) { return new CuratorTransaction(tenantRepository.getCurator()) .add(CuratorOperations.setData(TenantRepository.getTenantPath(tenant.getName()).getAbsolute(), createMetaData(tenant))); } static void checkIfActiveHasChanged(LocalSession session, Session currentActiveSession, boolean ignoreStaleSessionFailure) { long activeSessionAtCreate = session.getActiveSessionAtCreate(); log.log(Level.FINE, 
currentActiveSession.logPre() + "active session id at create time=" + activeSessionAtCreate); if (activeSessionAtCreate == 0) return; long sessionId = session.getSessionId(); long currentActiveSessionSessionId = currentActiveSession.getSessionId(); log.log(Level.FINE, currentActiveSession.logPre() + "sessionId=" + sessionId + ", current active session=" + currentActiveSessionSessionId); if (currentActiveSession.isNewerThan(activeSessionAtCreate) && currentActiveSessionSessionId != sessionId) { String errMsg = currentActiveSession.logPre() + "Cannot activate session " + sessionId + " because the currently active session (" + currentActiveSessionSessionId + ") has changed since session " + sessionId + " was created (was " + activeSessionAtCreate + " at creation time)"; if (ignoreStaleSessionFailure) { log.warning(errMsg + " (Continuing because of force.)"); } else { throw new ActivationConflictException(errMsg); } } } static void checkIfActiveIsNewerThanSessionToBeActivated(long sessionId, long currentActiveSessionId) { if (sessionId < currentActiveSessionId) { throw new ActivationConflictException("It is not possible to activate session " + sessionId + ", because it is older than current active session (" + currentActiveSessionId + ")"); } } /** * Deletes an application * * @return true if the application was found and deleted, false if it was not present * @throws RuntimeException if the delete transaction fails. This method is exception safe. */ boolean delete(ApplicationId applicationId) { return delete(applicationId, Duration.ofSeconds(60)); } /** * Deletes an application * * @return true if the application was found and deleted, false if it was not present * @throws RuntimeException if the delete transaction fails. This method is exception safe. 
*/ public boolean delete(ApplicationId applicationId, Duration waitTime) { Tenant tenant = getTenant(applicationId); if (tenant == null) return false; TenantApplications tenantApplications = tenant.getApplicationRepo(); try (Lock lock = tenantApplications.lock(applicationId)) { if ( ! tenantApplications.exists(applicationId)) return false; Optional<Long> activeSession = tenantApplications.activeSessionOf(applicationId); if (activeSession.isEmpty()) return false; long sessionId = activeSession.get(); RemoteSession remoteSession; try { remoteSession = getRemoteSession(tenant, sessionId); Transaction deleteTransaction = remoteSession.createDeleteTransaction(); deleteTransaction.commit(); log.log(Level.INFO, TenantRepository.logPre(applicationId) + "Waiting for session " + sessionId + " to be deleted"); if ( ! waitTime.isZero() && localSessionHasBeenDeleted(applicationId, sessionId, waitTime)) { log.log(Level.INFO, TenantRepository.logPre(applicationId) + "Session " + sessionId + " deleted"); } else { deleteTransaction.rollbackOrLog(); throw new InternalServerException(applicationId + " was not deleted (waited " + waitTime + "), session " + sessionId); } } catch (NotFoundException e) { log.log(Level.INFO, TenantRepository.logPre(applicationId) + "Active session exists, but has not been deleted properly. 
Trying to cleanup"); } NestedTransaction transaction = new NestedTransaction(); Curator curator = tenantRepository.getCurator(); transaction.add(new ContainerEndpointsCache(tenant.getPath(), curator).delete(applicationId)); transaction.add(new ApplicationRolesStore(curator, tenant.getPath()).delete(applicationId)); transaction.add(new EndpointCertificateMetadataStore(curator, tenant.getPath()).delete(applicationId)); transaction.add(tenantApplications.createDeleteTransaction(applicationId)); hostProvisioner.ifPresent(provisioner -> provisioner.remove(transaction, applicationId)); transaction.onCommitted(() -> log.log(Level.INFO, "Deleted " + applicationId)); transaction.commit(); return true; } } public HttpResponse clusterControllerStatusPage(ApplicationId applicationId, String hostName, String pathSuffix) { String relativePath = "clustercontroller-status/" + pathSuffix; return httpProxy.get(getApplication(applicationId), hostName, CLUSTERCONTROLLER_CONTAINER.serviceName, relativePath); } public Long getApplicationGeneration(ApplicationId applicationId) { return getApplication(applicationId).getApplicationGeneration(); } public void restart(ApplicationId applicationId, HostFilter hostFilter) { hostProvisioner.ifPresent(provisioner -> provisioner.restart(applicationId, hostFilter)); } public boolean isSuspended(ApplicationId application) { return orchestrator.getAllSuspendedApplications().contains(application); } public HttpResponse filedistributionStatus(ApplicationId applicationId, Duration timeout) { return fileDistributionStatus.status(getApplication(applicationId), timeout); } public Set<String> deleteUnusedFiledistributionReferences(File fileReferencesPath, Duration keepFileReferences) { if (!fileReferencesPath.isDirectory()) throw new RuntimeException(fileReferencesPath + " is not a directory"); Set<String> fileReferencesInUse = new HashSet<>(); for (var applicationId : listApplications()) { try { Optional<Application> app = 
getOptionalApplication(applicationId); if (app.isEmpty()) continue; fileReferencesInUse.addAll(app.get().getModel().fileReferences().stream() .map(FileReference::value) .collect(Collectors.toSet())); } catch (Exception e) { log.log(Level.WARNING, "Getting file references in use for '" + applicationId + "' failed", e); } } log.log(Level.FINE, "File references in use : " + fileReferencesInUse); Set<String> fileReferencesOnDisk = getFileReferencesOnDisk(fileReferencesPath); log.log(Level.FINE, "File references on disk (in " + fileReferencesPath + "): " + fileReferencesOnDisk); Instant instant = Instant.now().minus(keepFileReferences); Set<String> fileReferencesToDelete = fileReferencesOnDisk .stream() .filter(fileReference -> ! fileReferencesInUse.contains(fileReference)) .filter(fileReference -> isFileLastModifiedBefore(new File(fileReferencesPath, fileReference), instant)) .collect(Collectors.toSet()); if (fileReferencesToDelete.size() > 0) { log.log(Level.FINE, "Will delete file references not in use: " + fileReferencesToDelete); fileReferencesToDelete.forEach(fileReference -> { File file = new File(fileReferencesPath, fileReference); if ( ! 
IOUtils.recursiveDeleteDir(file)) log.log(Level.WARNING, "Could not delete " + file.getAbsolutePath()); }); } return fileReferencesToDelete; } public Set<FileReference> getFileReferences(ApplicationId applicationId) { return getOptionalApplication(applicationId).map(app -> app.getModel().fileReferences()).orElse(Set.of()); } public ApplicationFile getApplicationFileFromSession(TenantName tenantName, long sessionId, String path, LocalSession.Mode mode) { Tenant tenant = tenantRepository.getTenant(tenantName); return getLocalSession(tenant, sessionId).getApplicationFile(Path.fromString(path), mode); } public Tenant getTenant(ApplicationId applicationId) { return tenantRepository.getTenant(applicationId.tenant()); } private Application getApplication(ApplicationId applicationId) { return getApplication(applicationId, Optional.empty()); } private Application getApplication(ApplicationId applicationId, Optional<Version> version) { try { Tenant tenant = getTenant(applicationId); if (tenant == null) throw new NotFoundException("Tenant '" + applicationId.tenant() + "' not found"); long sessionId = getSessionIdForApplication(tenant, applicationId); RemoteSession session = getRemoteSession(tenant, sessionId); return session.ensureApplicationLoaded().getForVersionOrLatest(version, clock.instant()); } catch (NotFoundException e) { log.log(Level.WARNING, "Failed getting application for '" + applicationId + "': " + e.getMessage()); throw e; } catch (Exception e) { log.log(Level.WARNING, "Failed getting application for '" + applicationId + "'", e); throw e; } } private Optional<Application> getOptionalApplication(ApplicationId applicationId) { try { return Optional.of(getApplication(applicationId)); } catch (Exception e) { return Optional.empty(); } } public Set<ApplicationId> listApplications() { return tenantRepository.getAllTenants().stream() .flatMap(tenant -> tenant.getApplicationRepo().activeApplications().stream()) .collect(Collectors.toSet()); } private boolean 
isFileLastModifiedBefore(File fileReference, Instant instant) { BasicFileAttributes fileAttributes; try { fileAttributes = readAttributes(fileReference.toPath(), BasicFileAttributes.class); return fileAttributes.lastModifiedTime().toInstant().isBefore(instant); } catch (IOException e) { throw new UncheckedIOException(e); } } private boolean localSessionHasBeenDeleted(ApplicationId applicationId, long sessionId, Duration waitTime) { SessionRepository sessionRepository = getTenant(applicationId).getSessionRepository(); Instant end = Instant.now().plus(waitTime); do { if (sessionRepository.getRemoteSession(sessionId) == null) return true; try { Thread.sleep(10); } catch (InterruptedException e) { /* ignored */} } while (Instant.now().isBefore(end)); return false; } public Optional<String> getApplicationPackageReference(ApplicationId applicationId) { Optional<String> applicationPackage = Optional.empty(); RemoteSession session = getActiveSession(applicationId); if (session != null) { FileReference applicationPackageReference = session.getApplicationPackageReference(); File downloadDirectory = new File(Defaults.getDefaults().underVespaHome(configserverConfig().fileReferencesDir())); if (applicationPackageReference != null && ! 
fileReferenceExistsOnDisk(downloadDirectory, applicationPackageReference)) applicationPackage = Optional.of(applicationPackageReference.value()); } return applicationPackage; } public List<Version> getAllVersions(ApplicationId applicationId) { Optional<ApplicationSet> applicationSet = getCurrentActiveApplicationSet(getTenant(applicationId), applicationId); if (applicationSet.isEmpty()) return List.of(); else return applicationSet.get().getAllVersions(applicationId); } public HttpResponse checkServiceForConfigConvergence(ApplicationId applicationId, String hostAndPort, URI uri, Duration timeout, Optional<Version> vespaVersion) { return convergeChecker.checkService(getApplication(applicationId, vespaVersion), hostAndPort, uri, timeout); } public HttpResponse servicesToCheckForConfigConvergence(ApplicationId applicationId, URI uri, Duration timeoutPerService, Optional<Version> vespaVersion) { return convergeChecker.servicesToCheck(getApplication(applicationId, vespaVersion), uri, timeoutPerService); } public HttpResponse getLogs(ApplicationId applicationId, Optional<String> hostname, String apiParams) { String logServerURI = getLogServerURI(applicationId, hostname) + apiParams; return logRetriever.getLogs(logServerURI); } public HttpResponse getTesterStatus(ApplicationId applicationId) { return testerClient.getStatus(getTesterHostname(applicationId), getTesterPort(applicationId)); } public HttpResponse getTesterLog(ApplicationId applicationId, Long after) { return testerClient.getLog(getTesterHostname(applicationId), getTesterPort(applicationId), after); } public HttpResponse startTests(ApplicationId applicationId, String suite, byte[] config) { return testerClient.startTests(getTesterHostname(applicationId), getTesterPort(applicationId), suite, config); } public HttpResponse isTesterReady(ApplicationId applicationId) { return testerClient.isTesterReady(getTesterHostname(applicationId), getTesterPort(applicationId)); } private String getTesterHostname(ApplicationId 
applicationId) { return getTesterServiceInfo(applicationId).getHostName(); } private int getTesterPort(ApplicationId applicationId) { ServiceInfo serviceInfo = getTesterServiceInfo(applicationId); return serviceInfo.getPorts().stream().filter(portInfo -> portInfo.getTags().contains("http")).findFirst().get().getPort(); } private ServiceInfo getTesterServiceInfo(ApplicationId applicationId) { Application application = getApplication(applicationId); return application.getModel().getHosts().stream() .findFirst().orElseThrow(() -> new InternalServerException("Could not find any host for tester app " + applicationId.toFullString())) .getServices().stream() .filter(service -> CONTAINER.serviceName.equals(service.getServiceType())) .findFirst() .orElseThrow(() -> new InternalServerException("Could not find any tester container for tester app " + applicationId.toFullString())); } public CompletionWaiter activate(LocalSession session, Session previousActiveSession, ApplicationId applicationId, boolean ignoreSessionStaleFailure) { CompletionWaiter waiter = session.getSessionZooKeeperClient().createActiveWaiter(); NestedTransaction transaction = new NestedTransaction(); transaction.add(deactivateCurrentActivateNew(previousActiveSession, session, ignoreSessionStaleFailure)); hostProvisioner.ifPresent(provisioner -> provisioner.activate(transaction, applicationId, session.getAllocatedHosts().getHosts())); transaction.commit(); return waiter; } /** * Gets the active Session for the given application id. * * @return the active session, or null if there is no active session for the given application id. 
*/
    public RemoteSession getActiveSession(ApplicationId applicationId) {
        return getActiveSession(getTenant(applicationId), applicationId);
    }

    public long getSessionIdForApplication(ApplicationId applicationId) {
        Tenant tenant = getTenant(applicationId);
        if (tenant == null)
            throw new NotFoundException("Tenant '" + applicationId.tenant() + "' not found");
        return getSessionIdForApplication(tenant, applicationId);
    }

    private long getSessionIdForApplication(Tenant tenant, ApplicationId applicationId) {
        TenantApplications applicationRepo = tenant.getApplicationRepo();
        if ( ! applicationRepo.exists(applicationId))
            throw new NotFoundException("Unknown application id '" + applicationId + "'");
        return applicationRepo.requireActiveSessionOf(applicationId);
    }

    public void validateThatSessionIsNotActive(Tenant tenant, long sessionId) {
        Session session = getRemoteSession(tenant, sessionId);
        if (Session.Status.ACTIVATE.equals(session.getStatus()))
            throw new IllegalStateException("Session is active: " + sessionId);
    }

    public void validateThatSessionIsPrepared(Tenant tenant, long sessionId) {
        Session session = getRemoteSession(tenant, sessionId);
        if ( ! Session.Status.PREPARE.equals(session.getStatus()))
            throw new IllegalStateException("Session not prepared: " + sessionId);
    }

    /** Creates a new local session based on the active session of the given application and returns its id. */
    public long createSessionFromExisting(ApplicationId applicationId, DeployLogger logger,
                                          boolean internalRedeploy, TimeoutBudget timeoutBudget) {
        Tenant tenant = getTenant(applicationId);
        SessionRepository sessionRepository = tenant.getSessionRepository();
        RemoteSession fromSession = getExistingSession(tenant, applicationId);
        LocalSession session = sessionRepository.createSessionFromExisting(fromSession, logger, internalRedeploy, timeoutBudget);
        sessionRepository.addLocalSession(session);
        return session.getSessionId();
    }

    /** Creates a session from a compressed application stream; the temp dir used for decompression is always cleaned up. */
    public long createSession(ApplicationId applicationId, TimeoutBudget timeoutBudget, InputStream in, String contentType) {
        File tempDir = uncheck(() -> Files.createTempDirectory("deploy")).toFile();
        long sessionId;
        try {
            sessionId = createSession(applicationId, timeoutBudget, decompressApplication(in, contentType, tempDir));
        } finally {
            cleanupTempDirectory(tempDir);
        }
        return sessionId;
    }

    public long createSession(ApplicationId applicationId, TimeoutBudget timeoutBudget, File applicationDirectory) {
        Tenant tenant = getTenant(applicationId);
        tenant.getApplicationRepo().createApplication(applicationId);
        Optional<Long> activeSessionId = tenant.getApplicationRepo().activeSessionOf(applicationId);
        LocalSession session = tenant.getSessionRepository().createSession(applicationDirectory, applicationId, timeoutBudget, activeSessionId);
        tenant.getSessionRepository().addLocalSession(session);
        return session.getSessionId();
    }

    /** Deletes expired local sessions in all tenants, sparing each application's currently active session. */
    public void deleteExpiredLocalSessions() {
        Map<Tenant, List<LocalSession>> sessionsPerTenant = new HashMap<>();
        tenantRepository.getAllTenants().forEach(tenant -> sessionsPerTenant.put(tenant, tenant.getSessionRepository().getLocalSessions()));

        Set<ApplicationId> applicationIds = new HashSet<>();
        sessionsPerTenant.values()
                .forEach(sessionList -> sessionList.stream()
                        .map(Session::getOptionalApplicationId)
.filter(Optional::isPresent) .forEach(appId -> applicationIds.add(appId.get()))); Map<ApplicationId, Long> activeSessions = new HashMap<>(); applicationIds.forEach(applicationId -> { RemoteSession activeSession = getActiveSession(applicationId); if (activeSession != null) activeSessions.put(applicationId, activeSession.getSessionId()); }); sessionsPerTenant.keySet().forEach(tenant -> tenant.getSessionRepository().deleteExpiredSessions(activeSessions)); } public int deleteExpiredSessionLocks(Duration expiryTime) { return tenantRepository.getAllTenants() .stream() .map(tenant -> tenant.getSessionRepository().deleteExpiredLocks(clock, expiryTime)) .mapToInt(i -> i) .sum(); } public int deleteExpiredRemoteSessions(Duration expiryTime) { return deleteExpiredRemoteSessions(clock, expiryTime); } public int deleteExpiredRemoteSessions(Clock clock, Duration expiryTime) { return tenantRepository.getAllTenants() .stream() .map(tenant -> tenant.getSessionRepository().deleteExpiredRemoteSessions(clock, expiryTime)) .mapToInt(i -> i) .sum(); } public TenantRepository tenantRepository() { return tenantRepository; } public void deleteTenant(TenantName tenantName) { List<ApplicationId> activeApplications = activeApplications(tenantName); if (activeApplications.isEmpty()) tenantRepository.deleteTenant(tenantName); else throw new IllegalArgumentException("Cannot delete tenant '" + tenantName + "', it has active applications: " + activeApplications); } private List<ApplicationId> activeApplications(TenantName tenantName) { return tenantRepository.getTenant(tenantName).getApplicationRepo().activeApplications(); } public ProtonMetricsResponse getProtonMetrics(ApplicationId applicationId) { Application application = getApplication(applicationId); ProtonMetricsRetriever protonMetricsRetriever = new ProtonMetricsRetriever(); return protonMetricsRetriever.getMetrics(application); } public DeploymentMetricsResponse getDeploymentMetrics(ApplicationId applicationId) { Application application = 
getApplication(applicationId); DeploymentMetricsRetriever deploymentMetricsRetriever = new DeploymentMetricsRetriever(); return deploymentMetricsRetriever.getMetrics(application); } public ApplicationMetaData getMetadataFromLocalSession(Tenant tenant, long sessionId) { return getLocalSession(tenant, sessionId).getMetaData(); } public ConfigserverConfig configserverConfig() { return configserverConfig; } public ApplicationId getApplicationIdForHostname(String hostname) { Optional<ApplicationId> applicationId = tenantRepository.getAllTenantNames().stream() .map(tenantName -> tenantRepository.getTenant(tenantName).getApplicationRepo().getApplicationIdForHostName(hostname)) .filter(Objects::nonNull) .findFirst(); return applicationId.orElse(null); } private void validateThatLocalSessionIsNotActive(Tenant tenant, long sessionId) { LocalSession session = getLocalSession(tenant, sessionId); if (Session.Status.ACTIVATE.equals(session.getStatus())) { throw new IllegalStateException("Session is active: " + sessionId); } } private LocalSession getLocalSession(Tenant tenant, long sessionId) { LocalSession session = tenant.getSessionRepository().getLocalSession(sessionId); if (session == null) throw new NotFoundException("Session " + sessionId + " was not found"); return session; } private RemoteSession getRemoteSession(Tenant tenant, long sessionId) { RemoteSession session = tenant.getSessionRepository().getRemoteSession(sessionId); if (session == null) throw new NotFoundException("Session " + sessionId + " was not found"); return session; } public Optional<ApplicationSet> getCurrentActiveApplicationSet(Tenant tenant, ApplicationId appId) { Optional<ApplicationSet> currentActiveApplicationSet = Optional.empty(); TenantApplications applicationRepo = tenant.getApplicationRepo(); try { long currentActiveSessionId = applicationRepo.requireActiveSessionOf(appId); RemoteSession currentActiveSession = getRemoteSession(tenant, currentActiveSessionId); currentActiveApplicationSet = 
Optional.ofNullable(currentActiveSession.ensureApplicationLoaded()); } catch (IllegalArgumentException e) { } return currentActiveApplicationSet; } private File decompressApplication(InputStream in, String contentType, File tempDir) { try (CompressedApplicationInputStream application = CompressedApplicationInputStream.createFromCompressedStream(in, contentType)) { return decompressApplication(application, tempDir); } catch (IOException e) { throw new IllegalArgumentException("Unable to decompress data in body", e); } } private File decompressApplication(CompressedApplicationInputStream in, File tempDir) { try { return in.decompress(tempDir); } catch (IOException e) { throw new IllegalArgumentException("Unable to decompress stream", e); } } private void cleanupTempDirectory(File tempDir) { logger.log(Level.FINE, "Deleting tmp dir '" + tempDir + "'"); if (!IOUtils.recursiveDeleteDir(tempDir)) { logger.log(Level.WARNING, "Not able to delete tmp dir '" + tempDir + "'"); } } private RemoteSession getExistingSession(Tenant tenant, ApplicationId applicationId) { TenantApplications applicationRepo = tenant.getApplicationRepo(); return getRemoteSession(tenant, applicationRepo.requireActiveSessionOf(applicationId)); } private RemoteSession getActiveSession(Tenant tenant, ApplicationId applicationId) { TenantApplications applicationRepo = tenant.getApplicationRepo(); if (applicationRepo.activeApplications().contains(applicationId)) { return tenant.getSessionRepository().getRemoteSession(applicationRepo.requireActiveSessionOf(applicationId)); } return null; } public LocalSession getActiveLocalSession(Tenant tenant, ApplicationId applicationId) { TenantApplications applicationRepo = tenant.getApplicationRepo(); if (applicationRepo.activeApplications().contains(applicationId)) { return tenant.getSessionRepository().getLocalSession(applicationRepo.requireActiveSessionOf(applicationId)); } return null; } private static void logConfigChangeActions(ConfigChangeActions actions, 
DeployLogger logger) { RestartActions restartActions = actions.getRestartActions(); if ( ! restartActions.isEmpty()) { logger.log(Level.WARNING, "Change(s) between active and new application that require restart:\n" + restartActions.format()); } RefeedActions refeedActions = actions.getRefeedActions(); if ( ! refeedActions.isEmpty()) { boolean allAllowed = refeedActions.getEntries().stream().allMatch(RefeedActions.Entry::allowed); logger.log(allAllowed ? Level.INFO : Level.WARNING, "Change(s) between active and new application that may require re-feed:\n" + refeedActions.format()); } } private String getLogServerURI(ApplicationId applicationId, Optional<String> hostname) { if (hostname.isPresent() && HOSTED_VESPA_TENANT.equals(applicationId.tenant())) { return "http: } Application application = getApplication(applicationId); Collection<HostInfo> hostInfos = application.getModel().getHosts(); HostInfo logServerHostInfo = hostInfos.stream() .filter(host -> host.getServices().stream() .anyMatch(serviceInfo -> serviceInfo.getServiceType().equalsIgnoreCase("logserver"))) .findFirst().orElseThrow(() -> new IllegalArgumentException("Could not find host info for logserver")); ServiceInfo serviceInfo = logServerHostInfo.getServices().stream().filter(service -> List.of(LOGSERVER_CONTAINER.serviceName, CONTAINER.serviceName).contains(service.getServiceType())) .findFirst().orElseThrow(() -> new IllegalArgumentException("No container running on logserver host")); int port = servicePort(serviceInfo); return "http: } private int servicePort(ServiceInfo serviceInfo) { return serviceInfo.getPorts().stream() .filter(portInfo -> portInfo.getTags().stream().anyMatch(tag -> tag.equalsIgnoreCase("http"))) .findFirst().orElseThrow(() -> new IllegalArgumentException("Could not find HTTP port")) .getPort(); } public Slime createDeployLog() { Slime deployLog = new Slime(); deployLog.setObject(); return deployLog; } public Zone zone() { return new 
Zone(SystemName.from(configserverConfig.system()),
                        Environment.from(configserverConfig.environment()),
                        RegionName.from(configserverConfig.region()));
    }

    /** Emits as a metric the time in millis spent while holding this timer, with deployment ID as dimensions. */
    public ActionTimer timerFor(ApplicationId id, String metricName) {
        return new ActionTimer(metric, clock, id, configserverConfig.environment(), configserverConfig.region(), metricName);
    }

    public static class ActionTimer implements AutoCloseable {

        private final Metric metric;
        private final Clock clock;
        private final ApplicationId id;
        private final String environment;
        private final String region;
        private final String name;
        private final Instant start;

        private ActionTimer(Metric metric, Clock clock, ApplicationId id, String environment, String region, String name) {
            this.metric = metric;
            this.clock = clock;
            this.id = id;
            this.environment = environment;
            this.region = region;
            this.name = name;
            this.start = clock.instant();  // timer starts at construction
        }

        /** Reports the elapsed time since construction as the named metric, dimensioned by deployment. */
        @Override
        public void close() {
            metric.set(name,
                       Duration.between(start, clock.instant()).toMillis(),
                       metric.createContext(Map.of("applicationId", id.toFullString(),
                                                   "tenantName", id.tenant().value(),
                                                   "app", id.application().value() + "." + id.instance().value(),
                                                   "zone", environment + "." + region)));
        }

    }

}
class ApplicationRepository implements com.yahoo.config.provision.Deployer {

    private static final Logger log = Logger.getLogger(ApplicationRepository.class.getName());

    private final TenantRepository tenantRepository;
    private final Optional<Provisioner> hostProvisioner;
    private final Optional<InfraDeployer> infraDeployer;
    private final ConfigConvergenceChecker convergeChecker;
    private final HttpProxy httpProxy;
    private final Clock clock;
    private final DeployLogger logger = new SilentDeployLogger();
    private final ConfigserverConfig configserverConfig;
    private final FileDistributionStatus fileDistributionStatus;
    private final Orchestrator orchestrator;
    private final LogRetriever logRetriever;
    private final TesterClient testerClient;
    private final Metric metric;
    private final BooleanFlag useTenantMetaData;

    @Inject
    public ApplicationRepository(TenantRepository tenantRepository,
                                 HostProvisionerProvider hostProvisionerProvider,
                                 InfraDeployerProvider infraDeployerProvider,
                                 ConfigConvergenceChecker configConvergenceChecker,
                                 HttpProxy httpProxy,
                                 ConfigserverConfig configserverConfig,
                                 Orchestrator orchestrator,
                                 TesterClient testerClient,
                                 Metric metric,
                                 FlagSource flagSource) {
        this(tenantRepository,
             hostProvisionerProvider.getHostProvisioner(),
             infraDeployerProvider.getInfraDeployer(),
             configConvergenceChecker,
             httpProxy,
             configserverConfig,
             orchestrator,
             new LogRetriever(),
             new FileDistributionStatus(),
             Clock.systemUTC(),
             testerClient,
             metric,
             flagSource);
    }

    // Convenience constructor used from tests
    public ApplicationRepository(TenantRepository tenantRepository,
                                 Provisioner hostProvisioner,
                                 Orchestrator orchestrator,
                                 Clock clock) {
        this(tenantRepository,
             hostProvisioner,
             orchestrator,
             new ConfigserverConfig(new ConfigserverConfig.Builder()),
             new LogRetriever(),
             clock,
             new TesterClient(),
             new NullMetric(),
             new InMemoryFlagSource());
    }

    public ApplicationRepository(TenantRepository tenantRepository,
                                 Provisioner hostProvisioner,
                                 Orchestrator orchestrator,
                                 ConfigserverConfig configserverConfig,
                                 LogRetriever logRetriever,
                                 Clock clock,
                                 TesterClient testerClient,
                                 Metric metric,
                                 FlagSource flagSource) {
        this(tenantRepository,
             Optional.of(hostProvisioner),
             Optional.empty(),
             new ConfigConvergenceChecker(),
             new HttpProxy(new SimpleHttpFetcher()),
             configserverConfig,
             orchestrator,
             logRetriever,
             new FileDistributionStatus(),
             clock,
             testerClient,
             metric,
             flagSource);
    }

    // All other constructors delegate here
    private ApplicationRepository(TenantRepository tenantRepository,
                                  Optional<Provisioner> hostProvisioner,
                                  Optional<InfraDeployer> infraDeployer,
                                  ConfigConvergenceChecker configConvergenceChecker,
                                  HttpProxy httpProxy,
                                  ConfigserverConfig configserverConfig,
                                  Orchestrator orchestrator,
                                  LogRetriever logRetriever,
                                  FileDistributionStatus fileDistributionStatus,
                                  Clock clock,
                                  TesterClient testerClient,
                                  Metric metric,
                                  FlagSource flagSource) {
        this.tenantRepository = tenantRepository;
        this.hostProvisioner = hostProvisioner;
        this.infraDeployer = infraDeployer;
        this.convergeChecker = configConvergenceChecker;
        this.httpProxy = httpProxy;
        this.configserverConfig = configserverConfig;
        this.orchestrator = orchestrator;
        this.logRetriever = logRetriever;
        this.fileDistributionStatus = fileDistributionStatus;
        this.clock = clock;
        this.testerClient = testerClient;
        this.metric = metric;
        this.useTenantMetaData = Flags.USE_TENANT_META_DATA.bindTo(flagSource);
    }

    public Metric metric() {
        return metric;
    }

    /** Prepares the given local session, logging config change actions to the returned deploy log. */
    public PrepareResult prepare(Tenant tenant, long sessionId, PrepareParams prepareParams, Instant now) {
        validateThatLocalSessionIsNotActive(tenant, sessionId);
        LocalSession session = getLocalSession(tenant, sessionId);
        ApplicationId applicationId = prepareParams.getApplicationId();
        Optional<ApplicationSet> currentActiveApplicationSet = getCurrentActiveApplicationSet(tenant, applicationId);
        Slime deployLog = createDeployLog();
        DeployLogger logger = new DeployHandlerLogger(deployLog.get().setArray("log"), prepareParams.isVerbose(), applicationId);
        try (ActionTimer timer = timerFor(applicationId, "deployment.prepareMillis")) {
            SessionRepository
sessionRepository = tenant.getSessionRepository(); ConfigChangeActions actions = sessionRepository.prepareLocalSession(session, logger, prepareParams, currentActiveApplicationSet, tenant.getPath(), now); logConfigChangeActions(actions, logger); log.log(Level.INFO, TenantRepository.logPre(applicationId) + "Session " + sessionId + " prepared successfully. "); return new PrepareResult(sessionId, actions, deployLog); } } public PrepareResult deploy(CompressedApplicationInputStream in, PrepareParams prepareParams, boolean ignoreSessionStaleFailure, Instant now) { File tempDir = uncheck(() -> Files.createTempDirectory("deploy")).toFile(); PrepareResult prepareResult; try { prepareResult = deploy(decompressApplication(in, tempDir), prepareParams, ignoreSessionStaleFailure, now); } finally { cleanupTempDirectory(tempDir); } return prepareResult; } public PrepareResult deploy(File applicationPackage, PrepareParams prepareParams) { return deploy(applicationPackage, prepareParams, false, Instant.now()); } public PrepareResult deploy(File applicationPackage, PrepareParams prepareParams, boolean ignoreSessionStaleFailure, Instant now) { ApplicationId applicationId = prepareParams.getApplicationId(); long sessionId = createSession(applicationId, prepareParams.getTimeoutBudget(), applicationPackage); Tenant tenant = getTenant(applicationId); PrepareResult result = prepare(tenant, sessionId, prepareParams, now); activate(tenant, sessionId, prepareParams.getTimeoutBudget(), ignoreSessionStaleFailure); return result; } /** * Creates a new deployment from the active application, if available. * This is used for system internal redeployments, not on application package changes. 
* * @param application the active application to be redeployed * @return a new deployment from the local active, or empty if a local active application * was not present for this id (meaning it either is not active or active on another * node in the config server cluster) */ @Override public Optional<com.yahoo.config.provision.Deployment> deployFromLocalActive(ApplicationId application) { return deployFromLocalActive(application, false); } /** * Creates a new deployment from the active application, if available. * This is used for system internal redeployments, not on application package changes. * * @param application the active application to be redeployed * @param bootstrap the deployment is done when bootstrapping * @return a new deployment from the local active, or empty if a local active application * was not present for this id (meaning it either is not active or active on another * node in the config server cluster) */ @Override public Optional<com.yahoo.config.provision.Deployment> deployFromLocalActive(ApplicationId application, boolean bootstrap) { return deployFromLocalActive(application, Duration.ofSeconds(configserverConfig.zookeeper().barrierTimeout()).plus(Duration.ofSeconds(5)), bootstrap); } /** * Creates a new deployment from the active application, if available. * This is used for system internal redeployments, not on application package changes. 
* * @param application the active application to be redeployed * @param timeout the timeout to use for each individual deployment operation * @param bootstrap the deployment is done when bootstrapping * @return a new deployment from the local active, or empty if a local active application * was not present for this id (meaning it either is not active or active on another * node in the config server cluster) */ @Override public Optional<com.yahoo.config.provision.Deployment> deployFromLocalActive(ApplicationId application, Duration timeout, boolean bootstrap) { Optional<com.yahoo.config.provision.Deployment> infraDeployment = infraDeployer.flatMap(d -> d.getDeployment(application)); if (infraDeployment.isPresent()) return infraDeployment; Tenant tenant = tenantRepository.getTenant(application.tenant()); if (tenant == null) return Optional.empty(); LocalSession activeSession = getActiveLocalSession(tenant, application); if (activeSession == null) return Optional.empty(); TimeoutBudget timeoutBudget = new TimeoutBudget(clock, timeout); SessionRepository sessionRepository = tenant.getSessionRepository(); LocalSession newSession = sessionRepository.createSessionFromExisting(activeSession, logger, true, timeoutBudget); sessionRepository.addLocalSession(newSession); return Optional.of(Deployment.unprepared(newSession, this, hostProvisioner, tenant, timeout, clock, false /* don't validate as this is already deployed */, bootstrap)); } @Override public Optional<Instant> lastDeployTime(ApplicationId application) { Tenant tenant = tenantRepository.getTenant(application.tenant()); if (tenant == null) return Optional.empty(); RemoteSession activeSession = getActiveSession(tenant, application); if (activeSession == null) return Optional.empty(); return Optional.of(activeSession.getCreateTime()); } public ApplicationId activate(Tenant tenant, long sessionId, TimeoutBudget timeoutBudget, boolean ignoreSessionStaleFailure) { LocalSession localSession = getLocalSession(tenant, 
sessionId); Deployment deployment = deployFromPreparedSession(localSession, tenant, timeoutBudget.timeLeft()); deployment.setIgnoreSessionStaleFailure(ignoreSessionStaleFailure); deployment.activate(); return localSession.getApplicationId(); } private Deployment deployFromPreparedSession(LocalSession session, Tenant tenant, Duration timeout) { return Deployment.prepared(session, this, hostProvisioner, tenant, timeout, clock, false); } public Transaction deactivateCurrentActivateNew(Session active, LocalSession prepared, boolean ignoreStaleSessionFailure) { Tenant tenant = tenantRepository.getTenant(prepared.getTenantName()); Transaction transaction = tenant.getSessionRepository().createActivateTransaction(prepared); if (active != null) { checkIfActiveHasChanged(prepared, active, ignoreStaleSessionFailure); checkIfActiveIsNewerThanSessionToBeActivated(prepared.getSessionId(), active.getSessionId()); transaction.add(active.createDeactivateTransaction().operations()); } if (useTenantMetaData.value()) transaction.add(writeTenantMetaData(tenant).operations()); return transaction; } private byte[] createMetaData(Tenant tenant) { return new TenantMetaData(tenant.getSessionRepository().clock().instant()).asJsonBytes(); } TenantMetaData getTenantMetaData(Tenant tenant) { Optional<byte[]> data = tenantRepository.getCurator().getData(TenantRepository.getTenantPath(tenant.getName())); return data.map(bytes -> TenantMetaData.fromJsonString(Utf8.toString(bytes))).orElse(new TenantMetaData(tenant.getCreatedTime())); } private Transaction writeTenantMetaData(Tenant tenant) { return new CuratorTransaction(tenantRepository.getCurator()) .add(CuratorOperations.setData(TenantRepository.getTenantPath(tenant.getName()).getAbsolute(), createMetaData(tenant))); } static void checkIfActiveHasChanged(LocalSession session, Session currentActiveSession, boolean ignoreStaleSessionFailure) { long activeSessionAtCreate = session.getActiveSessionAtCreate(); log.log(Level.FINE, 
currentActiveSession.logPre() + "active session id at create time=" + activeSessionAtCreate); if (activeSessionAtCreate == 0) return; long sessionId = session.getSessionId(); long currentActiveSessionSessionId = currentActiveSession.getSessionId(); log.log(Level.FINE, currentActiveSession.logPre() + "sessionId=" + sessionId + ", current active session=" + currentActiveSessionSessionId); if (currentActiveSession.isNewerThan(activeSessionAtCreate) && currentActiveSessionSessionId != sessionId) { String errMsg = currentActiveSession.logPre() + "Cannot activate session " + sessionId + " because the currently active session (" + currentActiveSessionSessionId + ") has changed since session " + sessionId + " was created (was " + activeSessionAtCreate + " at creation time)"; if (ignoreStaleSessionFailure) { log.warning(errMsg + " (Continuing because of force.)"); } else { throw new ActivationConflictException(errMsg); } } } static void checkIfActiveIsNewerThanSessionToBeActivated(long sessionId, long currentActiveSessionId) { if (sessionId < currentActiveSessionId) { throw new ActivationConflictException("It is not possible to activate session " + sessionId + ", because it is older than current active session (" + currentActiveSessionId + ")"); } } /** * Deletes an application * * @return true if the application was found and deleted, false if it was not present * @throws RuntimeException if the delete transaction fails. This method is exception safe. */ boolean delete(ApplicationId applicationId) { return delete(applicationId, Duration.ofSeconds(60)); } /** * Deletes an application * * @return true if the application was found and deleted, false if it was not present * @throws RuntimeException if the delete transaction fails. This method is exception safe. 
*/ public boolean delete(ApplicationId applicationId, Duration waitTime) { Tenant tenant = getTenant(applicationId); if (tenant == null) return false; TenantApplications tenantApplications = tenant.getApplicationRepo(); try (Lock lock = tenantApplications.lock(applicationId)) { if ( ! tenantApplications.exists(applicationId)) return false; Optional<Long> activeSession = tenantApplications.activeSessionOf(applicationId); if (activeSession.isEmpty()) return false; long sessionId = activeSession.get(); RemoteSession remoteSession; try { remoteSession = getRemoteSession(tenant, sessionId); Transaction deleteTransaction = remoteSession.createDeleteTransaction(); deleteTransaction.commit(); log.log(Level.INFO, TenantRepository.logPre(applicationId) + "Waiting for session " + sessionId + " to be deleted"); if ( ! waitTime.isZero() && localSessionHasBeenDeleted(applicationId, sessionId, waitTime)) { log.log(Level.INFO, TenantRepository.logPre(applicationId) + "Session " + sessionId + " deleted"); } else { deleteTransaction.rollbackOrLog(); throw new InternalServerException(applicationId + " was not deleted (waited " + waitTime + "), session " + sessionId); } } catch (NotFoundException e) { log.log(Level.INFO, TenantRepository.logPre(applicationId) + "Active session exists, but has not been deleted properly. 
Trying to cleanup"); } NestedTransaction transaction = new NestedTransaction(); Curator curator = tenantRepository.getCurator(); transaction.add(new ContainerEndpointsCache(tenant.getPath(), curator).delete(applicationId)); transaction.add(new ApplicationRolesStore(curator, tenant.getPath()).delete(applicationId)); transaction.add(new EndpointCertificateMetadataStore(curator, tenant.getPath()).delete(applicationId)); transaction.add(tenantApplications.createDeleteTransaction(applicationId)); hostProvisioner.ifPresent(provisioner -> provisioner.remove(transaction, applicationId)); transaction.onCommitted(() -> log.log(Level.INFO, "Deleted " + applicationId)); transaction.commit(); return true; } } public HttpResponse clusterControllerStatusPage(ApplicationId applicationId, String hostName, String pathSuffix) { String relativePath = "clustercontroller-status/" + pathSuffix; return httpProxy.get(getApplication(applicationId), hostName, CLUSTERCONTROLLER_CONTAINER.serviceName, relativePath); } public Long getApplicationGeneration(ApplicationId applicationId) { return getApplication(applicationId).getApplicationGeneration(); } public void restart(ApplicationId applicationId, HostFilter hostFilter) { hostProvisioner.ifPresent(provisioner -> provisioner.restart(applicationId, hostFilter)); } public boolean isSuspended(ApplicationId application) { return orchestrator.getAllSuspendedApplications().contains(application); } public HttpResponse filedistributionStatus(ApplicationId applicationId, Duration timeout) { return fileDistributionStatus.status(getApplication(applicationId), timeout); } public Set<String> deleteUnusedFiledistributionReferences(File fileReferencesPath, Duration keepFileReferences) { if (!fileReferencesPath.isDirectory()) throw new RuntimeException(fileReferencesPath + " is not a directory"); Set<String> fileReferencesInUse = new HashSet<>(); for (var applicationId : listApplications()) { try { Optional<Application> app = 
getOptionalApplication(applicationId); if (app.isEmpty()) continue; fileReferencesInUse.addAll(app.get().getModel().fileReferences().stream() .map(FileReference::value) .collect(Collectors.toSet())); } catch (Exception e) { log.log(Level.WARNING, "Getting file references in use for '" + applicationId + "' failed", e); } } log.log(Level.FINE, "File references in use : " + fileReferencesInUse); Set<String> fileReferencesOnDisk = getFileReferencesOnDisk(fileReferencesPath); log.log(Level.FINE, "File references on disk (in " + fileReferencesPath + "): " + fileReferencesOnDisk); Instant instant = Instant.now().minus(keepFileReferences); Set<String> fileReferencesToDelete = fileReferencesOnDisk .stream() .filter(fileReference -> ! fileReferencesInUse.contains(fileReference)) .filter(fileReference -> isFileLastModifiedBefore(new File(fileReferencesPath, fileReference), instant)) .collect(Collectors.toSet()); if (fileReferencesToDelete.size() > 0) { log.log(Level.FINE, "Will delete file references not in use: " + fileReferencesToDelete); fileReferencesToDelete.forEach(fileReference -> { File file = new File(fileReferencesPath, fileReference); if ( ! 
IOUtils.recursiveDeleteDir(file)) log.log(Level.WARNING, "Could not delete " + file.getAbsolutePath()); }); } return fileReferencesToDelete; } public Set<FileReference> getFileReferences(ApplicationId applicationId) { return getOptionalApplication(applicationId).map(app -> app.getModel().fileReferences()).orElse(Set.of()); } public ApplicationFile getApplicationFileFromSession(TenantName tenantName, long sessionId, String path, LocalSession.Mode mode) { Tenant tenant = tenantRepository.getTenant(tenantName); return getLocalSession(tenant, sessionId).getApplicationFile(Path.fromString(path), mode); } public Tenant getTenant(ApplicationId applicationId) { return tenantRepository.getTenant(applicationId.tenant()); } private Application getApplication(ApplicationId applicationId) { return getApplication(applicationId, Optional.empty()); } private Application getApplication(ApplicationId applicationId, Optional<Version> version) { try { Tenant tenant = getTenant(applicationId); if (tenant == null) throw new NotFoundException("Tenant '" + applicationId.tenant() + "' not found"); long sessionId = getSessionIdForApplication(tenant, applicationId); RemoteSession session = getRemoteSession(tenant, sessionId); return session.ensureApplicationLoaded().getForVersionOrLatest(version, clock.instant()); } catch (NotFoundException e) { log.log(Level.WARNING, "Failed getting application for '" + applicationId + "': " + e.getMessage()); throw e; } catch (Exception e) { log.log(Level.WARNING, "Failed getting application for '" + applicationId + "'", e); throw e; } } private Optional<Application> getOptionalApplication(ApplicationId applicationId) { try { return Optional.of(getApplication(applicationId)); } catch (Exception e) { return Optional.empty(); } } public Set<ApplicationId> listApplications() { return tenantRepository.getAllTenants().stream() .flatMap(tenant -> tenant.getApplicationRepo().activeApplications().stream()) .collect(Collectors.toSet()); } private boolean 
isFileLastModifiedBefore(File fileReference, Instant instant) { BasicFileAttributes fileAttributes; try { fileAttributes = readAttributes(fileReference.toPath(), BasicFileAttributes.class); return fileAttributes.lastModifiedTime().toInstant().isBefore(instant); } catch (IOException e) { throw new UncheckedIOException(e); } } private boolean localSessionHasBeenDeleted(ApplicationId applicationId, long sessionId, Duration waitTime) { SessionRepository sessionRepository = getTenant(applicationId).getSessionRepository(); Instant end = Instant.now().plus(waitTime); do { if (sessionRepository.getRemoteSession(sessionId) == null) return true; try { Thread.sleep(10); } catch (InterruptedException e) { /* ignored */} } while (Instant.now().isBefore(end)); return false; } public Optional<String> getApplicationPackageReference(ApplicationId applicationId) { Optional<String> applicationPackage = Optional.empty(); RemoteSession session = getActiveSession(applicationId); if (session != null) { FileReference applicationPackageReference = session.getApplicationPackageReference(); File downloadDirectory = new File(Defaults.getDefaults().underVespaHome(configserverConfig().fileReferencesDir())); if (applicationPackageReference != null && ! 
fileReferenceExistsOnDisk(downloadDirectory, applicationPackageReference)) applicationPackage = Optional.of(applicationPackageReference.value()); } return applicationPackage; } public List<Version> getAllVersions(ApplicationId applicationId) { Optional<ApplicationSet> applicationSet = getCurrentActiveApplicationSet(getTenant(applicationId), applicationId); if (applicationSet.isEmpty()) return List.of(); else return applicationSet.get().getAllVersions(applicationId); } public HttpResponse checkServiceForConfigConvergence(ApplicationId applicationId, String hostAndPort, URI uri, Duration timeout, Optional<Version> vespaVersion) { return convergeChecker.checkService(getApplication(applicationId, vespaVersion), hostAndPort, uri, timeout); } public HttpResponse servicesToCheckForConfigConvergence(ApplicationId applicationId, URI uri, Duration timeoutPerService, Optional<Version> vespaVersion) { return convergeChecker.servicesToCheck(getApplication(applicationId, vespaVersion), uri, timeoutPerService); } public HttpResponse getLogs(ApplicationId applicationId, Optional<String> hostname, String apiParams) { String logServerURI = getLogServerURI(applicationId, hostname) + apiParams; return logRetriever.getLogs(logServerURI); } public HttpResponse getTesterStatus(ApplicationId applicationId) { return testerClient.getStatus(getTesterHostname(applicationId), getTesterPort(applicationId)); } public HttpResponse getTesterLog(ApplicationId applicationId, Long after) { return testerClient.getLog(getTesterHostname(applicationId), getTesterPort(applicationId), after); } public HttpResponse startTests(ApplicationId applicationId, String suite, byte[] config) { return testerClient.startTests(getTesterHostname(applicationId), getTesterPort(applicationId), suite, config); } public HttpResponse isTesterReady(ApplicationId applicationId) { return testerClient.isTesterReady(getTesterHostname(applicationId), getTesterPort(applicationId)); } private String getTesterHostname(ApplicationId 
applicationId) { return getTesterServiceInfo(applicationId).getHostName(); } private int getTesterPort(ApplicationId applicationId) { ServiceInfo serviceInfo = getTesterServiceInfo(applicationId); return serviceInfo.getPorts().stream().filter(portInfo -> portInfo.getTags().contains("http")).findFirst().get().getPort(); } private ServiceInfo getTesterServiceInfo(ApplicationId applicationId) { Application application = getApplication(applicationId); return application.getModel().getHosts().stream() .findFirst().orElseThrow(() -> new InternalServerException("Could not find any host for tester app " + applicationId.toFullString())) .getServices().stream() .filter(service -> CONTAINER.serviceName.equals(service.getServiceType())) .findFirst() .orElseThrow(() -> new InternalServerException("Could not find any tester container for tester app " + applicationId.toFullString())); } public CompletionWaiter activate(LocalSession session, Session previousActiveSession, ApplicationId applicationId, boolean ignoreSessionStaleFailure) { CompletionWaiter waiter = session.getSessionZooKeeperClient().createActiveWaiter(); NestedTransaction transaction = new NestedTransaction(); transaction.add(deactivateCurrentActivateNew(previousActiveSession, session, ignoreSessionStaleFailure)); hostProvisioner.ifPresent(provisioner -> provisioner.activate(transaction, applicationId, session.getAllocatedHosts().getHosts())); transaction.commit(); return waiter; } /** * Gets the active Session for the given application id. * * @return the active session, or null if there is no active session for the given application id. 
*/ public RemoteSession getActiveSession(ApplicationId applicationId) { return getActiveSession(getTenant(applicationId), applicationId); } public long getSessionIdForApplication(ApplicationId applicationId) { Tenant tenant = getTenant(applicationId); if (tenant == null) throw new NotFoundException("Tenant '" + applicationId.tenant() + "' not found"); return getSessionIdForApplication(tenant, applicationId); } private long getSessionIdForApplication(Tenant tenant, ApplicationId applicationId) { TenantApplications applicationRepo = tenant.getApplicationRepo(); if (! applicationRepo.exists(applicationId)) throw new NotFoundException("Unknown application id '" + applicationId + "'"); return applicationRepo.requireActiveSessionOf(applicationId); } public void validateThatSessionIsNotActive(Tenant tenant, long sessionId) { Session session = getRemoteSession(tenant, sessionId); if (Session.Status.ACTIVATE.equals(session.getStatus())) { throw new IllegalStateException("Session is active: " + sessionId); } } public void validateThatSessionIsPrepared(Tenant tenant, long sessionId) { Session session = getRemoteSession(tenant, sessionId); if ( ! 
Session.Status.PREPARE.equals(session.getStatus())) throw new IllegalStateException("Session not prepared: " + sessionId); } public long createSessionFromExisting(ApplicationId applicationId, DeployLogger logger, boolean internalRedeploy, TimeoutBudget timeoutBudget) { Tenant tenant = getTenant(applicationId); SessionRepository sessionRepository = tenant.getSessionRepository(); RemoteSession fromSession = getExistingSession(tenant, applicationId); LocalSession session = sessionRepository.createSessionFromExisting(fromSession, logger, internalRedeploy, timeoutBudget); sessionRepository.addLocalSession(session); return session.getSessionId(); } public long createSession(ApplicationId applicationId, TimeoutBudget timeoutBudget, InputStream in, String contentType) { File tempDir = uncheck(() -> Files.createTempDirectory("deploy")).toFile(); long sessionId; try { sessionId = createSession(applicationId, timeoutBudget, decompressApplication(in, contentType, tempDir)); } finally { cleanupTempDirectory(tempDir); } return sessionId; } public long createSession(ApplicationId applicationId, TimeoutBudget timeoutBudget, File applicationDirectory) { Tenant tenant = getTenant(applicationId); tenant.getApplicationRepo().createApplication(applicationId); Optional<Long> activeSessionId = tenant.getApplicationRepo().activeSessionOf(applicationId); LocalSession session = tenant.getSessionRepository().createSession(applicationDirectory, applicationId, timeoutBudget, activeSessionId); tenant.getSessionRepository().addLocalSession(session); return session.getSessionId(); } public void deleteExpiredLocalSessions() { Map<Tenant, List<LocalSession>> sessionsPerTenant = new HashMap<>(); tenantRepository.getAllTenants().forEach(tenant -> sessionsPerTenant.put(tenant, tenant.getSessionRepository().getLocalSessions())); Set<ApplicationId> applicationIds = new HashSet<>(); sessionsPerTenant.values() .forEach(sessionList -> sessionList.stream() .map(Session::getOptionalApplicationId) 
.filter(Optional::isPresent) .forEach(appId -> applicationIds.add(appId.get()))); Map<ApplicationId, Long> activeSessions = new HashMap<>(); applicationIds.forEach(applicationId -> { RemoteSession activeSession = getActiveSession(applicationId); if (activeSession != null) activeSessions.put(applicationId, activeSession.getSessionId()); }); sessionsPerTenant.keySet().forEach(tenant -> tenant.getSessionRepository().deleteExpiredSessions(activeSessions)); } public int deleteExpiredSessionLocks(Duration expiryTime) { return tenantRepository.getAllTenants() .stream() .map(tenant -> tenant.getSessionRepository().deleteExpiredLocks(clock, expiryTime)) .mapToInt(i -> i) .sum(); } public int deleteExpiredRemoteSessions(Duration expiryTime) { return deleteExpiredRemoteSessions(clock, expiryTime); } public int deleteExpiredRemoteSessions(Clock clock, Duration expiryTime) { return tenantRepository.getAllTenants() .stream() .map(tenant -> tenant.getSessionRepository().deleteExpiredRemoteSessions(clock, expiryTime)) .mapToInt(i -> i) .sum(); } public TenantRepository tenantRepository() { return tenantRepository; } public void deleteTenant(TenantName tenantName) { List<ApplicationId> activeApplications = activeApplications(tenantName); if (activeApplications.isEmpty()) tenantRepository.deleteTenant(tenantName); else throw new IllegalArgumentException("Cannot delete tenant '" + tenantName + "', it has active applications: " + activeApplications); } private List<ApplicationId> activeApplications(TenantName tenantName) { return tenantRepository.getTenant(tenantName).getApplicationRepo().activeApplications(); } public ProtonMetricsResponse getProtonMetrics(ApplicationId applicationId) { Application application = getApplication(applicationId); ProtonMetricsRetriever protonMetricsRetriever = new ProtonMetricsRetriever(); return protonMetricsRetriever.getMetrics(application); } public DeploymentMetricsResponse getDeploymentMetrics(ApplicationId applicationId) { Application application = 
getApplication(applicationId); DeploymentMetricsRetriever deploymentMetricsRetriever = new DeploymentMetricsRetriever(); return deploymentMetricsRetriever.getMetrics(application); } public ApplicationMetaData getMetadataFromLocalSession(Tenant tenant, long sessionId) { return getLocalSession(tenant, sessionId).getMetaData(); } public ConfigserverConfig configserverConfig() { return configserverConfig; } public ApplicationId getApplicationIdForHostname(String hostname) { Optional<ApplicationId> applicationId = tenantRepository.getAllTenantNames().stream() .map(tenantName -> tenantRepository.getTenant(tenantName).getApplicationRepo().getApplicationIdForHostName(hostname)) .filter(Objects::nonNull) .findFirst(); return applicationId.orElse(null); } private void validateThatLocalSessionIsNotActive(Tenant tenant, long sessionId) { LocalSession session = getLocalSession(tenant, sessionId); if (Session.Status.ACTIVATE.equals(session.getStatus())) { throw new IllegalStateException("Session is active: " + sessionId); } } private LocalSession getLocalSession(Tenant tenant, long sessionId) { LocalSession session = tenant.getSessionRepository().getLocalSession(sessionId); if (session == null) throw new NotFoundException("Session " + sessionId + " was not found"); return session; } private RemoteSession getRemoteSession(Tenant tenant, long sessionId) { RemoteSession session = tenant.getSessionRepository().getRemoteSession(sessionId); if (session == null) throw new NotFoundException("Session " + sessionId + " was not found"); return session; } public Optional<ApplicationSet> getCurrentActiveApplicationSet(Tenant tenant, ApplicationId appId) { Optional<ApplicationSet> currentActiveApplicationSet = Optional.empty(); TenantApplications applicationRepo = tenant.getApplicationRepo(); try { long currentActiveSessionId = applicationRepo.requireActiveSessionOf(appId); RemoteSession currentActiveSession = getRemoteSession(tenant, currentActiveSessionId); currentActiveApplicationSet = 
Optional.ofNullable(currentActiveSession.ensureApplicationLoaded()); } catch (IllegalArgumentException e) { } return currentActiveApplicationSet; } private File decompressApplication(InputStream in, String contentType, File tempDir) { try (CompressedApplicationInputStream application = CompressedApplicationInputStream.createFromCompressedStream(in, contentType)) { return decompressApplication(application, tempDir); } catch (IOException e) { throw new IllegalArgumentException("Unable to decompress data in body", e); } } private File decompressApplication(CompressedApplicationInputStream in, File tempDir) { try { return in.decompress(tempDir); } catch (IOException e) { throw new IllegalArgumentException("Unable to decompress stream", e); } } private void cleanupTempDirectory(File tempDir) { logger.log(Level.FINE, "Deleting tmp dir '" + tempDir + "'"); if (!IOUtils.recursiveDeleteDir(tempDir)) { logger.log(Level.WARNING, "Not able to delete tmp dir '" + tempDir + "'"); } } private RemoteSession getExistingSession(Tenant tenant, ApplicationId applicationId) { TenantApplications applicationRepo = tenant.getApplicationRepo(); return getRemoteSession(tenant, applicationRepo.requireActiveSessionOf(applicationId)); } private RemoteSession getActiveSession(Tenant tenant, ApplicationId applicationId) { TenantApplications applicationRepo = tenant.getApplicationRepo(); if (applicationRepo.activeApplications().contains(applicationId)) { return tenant.getSessionRepository().getRemoteSession(applicationRepo.requireActiveSessionOf(applicationId)); } return null; } public LocalSession getActiveLocalSession(Tenant tenant, ApplicationId applicationId) { TenantApplications applicationRepo = tenant.getApplicationRepo(); if (applicationRepo.activeApplications().contains(applicationId)) { return tenant.getSessionRepository().getLocalSession(applicationRepo.requireActiveSessionOf(applicationId)); } return null; } private static void logConfigChangeActions(ConfigChangeActions actions, 
DeployLogger logger) { RestartActions restartActions = actions.getRestartActions(); if ( ! restartActions.isEmpty()) { logger.log(Level.WARNING, "Change(s) between active and new application that require restart:\n" + restartActions.format()); } RefeedActions refeedActions = actions.getRefeedActions(); if ( ! refeedActions.isEmpty()) { boolean allAllowed = refeedActions.getEntries().stream().allMatch(RefeedActions.Entry::allowed); logger.log(allAllowed ? Level.INFO : Level.WARNING, "Change(s) between active and new application that may require re-feed:\n" + refeedActions.format()); } } private String getLogServerURI(ApplicationId applicationId, Optional<String> hostname) { if (hostname.isPresent() && HOSTED_VESPA_TENANT.equals(applicationId.tenant())) { return "http: } Application application = getApplication(applicationId); Collection<HostInfo> hostInfos = application.getModel().getHosts(); HostInfo logServerHostInfo = hostInfos.stream() .filter(host -> host.getServices().stream() .anyMatch(serviceInfo -> serviceInfo.getServiceType().equalsIgnoreCase("logserver"))) .findFirst().orElseThrow(() -> new IllegalArgumentException("Could not find host info for logserver")); ServiceInfo serviceInfo = logServerHostInfo.getServices().stream().filter(service -> List.of(LOGSERVER_CONTAINER.serviceName, CONTAINER.serviceName).contains(service.getServiceType())) .findFirst().orElseThrow(() -> new IllegalArgumentException("No container running on logserver host")); int port = servicePort(serviceInfo); return "http: } private int servicePort(ServiceInfo serviceInfo) { return serviceInfo.getPorts().stream() .filter(portInfo -> portInfo.getTags().stream().anyMatch(tag -> tag.equalsIgnoreCase("http"))) .findFirst().orElseThrow(() -> new IllegalArgumentException("Could not find HTTP port")) .getPort(); } public Slime createDeployLog() { Slime deployLog = new Slime(); deployLog.setObject(); return deployLog; } public Zone zone() { return new 
Zone(SystemName.from(configserverConfig.system()), Environment.from(configserverConfig.environment()), RegionName.from(configserverConfig.region())); } /** Emits as a metric the time in millis spent while holding this timer, with deployment ID as dimensions. */ public ActionTimer timerFor(ApplicationId id, String metricName) { return new ActionTimer(metric, clock, id, configserverConfig.environment(), configserverConfig.region(), metricName); } public static class ActionTimer implements AutoCloseable { private final Metric metric; private final Clock clock; private final ApplicationId id; private final String environment; private final String region; private final String name; private final Instant start; private ActionTimer(Metric metric, Clock clock, ApplicationId id, String environment, String region, String name) { this.metric = metric; this.clock = clock; this.id = id; this.environment = environment; this.region = region; this.name = name; this.start = clock.instant(); } @Override public void close() { metric.set(name, Duration.between(start, clock.instant()).toMillis(), metric.createContext(Map.of("applicationId", id.toFullString(), "tenantName", id.tenant().value(), "app", id.application().value() + "." + id.instance().value(), "zone", environment + "." + region))); } } }
Should this be outerComponent.inject(middleComponent); ?
public void all_created_components_are_returned_in_reverse_topological_order() { Node innerComponent = mockComponentNode(SimpleComponent.class); Node middleComponent = mockComponentNode(ComponentTakingComponent.class); Node outerComponent = mockComponentNode(ComponentTakingComponentTakingComponent.class); middleComponent.inject(innerComponent); outerComponent.inject(innerComponent); ComponentGraph componentGraph = new ComponentGraph(); componentGraph.add(innerComponent); componentGraph.add(middleComponent); componentGraph.add(outerComponent); componentGraph.complete(); innerComponent.constructInstance(); middleComponent.constructInstance(); outerComponent.constructInstance(); assertEquals(List.of(outerComponent.constructedInstance().get(), middleComponent.constructedInstance().get(), innerComponent.constructedInstance().get()), componentGraph.allConstructedComponentsAndProviders()); }
outerComponent.inject(innerComponent);
public void all_created_components_are_returned_in_reverse_topological_order() { for (int i = 0; i < 10; i++) { Node innerComponent = mockComponentNode(SimpleComponent.class); Node middleComponent = mockComponentNode(ComponentTakingComponent.class); Node outerComponent = mockComponentNode(ComponentTakingComponentTakingComponent.class); ComponentGraph componentGraph = new ComponentGraph(); componentGraph.add(innerComponent); componentGraph.add(middleComponent); componentGraph.add(outerComponent); componentGraph.complete(); innerComponent.constructInstance(); middleComponent.constructInstance(); outerComponent.constructInstance(); assertEquals(List.of(outerComponent.constructedInstance().get(), middleComponent.constructedInstance().get(), innerComponent.constructedInstance().get()), componentGraph.allConstructedComponentsAndProviders()); } }
class ConfigMap extends HashMap<ConfigKey<? extends ConfigInstance>, ConfigInstance> { public ConfigMap() { super(); } public <T extends ConfigInstance> ConfigMap add(Class<T> clazz, String configId) { ConfigKey<T> key = new ConfigKey<>(clazz, configId); put(key, ConfigGetter.getConfig(key.getConfigClass(), key.getConfigId())); return this; } public static <T extends ConfigInstance> ConfigMap newMap(Class<T> clazz, String configId) { ConfigMap ret = new ConfigMap(); ret.add(clazz, configId); return ret; } }
class ConfigMap extends HashMap<ConfigKey<? extends ConfigInstance>, ConfigInstance> { public ConfigMap() { super(); } public <T extends ConfigInstance> ConfigMap add(Class<T> clazz, String configId) { ConfigKey<T> key = new ConfigKey<>(clazz, configId); put(key, ConfigGetter.getConfig(key.getConfigClass(), key.getConfigId())); return this; } public static <T extends ConfigInstance> ConfigMap newMap(Class<T> clazz, String configId) { ConfigMap ret = new ConfigMap(); ret.add(clazz, configId); return ret; } }
Yes, it should. Not sure it changes anything, and no idea why it's allowed, though :) Perhaps this isn't needed at all for the unit test.
public void all_created_components_are_returned_in_reverse_topological_order() { Node innerComponent = mockComponentNode(SimpleComponent.class); Node middleComponent = mockComponentNode(ComponentTakingComponent.class); Node outerComponent = mockComponentNode(ComponentTakingComponentTakingComponent.class); middleComponent.inject(innerComponent); outerComponent.inject(innerComponent); ComponentGraph componentGraph = new ComponentGraph(); componentGraph.add(innerComponent); componentGraph.add(middleComponent); componentGraph.add(outerComponent); componentGraph.complete(); innerComponent.constructInstance(); middleComponent.constructInstance(); outerComponent.constructInstance(); assertEquals(List.of(outerComponent.constructedInstance().get(), middleComponent.constructedInstance().get(), innerComponent.constructedInstance().get()), componentGraph.allConstructedComponentsAndProviders()); }
outerComponent.inject(innerComponent);
public void all_created_components_are_returned_in_reverse_topological_order() { for (int i = 0; i < 10; i++) { Node innerComponent = mockComponentNode(SimpleComponent.class); Node middleComponent = mockComponentNode(ComponentTakingComponent.class); Node outerComponent = mockComponentNode(ComponentTakingComponentTakingComponent.class); ComponentGraph componentGraph = new ComponentGraph(); componentGraph.add(innerComponent); componentGraph.add(middleComponent); componentGraph.add(outerComponent); componentGraph.complete(); innerComponent.constructInstance(); middleComponent.constructInstance(); outerComponent.constructInstance(); assertEquals(List.of(outerComponent.constructedInstance().get(), middleComponent.constructedInstance().get(), innerComponent.constructedInstance().get()), componentGraph.allConstructedComponentsAndProviders()); } }
class ConfigMap extends HashMap<ConfigKey<? extends ConfigInstance>, ConfigInstance> { public ConfigMap() { super(); } public <T extends ConfigInstance> ConfigMap add(Class<T> clazz, String configId) { ConfigKey<T> key = new ConfigKey<>(clazz, configId); put(key, ConfigGetter.getConfig(key.getConfigClass(), key.getConfigId())); return this; } public static <T extends ConfigInstance> ConfigMap newMap(Class<T> clazz, String configId) { ConfigMap ret = new ConfigMap(); ret.add(clazz, configId); return ret; } }
class ConfigMap extends HashMap<ConfigKey<? extends ConfigInstance>, ConfigInstance> { public ConfigMap() { super(); } public <T extends ConfigInstance> ConfigMap add(Class<T> clazz, String configId) { ConfigKey<T> key = new ConfigKey<>(clazz, configId); put(key, ConfigGetter.getConfig(key.getConfigClass(), key.getConfigId())); return this; } public static <T extends ConfigInstance> ConfigMap newMap(Class<T> clazz, String configId) { ConfigMap ret = new ConfigMap(); ret.add(clazz, configId); return ret; } }
Also, the test should probably run lots of times, to catch the 1/6 false positives each time. With randomised operation order before verification.
public void all_created_components_are_returned_in_reverse_topological_order() { Node innerComponent = mockComponentNode(SimpleComponent.class); Node middleComponent = mockComponentNode(ComponentTakingComponent.class); Node outerComponent = mockComponentNode(ComponentTakingComponentTakingComponent.class); middleComponent.inject(innerComponent); outerComponent.inject(innerComponent); ComponentGraph componentGraph = new ComponentGraph(); componentGraph.add(innerComponent); componentGraph.add(middleComponent); componentGraph.add(outerComponent); componentGraph.complete(); innerComponent.constructInstance(); middleComponent.constructInstance(); outerComponent.constructInstance(); assertEquals(List.of(outerComponent.constructedInstance().get(), middleComponent.constructedInstance().get(), innerComponent.constructedInstance().get()), componentGraph.allConstructedComponentsAndProviders()); }
outerComponent.inject(innerComponent);
public void all_created_components_are_returned_in_reverse_topological_order() { for (int i = 0; i < 10; i++) { Node innerComponent = mockComponentNode(SimpleComponent.class); Node middleComponent = mockComponentNode(ComponentTakingComponent.class); Node outerComponent = mockComponentNode(ComponentTakingComponentTakingComponent.class); ComponentGraph componentGraph = new ComponentGraph(); componentGraph.add(innerComponent); componentGraph.add(middleComponent); componentGraph.add(outerComponent); componentGraph.complete(); innerComponent.constructInstance(); middleComponent.constructInstance(); outerComponent.constructInstance(); assertEquals(List.of(outerComponent.constructedInstance().get(), middleComponent.constructedInstance().get(), innerComponent.constructedInstance().get()), componentGraph.allConstructedComponentsAndProviders()); } }
class ConfigMap extends HashMap<ConfigKey<? extends ConfigInstance>, ConfigInstance> { public ConfigMap() { super(); } public <T extends ConfigInstance> ConfigMap add(Class<T> clazz, String configId) { ConfigKey<T> key = new ConfigKey<>(clazz, configId); put(key, ConfigGetter.getConfig(key.getConfigClass(), key.getConfigId())); return this; } public static <T extends ConfigInstance> ConfigMap newMap(Class<T> clazz, String configId) { ConfigMap ret = new ConfigMap(); ret.add(clazz, configId); return ret; } }
class ConfigMap extends HashMap<ConfigKey<? extends ConfigInstance>, ConfigInstance> { public ConfigMap() { super(); } public <T extends ConfigInstance> ConfigMap add(Class<T> clazz, String configId) { ConfigKey<T> key = new ConfigKey<>(clazz, configId); put(key, ConfigGetter.getConfig(key.getConfigClass(), key.getConfigId())); return this; } public static <T extends ConfigInstance> ConfigMap newMap(Class<T> clazz, String configId) { ConfigMap ret = new ConfigMap(); ret.add(clazz, configId); return ret; } }
Why `session.getSessionZooKeeperClient()` here, and `createSessionZooKeeperClient(session.getSessionId())` elsewhere?
void confirmUpload(RemoteSession session) { Curator.CompletionWaiter waiter = session.getSessionZooKeeperClient().getUploadWaiter(); long sessionId = session.getSessionId(); log.log(Level.FINE, "Notifying upload waiter for session " + sessionId); notifyCompletion(waiter, session); log.log(Level.FINE, "Done notifying upload for session " + sessionId); }
Curator.CompletionWaiter waiter = session.getSessionZooKeeperClient().getUploadWaiter();
void confirmUpload(RemoteSession session) { Curator.CompletionWaiter waiter = session.getSessionZooKeeperClient().getUploadWaiter(); long sessionId = session.getSessionId(); log.log(Level.FINE, "Notifying upload waiter for session " + sessionId); notifyCompletion(waiter, session); log.log(Level.FINE, "Done notifying upload for session " + sessionId); }
/**
 * Keeps track of the deployment sessions of one tenant, both the local sessions backed by
 * directories on this server's file system and the remote sessions backed by ZooKeeper
 * (accessed through Curator). A directory cache on the tenant's sessions path keeps the
 * remote view in sync; per-session state watchers react to session status changes.
 */
class SessionRepository {

    private static final Logger log = Logger.getLogger(SessionRepository.class.getName());
    // Session application directories are named by their (numeric) session id.
    private static final FilenameFilter sessionApplicationsFilter = (dir, name) -> name.matches("\\d+");
    // Sentinel used when there is no currently active session for an application.
    private static final long nonExistingActiveSessionId = 0;

    private final SessionCache<LocalSession> localSessionCache = new SessionCache<>();
    private final SessionCache<RemoteSession> remoteSessionCache = new SessionCache<>();
    private final Map<Long, SessionStateWatcher> sessionStateWatchers = new HashMap<>();
    private final Duration sessionLifetime;
    private final Clock clock;
    private final Curator curator;
    private final Executor zkWatcherExecutor;
    private final TenantFileSystemDirs tenantFileSystemDirs;
    private final BooleanFlag distributeApplicationPackage;
    private final MetricUpdater metrics;
    private final Curator.DirectoryCache directoryCache;
    private final TenantApplications applicationRepo;
    private final SessionPreparer sessionPreparer;
    private final Path sessionsPath;
    private final TenantName tenantName;
    private final GlobalComponentRegistry componentRegistry;
    private final Path locksPath;

    public SessionRepository(TenantName tenantName,
                             GlobalComponentRegistry componentRegistry,
                             TenantApplications applicationRepo,
                             FlagSource flagSource,
                             SessionPreparer sessionPreparer) {
        this.tenantName = tenantName;
        this.componentRegistry = componentRegistry;
        this.sessionsPath = TenantRepository.getSessionsPath(tenantName);
        this.clock = componentRegistry.getClock();
        this.curator = componentRegistry.getCurator();
        this.sessionLifetime = Duration.ofSeconds(componentRegistry.getConfigserverConfig().sessionLifetime());
        this.zkWatcherExecutor = command -> componentRegistry.getZkWatcherExecutor().execute(tenantName, command);
        this.tenantFileSystemDirs = new TenantFileSystemDirs(componentRegistry.getConfigServerDB(), tenantName);
        this.applicationRepo = applicationRepo;
        this.sessionPreparer = sessionPreparer;
        this.distributeApplicationPackage = Flags.CONFIGSERVER_DISTRIBUTE_APPLICATION_PACKAGE.bindTo(flagSource);
        this.metrics = componentRegistry.getMetrics().getOrCreateMetricUpdater(Metrics.createDimensions(tenantName));
        this.locksPath = TenantRepository.getLocksPath(tenantName);
        // Load existing sessions before starting the directory cache, so that the
        // listener only sees changes arriving after this constructor ran.
        loadSessions();
        this.directoryCache = curator.createDirectoryCache(sessionsPath.getAbsolute(), false, false, componentRegistry.getZkCacheExecutor());
        this.directoryCache.addListener(this::childEvent);
        this.directoryCache.start();
    }

    /** Loads local sessions from disk and remote sessions from ZooKeeper. */
    private void loadSessions() {
        loadLocalSessions();
        initializeRemoteSessions();
    }

    /** Adds a local session to the cache and sets up its remote counterpart and state watcher. */
    public synchronized void addLocalSession(LocalSession session) {
        localSessionCache.addSession(session);
        long sessionId = session.getSessionId();
        RemoteSession remoteSession = createRemoteSession(sessionId);
        addSessionStateWatcher(sessionId, remoteSession, Optional.of(session));
    }

    public LocalSession getLocalSession(long sessionId) {
        return localSessionCache.getSession(sessionId);
    }

    public List<LocalSession> getLocalSessions() {
        return localSessionCache.getSessions();
    }

    /** Loads all numeric session directories from disk; skips (and logs) those that fail to load. */
    private void loadLocalSessions() {
        File[] sessions = tenantFileSystemDirs.sessionsPath().listFiles(sessionApplicationsFilter);
        if (sessions == null) return;
        for (File session : sessions) {
            try {
                addLocalSession(createSessionFromId(Long.parseLong(session.getName())));
            } catch (IllegalArgumentException e) {
                log.log(Level.WARNING, "Could not load session '" +
                        session.getAbsolutePath() + "':" + e.getMessage() + ", skipping it.");
            }
        }
    }

    /**
     * Prepares a local session: creates the application, runs the session preparer and
     * waits for the prepare completion waiter before returning the resulting config change actions.
     */
    public ConfigChangeActions prepareLocalSession(LocalSession session, DeployLogger logger, PrepareParams params,
                                                   Optional<ApplicationSet> currentActiveApplicationSet, Path tenantPath,
                                                   Instant now) {
        applicationRepo.createApplication(params.getApplicationId());
        logger.log(Level.FINE, "Created application " + params.getApplicationId());
        long sessionId = session.getSessionId();
        SessionZooKeeperClient sessionZooKeeperClient = createSessionZooKeeperClient(sessionId);
        Curator.CompletionWaiter waiter = sessionZooKeeperClient.createPrepareWaiter();
        ConfigChangeActions actions = sessionPreparer.prepare(applicationRepo.getHostValidator(), logger, params,
                                                             currentActiveApplicationSet, tenantPath, now,
                                                             getSessionAppDir(sessionId), session.getApplicationPackage(),
                                                             sessionZooKeeperClient)
                .getConfigChangeActions();
        setPrepared(session);
        waiter.awaitCompletion(params.getTimeoutBudget().timeLeft());
        return actions;
    }

    /**
     * Deletes expired local sessions, and in addition deletes sessions older than one day
     * that are not the active session of their application. Best-effort: any failure is logged
     * and purging continues at the next invocation.
     */
    public void deleteExpiredSessions(Map<ApplicationId, Long> activeSessions) {
        log.log(Level.FINE, "Purging old sessions for tenant '" + tenantName + "'");
        try {
            for (LocalSession candidate : localSessionCache.getSessions()) {
                Instant createTime = candidate.getCreateTime();
                log.log(Level.FINE, "Candidate session for deletion: " + candidate.getSessionId() + ", created: " + createTime);
                if (hasExpired(candidate) && !isActiveSession(candidate)) {
                    deleteLocalSession(candidate);
                } else if (createTime.plus(Duration.ofDays(1)).isBefore(clock.instant())) {
                    // More than a day old: delete unless it is the application's active session.
                    Optional<ApplicationId> applicationId = candidate.getOptionalApplicationId();
                    if (applicationId.isEmpty()) continue;
                    Long activeSession = activeSessions.get(applicationId.get());
                    if (activeSession == null || activeSession != candidate.getSessionId()) {
                        deleteLocalSession(candidate);
                        log.log(Level.INFO, "Deleted inactive session " + candidate.getSessionId() + " created " +
                                createTime + " for '" + applicationId + "'");
                    }
                }
            }
        } catch (Throwable e) {
            log.log(Level.WARNING, "Error when purging old sessions ", e);
        }
        log.log(Level.FINE, "Done purging old sessions");
    }

    private boolean hasExpired(LocalSession candidate) {
        return (candidate.getCreateTime().plus(sessionLifetime).isBefore(clock.instant()));
    }

    private boolean isActiveSession(LocalSession candidate) {
        return candidate.getStatus() == Session.Status.ACTIVATE;
    }

    /** Deletes a local session under the session lock: watcher, cache entry and persistent data. */
    public void deleteLocalSession(LocalSession session) {
        long sessionId = session.getSessionId();
        try (Lock lock = lock(sessionId)) {
            log.log(Level.FINE, "Deleting local session " + sessionId);
            SessionStateWatcher watcher = sessionStateWatchers.remove(sessionId);
            if (watcher != null) watcher.close();
            localSessionCache.removeSession(sessionId);
            deletePersistentData(sessionId);
        }
    }

    /** Deletes both the ZooKeeper data and the on-disk application directory in one transaction. */
    private void deletePersistentData(long sessionId) {
        NestedTransaction transaction = new NestedTransaction();
        SessionZooKeeperClient sessionZooKeeperClient = createSessionZooKeeperClient(sessionId);
        transaction.add(sessionZooKeeperClient.deleteTransaction(), FileTransaction.class);
        transaction.add(FileTransaction.from(FileOperations.delete(getSessionAppDir(sessionId).getAbsolutePath())));
        transaction.commit();
    }

    public void close() {
        deleteAllSessions();
        tenantFileSystemDirs.delete();
        try {
            if (directoryCache != null) {
                directoryCache.close();
            }
        } catch (Exception e) {
            log.log(Level.WARNING, "Exception when closing path cache", e);
        } finally {
            // Passing an empty list removes every remaining remote session watcher.
            checkForRemovedSessions(new ArrayList<>());
        }
    }

    private void deleteAllSessions() {
        // Copy to avoid mutating the cache while iterating over its sessions.
        List<LocalSession> sessions = new ArrayList<>(localSessionCache.getSessions());
        for (LocalSession session : sessions) {
            deleteLocalSession(session);
        }
    }

    public RemoteSession getRemoteSession(long sessionId) {
        return remoteSessionCache.getSession(sessionId);
    }

    /** Returns the session ids of all sessions currently present in ZooKeeper. */
    public List<Long> getRemoteSessions() {
        return getSessionList(curator.getChildren(sessionsPath));
    }

    public void addRemoteSession(RemoteSession session) {
        remoteSessionCache.addSession(session);
        metrics.incAddedSessions();
    }

    /** Deletes expired, non-active remote sessions; returns how many were deleted. */
    public int deleteExpiredRemoteSessions(Clock clock, Duration expiryTime) {
        int deleted = 0;
        for (long sessionId : getRemoteSessions()) {
            RemoteSession session = remoteSessionCache.getSession(sessionId);
            if (session == null) continue;
            if (session.getStatus() == Session.Status.ACTIVATE) continue;
            if (sessionHasExpired(session.getCreateTime(), expiryTime, clock)) {
                log.log(Level.FINE, "Remote session " + sessionId + " for " + tenantName + " has expired, deleting it");
                deleteSession(session);
                deleted++;
            }
        }
        return deleted;
    }

    /** Replaces the cached session with its deactivated variant. */
    public void deactivate(RemoteSession remoteSession) {
        remoteSessionCache.addSession(remoteSession.deactivate());
    }

    /** Deletes the session's ZooKeeper data. */
    public void deleteSession(RemoteSession session) {
        SessionZooKeeperClient sessionZooKeeperClient = createSessionZooKeeperClient(session.getSessionId());
        Transaction transaction = sessionZooKeeperClient.deleteTransaction();
        transaction.commit();
        transaction.close();
    }

    /** Deletes session locks whose ZooKeeper node is older than expiryTime; returns the count. */
    public int deleteExpiredLocks(Clock clock, Duration expiryTime) {
        int deleted = 0;
        for (var lock : curator.getChildren(locksPath)) {
            Path path = locksPath.append(lock);
            // If no creation time can be read, 'now' is used, which never expires the lock.
            if (zooKeeperNodeCreated(path).orElse(clock.instant()).isBefore(clock.instant().minus(expiryTime))) {
                log.log(Level.FINE, "Lock " + path + " has expired, deleting it");
                curator.delete(path);
                deleted++;
            }
        }
        return deleted;
    }

    private Optional<Instant> zooKeeperNodeCreated(Path path) {
        return curator.getStat(path).map(s -> Instant.ofEpochMilli(s.getCtime()));
    }

    private boolean sessionHasExpired(Instant created, Duration expiryTime, Clock clock) {
        return (created.plus(expiryTime).isBefore(clock.instant()));
    }

    private List<Long> getSessionListFromDirectoryCache(List<ChildData> children) {
        return getSessionList(children.stream()
                                      .map(child -> Path.fromString(child.getPath()).getName())
                                      .collect(Collectors.toList()));
    }

    private List<Long> getSessionList(List<String> children) {
        return children.stream().map(Long::parseLong).collect(Collectors.toList());
    }

    private void initializeRemoteSessions() throws NumberFormatException {
        getRemoteSessions().forEach(this::sessionAdded);
    }

    /** Reconciles the remote session cache with the current content of the directory cache. */
    private synchronized void sessionsChanged() throws NumberFormatException {
        List<Long> sessions = getSessionListFromDirectoryCache(directoryCache.getCurrentData());
        checkForRemovedSessions(sessions);
        checkForAddedSessions(sessions);
    }

    private void checkForRemovedSessions(List<Long> sessions) {
        for (RemoteSession session : remoteSessionCache.getSessions())
            if ( ! sessions.contains(session.getSessionId()))
                sessionRemoved(session.getSessionId());
    }

    private void checkForAddedSessions(List<Long> sessions) {
        for (Long sessionId : sessions)
            if (remoteSessionCache.getSession(sessionId) == null)
                sessionAdded(sessionId);
    }

    /**
     * A session for which we don't have a watcher, i.e. hitherto unknown to us.
     *
     * @param sessionId session id for the new session
     */
    public void sessionAdded(long sessionId) {
        SessionZooKeeperClient sessionZKClient = createSessionZooKeeperClient(sessionId);
        // Sessions already marked DELETE in ZooKeeper are ignored.
        if (sessionZKClient.readStatus().equals(Session.Status.DELETE)) return;
        log.log(Level.FINE, () -> "Adding remote session to SessionRepository: " + sessionId);
        RemoteSession remoteSession = createRemoteSession(sessionId);
        loadSessionIfActive(remoteSession);
        addRemoteSession(remoteSession);
        Optional<LocalSession> localSession = Optional.empty();
        if (distributeApplicationPackage())
            localSession = createLocalSessionUsingDistributedApplicationPackage(sessionId);
        addSessionStateWatcher(sessionId, remoteSession, localSession);
    }

    /** Activates a session: loads its application, reloads config and notifies the active waiter. */
    void activate(RemoteSession session) {
        long sessionId = session.getSessionId();
        Curator.CompletionWaiter waiter = createSessionZooKeeperClient(sessionId).getActiveWaiter();
        log.log(Level.FINE, () -> session.logPre() + "Getting session from repo: " + sessionId);
        ApplicationSet app = ensureApplicationLoaded(session);
        log.log(Level.FINE, () -> session.logPre() + "Reloading config for " + sessionId);
        applicationRepo.reloadConfig(app);
        log.log(Level.FINE, () -> session.logPre() + "Notifying " + waiter);
        notifyCompletion(waiter, session);
        log.log(Level.INFO, session.logPre() + "Session activated: " + sessionId);
    }

    /**
     * Deletes the local session (if any) and deactivates the remote session.
     * NOTE(review): the result of remoteSession.deactivate() is discarded here, while
     * deactivate(RemoteSession) above stores it back into the cache — confirm this is intended.
     */
    public void deleteSession(RemoteSession remoteSession, Optional<LocalSession> localSession) {
        localSession.ifPresent(this::deleteLocalSession);
        remoteSession.deactivate();
    }

    boolean distributeApplicationPackage() {
        return distributeApplicationPackage.value();
    }

    /** Removes the watcher and cache entry for a session that disappeared from ZooKeeper. */
    private void sessionRemoved(long sessionId) {
        SessionStateWatcher watcher = sessionStateWatchers.remove(sessionId);
        if (watcher != null) watcher.close();
        remoteSessionCache.removeSession(sessionId);
        metrics.incRemovedSessions();
    }

    /** If this session is the active session of some application, loads and reloads its config. */
    private void loadSessionIfActive(RemoteSession session) {
        for (ApplicationId applicationId : applicationRepo.activeApplications()) {
            if (applicationRepo.requireActiveSessionOf(applicationId) == session.getSessionId()) {
                log.log(Level.FINE, () -> "Found active application for session " + session.getSessionId() + " , loading it");
                applicationRepo.reloadConfig(ensureApplicationLoaded(session));
                log.log(Level.INFO, session.logPre() + "Application activated successfully: " + applicationId +
                        " (generation " + session.getSessionId() + ")");
                return;
            }
        }
    }

    /** Loads the session's application and notifies the prepare waiter. */
    void prepareRemoteSession(RemoteSession session) {
        SessionZooKeeperClient sessionZooKeeperClient = createSessionZooKeeperClient(session.getSessionId());
        Curator.CompletionWaiter waiter = sessionZooKeeperClient.getPrepareWaiter();
        ensureApplicationLoaded(session);
        notifyCompletion(waiter, session);
    }

    /**
     * Returns the session's application set, loading it (and caching a session carrying it)
     * if it is not already present.
     */
    public synchronized ApplicationSet ensureApplicationLoaded(RemoteSession session) {
        Optional<ApplicationSet> applicationSet = session.applicationSet();
        if (applicationSet.isPresent()) {
            return applicationSet.get();
        }
        ApplicationSet newApplicationSet = loadApplication(session);
        RemoteSession newSession = new RemoteSession(session.getTenantName(), session.getSessionId(),
                                                    session.getSessionZooKeeperClient(), Optional.of(newApplicationSet));
        remoteSessionCache.addSession(newSession);
        return newApplicationSet;
    }

    /**
     * Notifies the completion waiter; tolerates the waiter node having been deleted or
     * already existing (both mapped to a FINE log line), rethrows anything else.
     */
    void notifyCompletion(Curator.CompletionWaiter completionWaiter, RemoteSession session) {
        try {
            completionWaiter.notifyCompletion();
        } catch (RuntimeException e) {
            Set<Class<? extends KeeperException>> acceptedExceptions = Set.of(KeeperException.NoNodeException.class,
                                                                             KeeperException.NodeExistsException.class);
            Class<? extends Throwable> exceptionClass = e.getCause().getClass();
            if (acceptedExceptions.contains(exceptionClass))
                log.log(Level.FINE, "Not able to notify completion for session " + session.getSessionId() +
                        " (" + completionWaiter + ")," +
                        " node " + (exceptionClass.equals(KeeperException.NoNodeException.class)
                        ? "has been deleted"
                        : "already exists"));
            else
                throw e;
        }
    }

    /** Builds the application set for a session from its ZooKeeper-stored application package. */
    private ApplicationSet loadApplication(RemoteSession session) {
        SessionZooKeeperClient sessionZooKeeperClient = createSessionZooKeeperClient(session.getSessionId());
        ApplicationPackage applicationPackage = sessionZooKeeperClient.loadApplicationPackage();
        ActivatedModelsBuilder builder = new ActivatedModelsBuilder(session.getTenantName(),
                                                                    session.getSessionId(),
                                                                    sessionZooKeeperClient,
                                                                    componentRegistry);
        Optional<AllocatedHosts> allocatedHosts = applicationPackage.getAllocatedHosts();
        return ApplicationSet.fromList(builder.buildModels(session.getApplicationId(),
                                                           sessionZooKeeperClient.readDockerImageRepository(),
                                                           sessionZooKeeperClient.readVespaVersion(),
                                                           applicationPackage,
                                                           new SettableOptional<>(allocatedHosts),
                                                           clock.instant()));
    }

    /** Recomputes per-status session count metrics on the ZooKeeper watcher executor. */
    private void nodeChanged() {
        zkWatcherExecutor.execute(() -> {
            Multiset<Session.Status> sessionMetrics = HashMultiset.create();
            for (RemoteSession session : remoteSessionCache.getSessions()) {
                sessionMetrics.add(session.getStatus());
            }
            metrics.setNewSessions(sessionMetrics.count(Session.Status.NEW));
            metrics.setPreparedSessions(sessionMetrics.count(Session.Status.PREPARE));
            metrics.setActivatedSessions(sessionMetrics.count(Session.Status.ACTIVATE));
            metrics.setDeactivatedSessions(sessionMetrics.count(Session.Status.DEACTIVATE));
        });
    }

    /** Directory cache listener: reconciles sessions, and confirms upload for newly added ones. */
    @SuppressWarnings("unused")
    private void childEvent(CuratorFramework ignored, PathChildrenCacheEvent event) {
        zkWatcherExecutor.execute(() -> {
            log.log(Level.FINE, () -> "Got child event: " + event);
            switch (event.getType()) {
                case CHILD_ADDED:
                    sessionsChanged();
                    synchronizeOnNew(getSessionListFromDirectoryCache(Collections.singletonList(event.getData())));
                    break;
                case CHILD_REMOVED:
                case CONNECTION_RECONNECTED:
                    sessionsChanged();
                    break;
            }
        });
    }

    private void synchronizeOnNew(List<Long> sessionList) {
        for (long sessionId : sessionList) {
            RemoteSession session = remoteSessionCache.getSession(sessionId);
            if (session == null) continue; // session might have been deleted after getting session list
            log.log(Level.FINE, () -> session.logPre() + "Confirming upload for session " + sessionId);
            confirmUpload(session);
        }
    }

    /**
     * Creates a new deployment session from an application package.
     *
     * @param applicationDirectory a File pointing to an application.
     * @param applicationId application id for this new session.
     * @param timeoutBudget Timeout for creating session and waiting for other servers.
     * @param activeSessionId id of the application's currently active session, if any.
     * @return a new session
     */
    public LocalSession createSession(File applicationDirectory, ApplicationId applicationId,
                                      TimeoutBudget timeoutBudget, Optional<Long> activeSessionId) {
        return create(applicationDirectory, applicationId, activeSessionId, false, timeoutBudget);
    }

    public RemoteSession createRemoteSession(long sessionId) {
        SessionZooKeeperClient sessionZKClient = createSessionZooKeeperClient(sessionId);
        return new RemoteSession(tenantName, sessionId, sessionZKClient);
    }

    private void ensureSessionPathDoesNotExist(long sessionId) {
        Path sessionPath = getSessionPath(sessionId);
        if (componentRegistry.getConfigCurator().exists(sessionPath.getAbsolute())) {
            throw new IllegalArgumentException("Path " + sessionPath.getAbsolute() + " already exists in ZooKeeper");
        }
    }

    /** Creates the FilesApplicationPackage, recording deploy metadata (user, time, session ids). */
    private ApplicationPackage createApplication(File userDir,
                                                 File configApplicationDir,
                                                 ApplicationId applicationId,
                                                 long sessionId,
                                                 Optional<Long> currentlyActiveSessionId,
                                                 boolean internalRedeploy) {
        long deployTimestamp = System.currentTimeMillis();
        String user = System.getenv("USER");
        if (user == null) {
            user = "unknown";
        }
        DeployData deployData = new DeployData(user, userDir.getAbsolutePath(), applicationId, deployTimestamp,
                                               internalRedeploy, sessionId,
                                               currentlyActiveSessionId.orElse(nonExistingActiveSessionId));
        return FilesApplicationPackage.fromFileWithDeployData(configApplicationDir, deployData);
    }

    /** Creates the session node in ZooKeeper and waits for the upload waiter before returning. */
    private LocalSession createSessionFromApplication(ApplicationPackage applicationPackage,
                                                      long sessionId,
                                                      TimeoutBudget timeoutBudget,
                                                      Clock clock) {
        log.log(Level.FINE, TenantRepository.logPre(tenantName) + "Creating session " + sessionId + " in ZooKeeper");
        SessionZooKeeperClient sessionZKClient = createSessionZooKeeperClient(sessionId);
        sessionZKClient.createNewSession(clock.instant());
        Curator.CompletionWaiter waiter = sessionZKClient.getUploadWaiter();
        LocalSession session = new LocalSession(tenantName, sessionId, applicationPackage, sessionZKClient);
        waiter.awaitCompletion(timeoutBudget.timeLeft());
        return session;
    }

    /**
     * Creates a new deployment session from an already existing session.
     *
     * @param existingSession the session to use as base
     * @param logger a deploy logger where the deploy log will be written.
     * @param internalRedeploy whether this session is for a system internal redeploy — not an application package change
     * @param timeoutBudget timeout for creating session and waiting for other servers.
     * @return a new session
     */
    public LocalSession createSessionFromExisting(Session existingSession,
                                                  DeployLogger logger,
                                                  boolean internalRedeploy,
                                                  TimeoutBudget timeoutBudget) {
        File existingApp = getSessionAppDir(existingSession.getSessionId());
        ApplicationId existingApplicationId = existingSession.getApplicationId();
        Optional<Long> activeSessionId = getActiveSessionId(existingApplicationId);
        logger.log(Level.FINE, "Create new session for application id '" + existingApplicationId +
                   "' from existing active session " + activeSessionId);
        LocalSession session = create(existingApp, existingApplicationId, activeSessionId, internalRedeploy, timeoutBudget);
        // Carry over the deployment settings from the session we are based on.
        session.setApplicationId(existingApplicationId);
        if (distributeApplicationPackage() && existingSession.getApplicationPackageReference() != null) {
            session.setApplicationPackageReference(existingSession.getApplicationPackageReference());
        }
        session.setVespaVersion(existingSession.getVespaVersion());
        session.setDockerImageRepository(existingSession.getDockerImageRepository());
        session.setAthenzDomain(existingSession.getAthenzDomain());
        return session;
    }

    private LocalSession create(File applicationFile, ApplicationId applicationId,
                                Optional<Long> currentlyActiveSessionId, boolean internalRedeploy,
                                TimeoutBudget timeoutBudget) {
        long sessionId = getNextSessionId();
        try {
            ensureSessionPathDoesNotExist(sessionId);
            ApplicationPackage app = createApplicationPackage(applicationFile, applicationId, sessionId,
                                                              currentlyActiveSessionId, internalRedeploy);
            return createSessionFromApplication(app, sessionId, timeoutBudget, clock);
        } catch (Exception e) {
            throw new RuntimeException("Error creating session " + sessionId, e);
        }
    }

    /**
     * This method is used when creating a session based on a remote session and the distributed application package.
     * It does not wait for session being created on other servers.
     */
    private LocalSession createLocalSession(File applicationFile, ApplicationId applicationId, long sessionId) {
        try {
            Optional<Long> currentlyActiveSessionId = getActiveSessionId(applicationId);
            ApplicationPackage applicationPackage = createApplicationPackage(applicationFile, applicationId, sessionId,
                                                                             currentlyActiveSessionId, false);
            SessionZooKeeperClient sessionZooKeeperClient = createSessionZooKeeperClient(sessionId);
            return new LocalSession(tenantName, sessionId, applicationPackage, sessionZooKeeperClient);
        } catch (Exception e) {
            throw new RuntimeException("Error creating session " + sessionId, e);
        }
    }

    /** Copies the application into this session's directory and writes its metadata. */
    private ApplicationPackage createApplicationPackage(File applicationFile, ApplicationId applicationId,
                                                        long sessionId, Optional<Long> currentlyActiveSessionId,
                                                        boolean internalRedeploy) throws IOException {
        File userApplicationDir = getSessionAppDir(sessionId);
        copyApp(applicationFile, userApplicationDir);
        ApplicationPackage applicationPackage = createApplication(applicationFile,
                                                                  userApplicationDir,
                                                                  applicationId,
                                                                  sessionId,
                                                                  currentlyActiveSessionId,
                                                                  internalRedeploy);
        applicationPackage.writeMetaData();
        return applicationPackage;
    }

    /** Copies via a temp directory and an atomic move, so the destination never appears half-written. */
    private void copyApp(File sourceDir, File destinationDir) throws IOException {
        if (destinationDir.exists())
            throw new RuntimeException("Destination dir " + destinationDir + " already exists");
        if (! sourceDir.isDirectory())
            throw new IllegalArgumentException(sourceDir.getAbsolutePath() + " is not a directory");
        java.nio.file.Path tempDestinationDir = null;
        try {
            tempDestinationDir = Files.createTempDirectory(destinationDir.getParentFile().toPath(), "app-package");
            log.log(Level.FINE, "Copying dir " + sourceDir.getAbsolutePath() + " to " + tempDestinationDir.toFile().getAbsolutePath());
            IOUtils.copyDirectory(sourceDir, tempDestinationDir.toFile());
            log.log(Level.FINE, "Moving " + tempDestinationDir + " to " + destinationDir.getAbsolutePath());
            Files.move(tempDestinationDir, destinationDir.toPath(), StandardCopyOption.ATOMIC_MOVE);
        } finally {
            // No-op if the move above succeeded (the temp dir no longer exists then).
            if (tempDestinationDir != null)
                IOUtils.recursiveDeleteDir(tempDestinationDir.toFile());
        }
    }

    /**
     * Returns a new session instance for the given session id.
     */
    LocalSession createSessionFromId(long sessionId) {
        File sessionDir = getAndValidateExistingSessionAppDir(sessionId);
        ApplicationPackage applicationPackage = FilesApplicationPackage.fromFile(sessionDir);
        SessionZooKeeperClient sessionZKClient = createSessionZooKeeperClient(sessionId);
        return new LocalSession(tenantName, sessionId, applicationPackage, sessionZKClient);
    }

    /**
     * Returns a new local session for the given session id if it does not already exist.
     * Will also add the session to the local session cache if necessary.
     */
    public Optional<LocalSession> createLocalSessionUsingDistributedApplicationPackage(long sessionId) {
        if (applicationRepo.hasLocalSession(sessionId)) {
            log.log(Level.FINE, "Local session for session id " + sessionId + " already exists");
            return Optional.of(createSessionFromId(sessionId));
        }
        SessionZooKeeperClient sessionZKClient = createSessionZooKeeperClient(sessionId);
        FileReference fileReference = sessionZKClient.readApplicationPackageReference();
        log.log(Level.FINE, "File reference for session id " + sessionId + ": " + fileReference);
        if (fileReference != null) {
            File rootDir = new File(Defaults.getDefaults().underVespaHome(componentRegistry.getConfigserverConfig().fileReferencesDir()));
            File sessionDir;
            FileDirectory fileDirectory = new FileDirectory(rootDir);
            try {
                sessionDir = fileDirectory.getFile(fileReference);
            } catch (IllegalArgumentException e) {
                // The file reference may not have been downloaded to this server yet.
                log.log(Level.INFO, "File reference for session id " + sessionId + ": " + fileReference + " not found in " + fileDirectory);
                return Optional.empty();
            }
            ApplicationId applicationId = sessionZKClient.readApplicationId()
                    .orElseThrow(() -> new RuntimeException("Could not find application id for session " + sessionId));
            log.log(Level.INFO, "Creating local session for tenant '" + tenantName + "' with session id " + sessionId);
            LocalSession localSession = createLocalSession(sessionDir, applicationId, sessionId);
            addLocalSession(localSession);
            return Optional.of(localSession);
        }
        return Optional.empty();
    }

    private Optional<Long> getActiveSessionId(ApplicationId applicationId) {
        List<ApplicationId> applicationIds = applicationRepo.activeApplications();
        return applicationIds.contains(applicationId)
                ? Optional.of(applicationRepo.requireActiveSessionOf(applicationId))
                : Optional.empty();
    }

    private long getNextSessionId() {
        return new SessionCounter(componentRegistry.getConfigCurator(), tenantName).nextSessionId();
    }

    public Path getSessionPath(long sessionId) {
        return sessionsPath.append(String.valueOf(sessionId));
    }

    Path getSessionStatePath(long sessionId) {
        return getSessionPath(sessionId).append(ConfigCurator.SESSIONSTATE_ZK_SUBPATH);
    }

    private SessionZooKeeperClient createSessionZooKeeperClient(long sessionId) {
        String serverId = componentRegistry.getConfigserverConfig().serverId();
        return new SessionZooKeeperClient(curator, componentRegistry.getConfigCurator(), tenantName, sessionId, serverId);
    }

    private File getAndValidateExistingSessionAppDir(long sessionId) {
        File appDir = getSessionAppDir(sessionId);
        if (!appDir.exists() || !appDir.isDirectory()) {
            throw new IllegalArgumentException("Unable to find correct application directory for session " + sessionId);
        }
        return appDir;
    }

    private File getSessionAppDir(long sessionId) {
        return new TenantFileSystemDirs(componentRegistry.getConfigServerDB(), tenantName).getUserApplicationDir(sessionId);
    }

    /** Registers (or extends) the state watcher for a session's ZooKeeper state node. */
    private void addSessionStateWatcher(long sessionId, RemoteSession remoteSession, Optional<LocalSession> localSession) {
        if (sessionStateWatchers.containsKey(sessionId)) {
            localSession.ifPresent(session -> sessionStateWatchers.get(sessionId).addLocalSession(session));
        } else {
            Curator.FileCache fileCache = curator.createFileCache(getSessionStatePath(sessionId).getAbsolute(), false);
            fileCache.addListener(this::nodeChanged);
            sessionStateWatchers.put(sessionId, new SessionStateWatcher(fileCache, remoteSession, localSession,
                                                                        metrics, zkWatcherExecutor, this));
        }
    }

    @Override
    public String toString() {
        return getLocalSessions().toString();
    }

    /** Returns the lock for session operations for the given session id. */
    public Lock lock(long sessionId) {
        return curator.lock(lockPath(sessionId), Duration.ofMinutes(1));
    }

    public Clock clock() { return clock; }

    private Path lockPath(long sessionId) {
        return locksPath.append(String.valueOf(sessionId));
    }

    /** Creates a transaction which sets the session ACTIVE and records it for its application. */
    public Transaction createActivateTransaction(Session session) {
        Transaction transaction = createSetStatusTransaction(session, Session.Status.ACTIVATE);
        transaction.add(applicationRepo.createPutTransaction(session.getApplicationId(), session.getSessionId()).operations());
        return transaction;
    }

    private Transaction createSetStatusTransaction(Session session, Session.Status status) {
        return session.sessionZooKeeperClient.createWriteStatusTransaction(status);
    }

    void setPrepared(Session session) {
        session.setStatus(Session.Status.PREPARE);
    }

    /** A transaction whose operations are file-system operations, committed together. */
    private static class FileTransaction extends AbstractTransaction {

        public static FileTransaction from(FileOperation operation) {
            FileTransaction transaction = new FileTransaction();
            transaction.add(operation);
            return transaction;
        }

        @Override
        public void prepare() { }

        @Override
        public void commit() {
            for (Operation operation : operations())
                ((FileOperation)operation).commit();
        }
    }

    /** Factory for file operations */
    private static class FileOperations {

        /** Creates an operation which recursively deletes the given path */
        public static DeleteOperation delete(String pathToDelete) {
            return new DeleteOperation(pathToDelete);
        }
    }

    private interface FileOperation extends Transaction.Operation {
        void commit();
    }

    /**
     * Recursively deletes this path and everything below.
     * Succeeds with no action if the path does not exist.
     */
    private static class DeleteOperation implements FileOperation {

        private final String pathToDelete;

        DeleteOperation(String pathToDelete) {
            this.pathToDelete = pathToDelete;
        }

        @Override
        public void commit() {
            IOUtils.recursiveDeleteDir(new File(pathToDelete));
        }
    }

}
class SessionRepository { private static final Logger log = Logger.getLogger(SessionRepository.class.getName()); private static final FilenameFilter sessionApplicationsFilter = (dir, name) -> name.matches("\\d+"); private static final long nonExistingActiveSessionId = 0; private final SessionCache<LocalSession> localSessionCache = new SessionCache<>(); private final SessionCache<RemoteSession> remoteSessionCache = new SessionCache<>(); private final Map<Long, SessionStateWatcher> sessionStateWatchers = new HashMap<>(); private final Duration sessionLifetime; private final Clock clock; private final Curator curator; private final Executor zkWatcherExecutor; private final TenantFileSystemDirs tenantFileSystemDirs; private final BooleanFlag distributeApplicationPackage; private final MetricUpdater metrics; private final Curator.DirectoryCache directoryCache; private final TenantApplications applicationRepo; private final SessionPreparer sessionPreparer; private final Path sessionsPath; private final TenantName tenantName; private final GlobalComponentRegistry componentRegistry; private final Path locksPath; public SessionRepository(TenantName tenantName, GlobalComponentRegistry componentRegistry, TenantApplications applicationRepo, FlagSource flagSource, SessionPreparer sessionPreparer) { this.tenantName = tenantName; this.componentRegistry = componentRegistry; this.sessionsPath = TenantRepository.getSessionsPath(tenantName); this.clock = componentRegistry.getClock(); this.curator = componentRegistry.getCurator(); this.sessionLifetime = Duration.ofSeconds(componentRegistry.getConfigserverConfig().sessionLifetime()); this.zkWatcherExecutor = command -> componentRegistry.getZkWatcherExecutor().execute(tenantName, command); this.tenantFileSystemDirs = new TenantFileSystemDirs(componentRegistry.getConfigServerDB(), tenantName); this.applicationRepo = applicationRepo; this.sessionPreparer = sessionPreparer; this.distributeApplicationPackage = 
Flags.CONFIGSERVER_DISTRIBUTE_APPLICATION_PACKAGE.bindTo(flagSource); this.metrics = componentRegistry.getMetrics().getOrCreateMetricUpdater(Metrics.createDimensions(tenantName)); this.locksPath = TenantRepository.getLocksPath(tenantName); loadSessions(); this.directoryCache = curator.createDirectoryCache(sessionsPath.getAbsolute(), false, false, componentRegistry.getZkCacheExecutor()); this.directoryCache.addListener(this::childEvent); this.directoryCache.start(); } private void loadSessions() { loadLocalSessions(); initializeRemoteSessions(); } public synchronized void addLocalSession(LocalSession session) { localSessionCache.putSession(session); long sessionId = session.getSessionId(); RemoteSession remoteSession = createRemoteSession(sessionId); addSessionStateWatcher(sessionId, remoteSession, Optional.of(session)); } public LocalSession getLocalSession(long sessionId) { return localSessionCache.getSession(sessionId); } public List<LocalSession> getLocalSessions() { return localSessionCache.getSessions(); } private void loadLocalSessions() { File[] sessions = tenantFileSystemDirs.sessionsPath().listFiles(sessionApplicationsFilter); if (sessions == null) return; for (File session : sessions) { try { addLocalSession(createSessionFromId(Long.parseLong(session.getName()))); } catch (IllegalArgumentException e) { log.log(Level.WARNING, "Could not load session '" + session.getAbsolutePath() + "':" + e.getMessage() + ", skipping it."); } } } public ConfigChangeActions prepareLocalSession(LocalSession session, DeployLogger logger, PrepareParams params, Optional<ApplicationSet> currentActiveApplicationSet, Path tenantPath, Instant now) { applicationRepo.createApplication(params.getApplicationId()); logger.log(Level.FINE, "Created application " + params.getApplicationId()); long sessionId = session.getSessionId(); SessionZooKeeperClient sessionZooKeeperClient = createSessionZooKeeperClient(sessionId); Curator.CompletionWaiter waiter = 
sessionZooKeeperClient.createPrepareWaiter(); ConfigChangeActions actions = sessionPreparer.prepare(applicationRepo.getHostValidator(), logger, params, currentActiveApplicationSet, tenantPath, now, getSessionAppDir(sessionId), session.getApplicationPackage(), sessionZooKeeperClient) .getConfigChangeActions(); setPrepared(session); waiter.awaitCompletion(params.getTimeoutBudget().timeLeft()); return actions; } public void deleteExpiredSessions(Map<ApplicationId, Long> activeSessions) { log.log(Level.FINE, "Purging old sessions for tenant '" + tenantName + "'"); try { for (LocalSession candidate : localSessionCache.getSessions()) { Instant createTime = candidate.getCreateTime(); log.log(Level.FINE, "Candidate session for deletion: " + candidate.getSessionId() + ", created: " + createTime); if (hasExpired(candidate) && !isActiveSession(candidate)) { deleteLocalSession(candidate); } else if (createTime.plus(Duration.ofDays(1)).isBefore(clock.instant())) { Optional<ApplicationId> applicationId = candidate.getOptionalApplicationId(); if (applicationId.isEmpty()) continue; Long activeSession = activeSessions.get(applicationId.get()); if (activeSession == null || activeSession != candidate.getSessionId()) { deleteLocalSession(candidate); log.log(Level.INFO, "Deleted inactive session " + candidate.getSessionId() + " created " + createTime + " for '" + applicationId + "'"); } } } } catch (Throwable e) { log.log(Level.WARNING, "Error when purging old sessions ", e); } log.log(Level.FINE, "Done purging old sessions"); } private boolean hasExpired(LocalSession candidate) { return (candidate.getCreateTime().plus(sessionLifetime).isBefore(clock.instant())); } private boolean isActiveSession(LocalSession candidate) { return candidate.getStatus() == Session.Status.ACTIVATE; } public void deleteLocalSession(LocalSession session) { long sessionId = session.getSessionId(); try (Lock lock = lock(sessionId)) { log.log(Level.FINE, "Deleting local session " + sessionId); 
SessionStateWatcher watcher = sessionStateWatchers.remove(sessionId); if (watcher != null) watcher.close(); localSessionCache.removeSession(sessionId); deletePersistentData(sessionId); } } private void deletePersistentData(long sessionId) { NestedTransaction transaction = new NestedTransaction(); SessionZooKeeperClient sessionZooKeeperClient = createSessionZooKeeperClient(sessionId); transaction.add(sessionZooKeeperClient.deleteTransaction(), FileTransaction.class); transaction.add(FileTransaction.from(FileOperations.delete(getSessionAppDir(sessionId).getAbsolutePath()))); transaction.commit(); } public void close() { deleteAllSessions(); tenantFileSystemDirs.delete(); try { if (directoryCache != null) { directoryCache.close(); } } catch (Exception e) { log.log(Level.WARNING, "Exception when closing path cache", e); } finally { checkForRemovedSessions(new ArrayList<>()); } } private void deleteAllSessions() { List<LocalSession> sessions = new ArrayList<>(localSessionCache.getSessions()); for (LocalSession session : sessions) { deleteLocalSession(session); } } public RemoteSession getRemoteSession(long sessionId) { return remoteSessionCache.getSession(sessionId); } public List<Long> getRemoteSessions() { return getSessionList(curator.getChildren(sessionsPath)); } public void addRemoteSession(RemoteSession session) { remoteSessionCache.putSession(session); metrics.incAddedSessions(); } public int deleteExpiredRemoteSessions(Clock clock, Duration expiryTime) { int deleted = 0; for (long sessionId : getRemoteSessions()) { RemoteSession session = remoteSessionCache.getSession(sessionId); if (session == null) continue; if (session.getStatus() == Session.Status.ACTIVATE) continue; if (sessionHasExpired(session.getCreateTime(), expiryTime, clock)) { log.log(Level.FINE, "Remote session " + sessionId + " for " + tenantName + " has expired, deleting it"); deleteSession(session); deleted++; } } return deleted; } public void deactivate(RemoteSession remoteSession) { 
remoteSessionCache.putSession(remoteSession.deactivated()); } public void deleteSession(RemoteSession session) { SessionZooKeeperClient sessionZooKeeperClient = createSessionZooKeeperClient(session.getSessionId()); Transaction transaction = sessionZooKeeperClient.deleteTransaction(); transaction.commit(); transaction.close(); } public int deleteExpiredLocks(Clock clock, Duration expiryTime) { int deleted = 0; for (var lock : curator.getChildren(locksPath)) { Path path = locksPath.append(lock); if (zooKeeperNodeCreated(path).orElse(clock.instant()).isBefore(clock.instant().minus(expiryTime))) { log.log(Level.FINE, "Lock " + path + " has expired, deleting it"); curator.delete(path); deleted++; } } return deleted; } private Optional<Instant> zooKeeperNodeCreated(Path path) { return curator.getStat(path).map(s -> Instant.ofEpochMilli(s.getCtime())); } private boolean sessionHasExpired(Instant created, Duration expiryTime, Clock clock) { return (created.plus(expiryTime).isBefore(clock.instant())); } private List<Long> getSessionListFromDirectoryCache(List<ChildData> children) { return getSessionList(children.stream() .map(child -> Path.fromString(child.getPath()).getName()) .collect(Collectors.toList())); } private List<Long> getSessionList(List<String> children) { return children.stream().map(Long::parseLong).collect(Collectors.toList()); } private void initializeRemoteSessions() throws NumberFormatException { getRemoteSessions().forEach(this::sessionAdded); } private synchronized void sessionsChanged() throws NumberFormatException { List<Long> sessions = getSessionListFromDirectoryCache(directoryCache.getCurrentData()); checkForRemovedSessions(sessions); checkForAddedSessions(sessions); } private void checkForRemovedSessions(List<Long> sessions) { for (RemoteSession session : remoteSessionCache.getSessions()) if ( ! 
sessions.contains(session.getSessionId())) sessionRemoved(session.getSessionId()); } private void checkForAddedSessions(List<Long> sessions) { for (Long sessionId : sessions) if (remoteSessionCache.getSession(sessionId) == null) sessionAdded(sessionId); } /** * A session for which we don't have a watcher, i.e. hitherto unknown to us. * * @param sessionId session id for the new session */ public void sessionAdded(long sessionId) { SessionZooKeeperClient sessionZKClient = createSessionZooKeeperClient(sessionId); if (sessionZKClient.readStatus().equals(Session.Status.DELETE)) return; log.log(Level.FINE, () -> "Adding remote session to SessionRepository: " + sessionId); RemoteSession remoteSession = createRemoteSession(sessionId); loadSessionIfActive(remoteSession); addRemoteSession(remoteSession); Optional<LocalSession> localSession = Optional.empty(); if (distributeApplicationPackage()) localSession = createLocalSessionUsingDistributedApplicationPackage(sessionId); addSessionStateWatcher(sessionId, remoteSession, localSession); } void activate(RemoteSession session) { long sessionId = session.getSessionId(); Curator.CompletionWaiter waiter = createSessionZooKeeperClient(sessionId).getActiveWaiter(); log.log(Level.FINE, () -> session.logPre() + "Getting session from repo: " + sessionId); ApplicationSet app = ensureApplicationLoaded(session); log.log(Level.FINE, () -> session.logPre() + "Reloading config for " + sessionId); applicationRepo.reloadConfig(app); log.log(Level.FINE, () -> session.logPre() + "Notifying " + waiter); notifyCompletion(waiter, session); log.log(Level.INFO, session.logPre() + "Session activated: " + sessionId); } public void deleteSession(RemoteSession remoteSession, Optional<LocalSession> localSession) { localSession.ifPresent(this::deleteLocalSession); deactivate(remoteSession); } boolean distributeApplicationPackage() { return distributeApplicationPackage.value(); } private void sessionRemoved(long sessionId) { SessionStateWatcher watcher = 
sessionStateWatchers.remove(sessionId); if (watcher != null) watcher.close(); remoteSessionCache.removeSession(sessionId); metrics.incRemovedSessions(); } private void loadSessionIfActive(RemoteSession session) { for (ApplicationId applicationId : applicationRepo.activeApplications()) { if (applicationRepo.requireActiveSessionOf(applicationId) == session.getSessionId()) { log.log(Level.FINE, () -> "Found active application for session " + session.getSessionId() + " , loading it"); applicationRepo.reloadConfig(ensureApplicationLoaded(session)); log.log(Level.INFO, session.logPre() + "Application activated successfully: " + applicationId + " (generation " + session.getSessionId() + ")"); return; } } } void prepareRemoteSession(RemoteSession session) { SessionZooKeeperClient sessionZooKeeperClient = createSessionZooKeeperClient(session.getSessionId()); Curator.CompletionWaiter waiter = sessionZooKeeperClient.getPrepareWaiter(); ensureApplicationLoaded(session); notifyCompletion(waiter, session); } public synchronized ApplicationSet ensureApplicationLoaded(RemoteSession session) { Optional<ApplicationSet> applicationSet = session.applicationSet(); if (applicationSet.isPresent()) { return applicationSet.get(); } ApplicationSet newApplicationSet = loadApplication(session); RemoteSession newSession = new RemoteSession(session.getTenantName(), session.getSessionId(), session.getSessionZooKeeperClient(), Optional.of(newApplicationSet)); remoteSessionCache.putSession(newSession); return newApplicationSet; } void notifyCompletion(Curator.CompletionWaiter completionWaiter, RemoteSession session) { try { completionWaiter.notifyCompletion(); } catch (RuntimeException e) { Set<Class<? extends KeeperException>> acceptedExceptions = Set.of(KeeperException.NoNodeException.class, KeeperException.NodeExistsException.class); Class<? 
extends Throwable> exceptionClass = e.getCause().getClass(); if (acceptedExceptions.contains(exceptionClass)) log.log(Level.FINE, "Not able to notify completion for session " + session.getSessionId() + " (" + completionWaiter + ")," + " node " + (exceptionClass.equals(KeeperException.NoNodeException.class) ? "has been deleted" : "already exists")); else throw e; } } private ApplicationSet loadApplication(RemoteSession session) { SessionZooKeeperClient sessionZooKeeperClient = createSessionZooKeeperClient(session.getSessionId()); ApplicationPackage applicationPackage = sessionZooKeeperClient.loadApplicationPackage(); ActivatedModelsBuilder builder = new ActivatedModelsBuilder(session.getTenantName(), session.getSessionId(), sessionZooKeeperClient, componentRegistry); Optional<AllocatedHosts> allocatedHosts = applicationPackage.getAllocatedHosts(); return ApplicationSet.fromList(builder.buildModels(session.getApplicationId(), sessionZooKeeperClient.readDockerImageRepository(), sessionZooKeeperClient.readVespaVersion(), applicationPackage, new SettableOptional<>(allocatedHosts), clock.instant())); } private void nodeChanged() { zkWatcherExecutor.execute(() -> { Multiset<Session.Status> sessionMetrics = HashMultiset.create(); for (RemoteSession session : remoteSessionCache.getSessions()) { sessionMetrics.add(session.getStatus()); } metrics.setNewSessions(sessionMetrics.count(Session.Status.NEW)); metrics.setPreparedSessions(sessionMetrics.count(Session.Status.PREPARE)); metrics.setActivatedSessions(sessionMetrics.count(Session.Status.ACTIVATE)); metrics.setDeactivatedSessions(sessionMetrics.count(Session.Status.DEACTIVATE)); }); } @SuppressWarnings("unused") private void childEvent(CuratorFramework ignored, PathChildrenCacheEvent event) { zkWatcherExecutor.execute(() -> { log.log(Level.FINE, () -> "Got child event: " + event); switch (event.getType()) { case CHILD_ADDED: sessionsChanged(); 
synchronizeOnNew(getSessionListFromDirectoryCache(Collections.singletonList(event.getData()))); break; case CHILD_REMOVED: case CONNECTION_RECONNECTED: sessionsChanged(); break; } }); } private void synchronizeOnNew(List<Long> sessionList) { for (long sessionId : sessionList) { RemoteSession session = remoteSessionCache.getSession(sessionId); if (session == null) continue; log.log(Level.FINE, () -> session.logPre() + "Confirming upload for session " + sessionId); confirmUpload(session); } } /** * Creates a new deployment session from an application package. * * @param applicationDirectory a File pointing to an application. * @param applicationId application id for this new session. * @param timeoutBudget Timeout for creating session and waiting for other servers. * @return a new session */ public LocalSession createSession(File applicationDirectory, ApplicationId applicationId, TimeoutBudget timeoutBudget, Optional<Long> activeSessionId) { return create(applicationDirectory, applicationId, activeSessionId, false, timeoutBudget); } public RemoteSession createRemoteSession(long sessionId) { SessionZooKeeperClient sessionZKClient = createSessionZooKeeperClient(sessionId); return new RemoteSession(tenantName, sessionId, sessionZKClient); } private void ensureSessionPathDoesNotExist(long sessionId) { Path sessionPath = getSessionPath(sessionId); if (componentRegistry.getConfigCurator().exists(sessionPath.getAbsolute())) { throw new IllegalArgumentException("Path " + sessionPath.getAbsolute() + " already exists in ZooKeeper"); } } private ApplicationPackage createApplication(File userDir, File configApplicationDir, ApplicationId applicationId, long sessionId, Optional<Long> currentlyActiveSessionId, boolean internalRedeploy) { long deployTimestamp = System.currentTimeMillis(); String user = System.getenv("USER"); if (user == null) { user = "unknown"; } DeployData deployData = new DeployData(user, userDir.getAbsolutePath(), applicationId, deployTimestamp, 
internalRedeploy, sessionId, currentlyActiveSessionId.orElse(nonExistingActiveSessionId)); return FilesApplicationPackage.fromFileWithDeployData(configApplicationDir, deployData); } private LocalSession createSessionFromApplication(ApplicationPackage applicationPackage, long sessionId, TimeoutBudget timeoutBudget, Clock clock) { log.log(Level.FINE, TenantRepository.logPre(tenantName) + "Creating session " + sessionId + " in ZooKeeper"); SessionZooKeeperClient sessionZKClient = createSessionZooKeeperClient(sessionId); sessionZKClient.createNewSession(clock.instant()); Curator.CompletionWaiter waiter = sessionZKClient.getUploadWaiter(); LocalSession session = new LocalSession(tenantName, sessionId, applicationPackage, sessionZKClient); waiter.awaitCompletion(timeoutBudget.timeLeft()); return session; } /** * Creates a new deployment session from an already existing session. * * @param existingSession the session to use as base * @param logger a deploy logger where the deploy log will be written. * @param internalRedeploy whether this session is for a system internal redeploy — not an application package change * @param timeoutBudget timeout for creating session and waiting for other servers. 
* @return a new session */ public LocalSession createSessionFromExisting(Session existingSession, DeployLogger logger, boolean internalRedeploy, TimeoutBudget timeoutBudget) { File existingApp = getSessionAppDir(existingSession.getSessionId()); ApplicationId existingApplicationId = existingSession.getApplicationId(); Optional<Long> activeSessionId = getActiveSessionId(existingApplicationId); logger.log(Level.FINE, "Create new session for application id '" + existingApplicationId + "' from existing active session " + activeSessionId); LocalSession session = create(existingApp, existingApplicationId, activeSessionId, internalRedeploy, timeoutBudget); session.setApplicationId(existingApplicationId); if (distributeApplicationPackage() && existingSession.getApplicationPackageReference() != null) { session.setApplicationPackageReference(existingSession.getApplicationPackageReference()); } session.setVespaVersion(existingSession.getVespaVersion()); session.setDockerImageRepository(existingSession.getDockerImageRepository()); session.setAthenzDomain(existingSession.getAthenzDomain()); return session; } private LocalSession create(File applicationFile, ApplicationId applicationId, Optional<Long> currentlyActiveSessionId, boolean internalRedeploy, TimeoutBudget timeoutBudget) { long sessionId = getNextSessionId(); try { ensureSessionPathDoesNotExist(sessionId); ApplicationPackage app = createApplicationPackage(applicationFile, applicationId, sessionId, currentlyActiveSessionId, internalRedeploy); return createSessionFromApplication(app, sessionId, timeoutBudget, clock); } catch (Exception e) { throw new RuntimeException("Error creating session " + sessionId, e); } } /** * This method is used when creating a session based on a remote session and the distributed application package * It does not wait for session being created on other servers */ private LocalSession createLocalSession(File applicationFile, ApplicationId applicationId, long sessionId) { try { Optional<Long> 
currentlyActiveSessionId = getActiveSessionId(applicationId); ApplicationPackage applicationPackage = createApplicationPackage(applicationFile, applicationId, sessionId, currentlyActiveSessionId, false); SessionZooKeeperClient sessionZooKeeperClient = createSessionZooKeeperClient(sessionId); return new LocalSession(tenantName, sessionId, applicationPackage, sessionZooKeeperClient); } catch (Exception e) { throw new RuntimeException("Error creating session " + sessionId, e); } } private ApplicationPackage createApplicationPackage(File applicationFile, ApplicationId applicationId, long sessionId, Optional<Long> currentlyActiveSessionId, boolean internalRedeploy) throws IOException { File userApplicationDir = getSessionAppDir(sessionId); copyApp(applicationFile, userApplicationDir); ApplicationPackage applicationPackage = createApplication(applicationFile, userApplicationDir, applicationId, sessionId, currentlyActiveSessionId, internalRedeploy); applicationPackage.writeMetaData(); return applicationPackage; } private void copyApp(File sourceDir, File destinationDir) throws IOException { if (destinationDir.exists()) throw new RuntimeException("Destination dir " + destinationDir + " already exists"); if (! 
sourceDir.isDirectory()) throw new IllegalArgumentException(sourceDir.getAbsolutePath() + " is not a directory"); java.nio.file.Path tempDestinationDir = null; try { tempDestinationDir = Files.createTempDirectory(destinationDir.getParentFile().toPath(), "app-package"); log.log(Level.FINE, "Copying dir " + sourceDir.getAbsolutePath() + " to " + tempDestinationDir.toFile().getAbsolutePath()); IOUtils.copyDirectory(sourceDir, tempDestinationDir.toFile()); log.log(Level.FINE, "Moving " + tempDestinationDir + " to " + destinationDir.getAbsolutePath()); Files.move(tempDestinationDir, destinationDir.toPath(), StandardCopyOption.ATOMIC_MOVE); } finally { if (tempDestinationDir != null) IOUtils.recursiveDeleteDir(tempDestinationDir.toFile()); } } /** * Returns a new session instance for the given session id. */ LocalSession createSessionFromId(long sessionId) { File sessionDir = getAndValidateExistingSessionAppDir(sessionId); ApplicationPackage applicationPackage = FilesApplicationPackage.fromFile(sessionDir); SessionZooKeeperClient sessionZKClient = createSessionZooKeeperClient(sessionId); return new LocalSession(tenantName, sessionId, applicationPackage, sessionZKClient); } /** * Returns a new local session for the given session id if it does not already exist. 
* Will also add the session to the local session cache if necessary */ public Optional<LocalSession> createLocalSessionUsingDistributedApplicationPackage(long sessionId) { if (applicationRepo.hasLocalSession(sessionId)) { log.log(Level.FINE, "Local session for session id " + sessionId + " already exists"); return Optional.of(createSessionFromId(sessionId)); } SessionZooKeeperClient sessionZKClient = createSessionZooKeeperClient(sessionId); FileReference fileReference = sessionZKClient.readApplicationPackageReference(); log.log(Level.FINE, "File reference for session id " + sessionId + ": " + fileReference); if (fileReference != null) { File rootDir = new File(Defaults.getDefaults().underVespaHome(componentRegistry.getConfigserverConfig().fileReferencesDir())); File sessionDir; FileDirectory fileDirectory = new FileDirectory(rootDir); try { sessionDir = fileDirectory.getFile(fileReference); } catch (IllegalArgumentException e) { log.log(Level.INFO, "File reference for session id " + sessionId + ": " + fileReference + " not found in " + fileDirectory); return Optional.empty(); } ApplicationId applicationId = sessionZKClient.readApplicationId() .orElseThrow(() -> new RuntimeException("Could not find application id for session " + sessionId)); log.log(Level.INFO, "Creating local session for tenant '" + tenantName + "' with session id " + sessionId); LocalSession localSession = createLocalSession(sessionDir, applicationId, sessionId); addLocalSession(localSession); return Optional.of(localSession); } return Optional.empty(); } private Optional<Long> getActiveSessionId(ApplicationId applicationId) { List<ApplicationId> applicationIds = applicationRepo.activeApplications(); return applicationIds.contains(applicationId) ? 
Optional.of(applicationRepo.requireActiveSessionOf(applicationId)) : Optional.empty(); } private long getNextSessionId() { return new SessionCounter(componentRegistry.getConfigCurator(), tenantName).nextSessionId(); } public Path getSessionPath(long sessionId) { return sessionsPath.append(String.valueOf(sessionId)); } Path getSessionStatePath(long sessionId) { return getSessionPath(sessionId).append(ConfigCurator.SESSIONSTATE_ZK_SUBPATH); } private SessionZooKeeperClient createSessionZooKeeperClient(long sessionId) { String serverId = componentRegistry.getConfigserverConfig().serverId(); return new SessionZooKeeperClient(curator, componentRegistry.getConfigCurator(), tenantName, sessionId, serverId); } private File getAndValidateExistingSessionAppDir(long sessionId) { File appDir = getSessionAppDir(sessionId); if (!appDir.exists() || !appDir.isDirectory()) { throw new IllegalArgumentException("Unable to find correct application directory for session " + sessionId); } return appDir; } private File getSessionAppDir(long sessionId) { return new TenantFileSystemDirs(componentRegistry.getConfigServerDB(), tenantName).getUserApplicationDir(sessionId); } private void addSessionStateWatcher(long sessionId, RemoteSession remoteSession, Optional<LocalSession> localSession) { if (sessionStateWatchers.containsKey(sessionId)) { localSession.ifPresent(session -> sessionStateWatchers.get(sessionId).addLocalSession(session)); } else { Curator.FileCache fileCache = curator.createFileCache(getSessionStatePath(sessionId).getAbsolute(), false); fileCache.addListener(this::nodeChanged); sessionStateWatchers.put(sessionId, new SessionStateWatcher(fileCache, remoteSession, localSession, metrics, zkWatcherExecutor, this)); } } @Override public String toString() { return getLocalSessions().toString(); } /** Returns the lock for session operations for the given session id. 
*/ public Lock lock(long sessionId) { return curator.lock(lockPath(sessionId), Duration.ofMinutes(1)); } public Clock clock() { return clock; } private Path lockPath(long sessionId) { return locksPath.append(String.valueOf(sessionId)); } public Transaction createActivateTransaction(Session session) { Transaction transaction = createSetStatusTransaction(session, Session.Status.ACTIVATE); transaction.add(applicationRepo.createPutTransaction(session.getApplicationId(), session.getSessionId()).operations()); return transaction; } private Transaction createSetStatusTransaction(Session session, Session.Status status) { return session.sessionZooKeeperClient.createWriteStatusTransaction(status); } void setPrepared(Session session) { session.setStatus(Session.Status.PREPARE); } private static class FileTransaction extends AbstractTransaction { public static FileTransaction from(FileOperation operation) { FileTransaction transaction = new FileTransaction(); transaction.add(operation); return transaction; } @Override public void prepare() { } @Override public void commit() { for (Operation operation : operations()) ((FileOperation)operation).commit(); } } /** Factory for file operations */ private static class FileOperations { /** Creates an operation which recursively deletes the given path */ public static DeleteOperation delete(String pathToDelete) { return new DeleteOperation(pathToDelete); } } private interface FileOperation extends Transaction.Operation { void commit(); } /** * Recursively deletes this path and everything below. * Succeeds with no action if the path does not exist. */ private static class DeleteOperation implements FileOperation { private final String pathToDelete; DeleteOperation(String pathToDelete) { this.pathToDelete = pathToDelete; } @Override public void commit() { IOUtils.recursiveDeleteDir(new File(pathToDelete)); } } }
Sounds like imperfect unit tests. Is the delay a little short ?
private void run() { try { Thread.sleep(1000); while (true) { maintenance(); waitForTrigger(2000); } } catch (Exception e) { System.err.println("Fatal exception in FilesArchived-maintainer thread: "+e); } }
Thread.sleep(1000);
private void run() { try { Thread.sleep(1000); while (true) { maintenance(); waitForTrigger(2000); } } catch (Exception e) { System.err.println("Fatal exception in FilesArchived-maintainer thread: "+e); } }
class FilesArchived { private static final Logger log = Logger.getLogger(FilesArchived.class.getName()); /** * File instance representing root directory of archive */ private final File root; private final Object mutex = new Object(); private List<LogFile> knownFiles; public static final long compressAfterMillis = 2L * 3600 * 1000; private static final long maxAgeDays = 30; private static final long sizeLimit = 30L * (1L << 30); private void waitForTrigger(long milliS) throws InterruptedException { synchronized (mutex) { mutex.wait(milliS); } } /** * Creates an instance of FilesArchive managing the given directory */ public FilesArchived(File rootDir) { this.root = rootDir; rescan(); Thread thread = new Thread(this::run); thread.setDaemon(true); thread.setName("FilesArchived-maintainer"); thread.start(); } public String toString() { return FilesArchived.class.getName() + ": root=" + root; } public synchronized int highestGen(String prefix) { int gen = 0; for (LogFile lf : knownFiles) { if (prefix.equals(lf.prefix)) { gen = Math.max(gen, lf.generation); } } return gen; } public void triggerMaintenance() { synchronized (mutex) { mutex.notifyAll(); } } synchronized boolean maintenance() { boolean action = false; rescan(); if (removeOlderThan(maxAgeDays)) { action = true; rescan(); } if (compressOldFiles()) { action = true; rescan(); } long days = maxAgeDays; while (tooMuchDiskUsage() && (--days > 1)) { if (removeOlderThan(days)) { action = true; rescan(); } } return action; } private void rescan() { knownFiles = scanDir(root); } boolean tooMuchDiskUsage() { long sz = sumFileSizes(); return sz > sizeLimit; } private boolean olderThan(LogFile lf, long days, long now) { long mtime = lf.path.lastModified(); long diff = now - mtime; return (diff > days * 86400L * 1000L); } private boolean removeOlderThan(long days) { boolean action = false; long now = System.currentTimeMillis(); for (LogFile lf : knownFiles) { if (olderThan(lf, days, now)) { lf.path.delete(); 
log.info("Deleted: "+lf.path); action = true; } } return action; } private boolean compressOldFiles() { long now = System.currentTimeMillis(); int count = 0; for (LogFile lf : knownFiles) { if (lf.canCompress(now) && (count++ < 5)) { compress(lf.path); } } return count > 0; } private void compress(File oldFile) { File gzippedFile = new File(oldFile.getPath() + ".gz"); try (GZIPOutputStream compressor = new GZIPOutputStream(new FileOutputStream(gzippedFile), 0x100000); FileInputStream inputStream = new FileInputStream(oldFile)) { long mtime = oldFile.lastModified(); byte [] buffer = new byte[0x100000]; for (int read = inputStream.read(buffer); read > 0; read = inputStream.read(buffer)) { compressor.write(buffer, 0, read); } compressor.finish(); compressor.flush(); oldFile.delete(); gzippedFile.setLastModified(mtime); log.info("Compressed: "+gzippedFile); } catch (IOException e) { log.warning("Got '" + e + "' while compressing '" + oldFile.getPath() + "'."); } } long sumFileSizes() { long sum = 0; for (LogFile lf : knownFiles) { sum += lf.path.length(); } return sum; } private static final Pattern dateFormatRegexp = Pattern.compile(".*/" + "[0-9][0-9][0-9][0-9]/" + "[0-9][0-9]/" + "[0-9][0-9]/" + "[0-9][0-9]-" + "[0-9].*"); private static List<LogFile> scanDir(File top) { List<LogFile> retval = new ArrayList<>(); String[] names = top.list(); if (names != null) { for (String name : names) { File sub = new File(top, name); if (sub.isFile()) { String pathName = sub.toString(); if (dateFormatRegexp.matcher(pathName).matches()) { retval.add(new LogFile(sub)); } else { log.warning("skipping file not matching log archive pattern: "+pathName); } } else if (sub.isDirectory()) { retval.addAll(scanDir(sub)); } } } return retval; } static class LogFile { public final File path; public final String prefix; public final int generation; public final boolean zsuff; public boolean canCompress(long now) { if (zsuff) return false; if (! 
path.isFile()) return false; long diff = now - path.lastModified(); if (diff < compressAfterMillis) return false; return true; } private static int generationOf(String name) { int dash = name.lastIndexOf('-'); if (dash < 0) return 0; String suff = name.substring(dash + 1); int r = 0; for (char ch : suff.toCharArray()) { if (ch >= '0' && ch <= '9') { r *= 10; r += (ch - '0'); } else { break; } } return r; } private static String prefixOf(String name) { int dash = name.lastIndexOf('-'); if (dash < 0) return name; return name.substring(0, dash); } private static boolean zSuffix(String name) { if (name.endsWith(".gz")) return true; return false; } public LogFile(File path) { String name = path.toString(); this.path = path; this.prefix = prefixOf(name); this.generation = generationOf(name); this.zsuff = zSuffix(name); } public String toString() { return "FilesArchived.LogFile{name="+path+" prefix="+prefix+" gen="+generation+" z="+zsuff+"}"; } } }
class FilesArchived { private static final Logger log = Logger.getLogger(FilesArchived.class.getName()); /** * File instance representing root directory of archive */ private final File root; private final Object mutex = new Object(); private List<LogFile> knownFiles; public static final long compressAfterMillis = 2L * 3600 * 1000; private static final long maxAgeDays = 30; private static final long sizeLimit = 30L * (1L << 30); private void waitForTrigger(long milliS) throws InterruptedException { synchronized (mutex) { mutex.wait(milliS); } } /** * Creates an instance of FilesArchive managing the given directory */ public FilesArchived(File rootDir) { this.root = rootDir; rescan(); Thread thread = new Thread(this::run); thread.setDaemon(true); thread.setName("FilesArchived-maintainer"); thread.start(); } public String toString() { return FilesArchived.class.getName() + ": root=" + root; } public synchronized int highestGen(String prefix) { int gen = 0; for (LogFile lf : knownFiles) { if (prefix.equals(lf.prefix)) { gen = Math.max(gen, lf.generation); } } return gen; } public void triggerMaintenance() { synchronized (mutex) { mutex.notifyAll(); } } synchronized boolean maintenance() { boolean action = false; rescan(); if (removeOlderThan(maxAgeDays)) { action = true; rescan(); } if (compressOldFiles()) { action = true; rescan(); } long days = maxAgeDays; while (tooMuchDiskUsage() && (--days > 1)) { if (removeOlderThan(days)) { action = true; rescan(); } } return action; } private void rescan() { knownFiles = scanDir(root); } boolean tooMuchDiskUsage() { long sz = sumFileSizes(); return sz > sizeLimit; } private boolean olderThan(LogFile lf, long days, long now) { long mtime = lf.path.lastModified(); long diff = now - mtime; return (diff > days * 86400L * 1000L); } private boolean removeOlderThan(long days) { boolean action = false; long now = System.currentTimeMillis(); for (LogFile lf : knownFiles) { if (olderThan(lf, days, now)) { lf.path.delete(); 
log.info("Deleted: "+lf.path); action = true; } } return action; } private boolean compressOldFiles() { long now = System.currentTimeMillis(); int count = 0; for (LogFile lf : knownFiles) { if (lf.canCompress(now) && (count++ < 5)) { compress(lf.path); } } return count > 0; } private void compress(File oldFile) { File gzippedFile = new File(oldFile.getPath() + ".gz"); try (GZIPOutputStream compressor = new GZIPOutputStream(new FileOutputStream(gzippedFile), 0x100000); FileInputStream inputStream = new FileInputStream(oldFile)) { long mtime = oldFile.lastModified(); byte [] buffer = new byte[0x100000]; for (int read = inputStream.read(buffer); read > 0; read = inputStream.read(buffer)) { compressor.write(buffer, 0, read); } compressor.finish(); compressor.flush(); oldFile.delete(); gzippedFile.setLastModified(mtime); log.info("Compressed: "+gzippedFile); } catch (IOException e) { log.warning("Got '" + e + "' while compressing '" + oldFile.getPath() + "'."); } } long sumFileSizes() { long sum = 0; for (LogFile lf : knownFiles) { sum += lf.path.length(); } return sum; } private static final Pattern dateFormatRegexp = Pattern.compile(".*/" + "[0-9][0-9][0-9][0-9]/" + "[0-9][0-9]/" + "[0-9][0-9]/" + "[0-9][0-9]-" + "[0-9].*"); private static List<LogFile> scanDir(File top) { List<LogFile> retval = new ArrayList<>(); String[] names = top.list(); if (names != null) { for (String name : names) { File sub = new File(top, name); if (sub.isFile()) { String pathName = sub.toString(); if (dateFormatRegexp.matcher(pathName).matches()) { retval.add(new LogFile(sub)); } else { log.warning("skipping file not matching log archive pattern: "+pathName); } } else if (sub.isDirectory()) { retval.addAll(scanDir(sub)); } } } return retval; } static class LogFile { public final File path; public final String prefix; public final int generation; public final boolean zsuff; public boolean canCompress(long now) { if (zsuff) return false; if (! 
path.isFile()) return false; long diff = now - path.lastModified(); if (diff < compressAfterMillis) return false; return true; } private static int generationOf(String name) { int dash = name.lastIndexOf('-'); if (dash < 0) return 0; String suff = name.substring(dash + 1); int r = 0; for (char ch : suff.toCharArray()) { if (ch >= '0' && ch <= '9') { r *= 10; r += (ch - '0'); } else { break; } } return r; } private static String prefixOf(String name) { int dash = name.lastIndexOf('-'); if (dash < 0) return name; return name.substring(0, dash); } private static boolean zSuffix(String name) { if (name.endsWith(".gz")) return true; return false; } public LogFile(File path) { String name = path.toString(); this.path = path; this.prefix = prefixOf(name); this.generation = generationOf(name); this.zsuff = zSuffix(name); } public String toString() { return "FilesArchived.LogFile{name="+path+" prefix="+prefix+" gen="+generation+" z="+zsuff+"}"; } } }
Running the test failed every time for me until I added the delay, and it worked every time afterwards. On a fast machine I guess it usually works.
/** Maintenance loop for the FilesArchived-maintainer daemon thread. */
private void run() {
    try {
        // Let the surrounding system (and tests) settle before the first
        // maintenance pass; removing this delay has caused test flakiness.
        Thread.sleep(1000);
        for (;;) {
            maintenance();
            waitForTrigger(2000); // waits up to 2s, or until triggerMaintenance() notifies
        }
    } catch (Exception e) {
        System.err.println("Fatal exception in FilesArchived-maintainer thread: " + e);
    }
}
Thread.sleep(1000);
/** Maintenance loop for the FilesArchived-maintainer daemon thread. */
private void run() {
    try {
        // Let the surrounding system (and tests) settle before the first
        // maintenance pass; removing this delay has caused test flakiness.
        Thread.sleep(1000);
        for (;;) {
            maintenance();
            waitForTrigger(2000); // waits up to 2s, or until triggerMaintenance() notifies
        }
    } catch (Exception e) {
        System.err.println("Fatal exception in FilesArchived-maintainer thread: " + e);
    }
}
class FilesArchived { private static final Logger log = Logger.getLogger(FilesArchived.class.getName()); /** * File instance representing root directory of archive */ private final File root; private final Object mutex = new Object(); private List<LogFile> knownFiles; public static final long compressAfterMillis = 2L * 3600 * 1000; private static final long maxAgeDays = 30; private static final long sizeLimit = 30L * (1L << 30); private void waitForTrigger(long milliS) throws InterruptedException { synchronized (mutex) { mutex.wait(milliS); } } /** * Creates an instance of FilesArchive managing the given directory */ public FilesArchived(File rootDir) { this.root = rootDir; rescan(); Thread thread = new Thread(this::run); thread.setDaemon(true); thread.setName("FilesArchived-maintainer"); thread.start(); } public String toString() { return FilesArchived.class.getName() + ": root=" + root; } public synchronized int highestGen(String prefix) { int gen = 0; for (LogFile lf : knownFiles) { if (prefix.equals(lf.prefix)) { gen = Math.max(gen, lf.generation); } } return gen; } public void triggerMaintenance() { synchronized (mutex) { mutex.notifyAll(); } } synchronized boolean maintenance() { boolean action = false; rescan(); if (removeOlderThan(maxAgeDays)) { action = true; rescan(); } if (compressOldFiles()) { action = true; rescan(); } long days = maxAgeDays; while (tooMuchDiskUsage() && (--days > 1)) { if (removeOlderThan(days)) { action = true; rescan(); } } return action; } private void rescan() { knownFiles = scanDir(root); } boolean tooMuchDiskUsage() { long sz = sumFileSizes(); return sz > sizeLimit; } private boolean olderThan(LogFile lf, long days, long now) { long mtime = lf.path.lastModified(); long diff = now - mtime; return (diff > days * 86400L * 1000L); } private boolean removeOlderThan(long days) { boolean action = false; long now = System.currentTimeMillis(); for (LogFile lf : knownFiles) { if (olderThan(lf, days, now)) { lf.path.delete(); 
log.info("Deleted: "+lf.path); action = true; } } return action; } private boolean compressOldFiles() { long now = System.currentTimeMillis(); int count = 0; for (LogFile lf : knownFiles) { if (lf.canCompress(now) && (count++ < 5)) { compress(lf.path); } } return count > 0; } private void compress(File oldFile) { File gzippedFile = new File(oldFile.getPath() + ".gz"); try (GZIPOutputStream compressor = new GZIPOutputStream(new FileOutputStream(gzippedFile), 0x100000); FileInputStream inputStream = new FileInputStream(oldFile)) { long mtime = oldFile.lastModified(); byte [] buffer = new byte[0x100000]; for (int read = inputStream.read(buffer); read > 0; read = inputStream.read(buffer)) { compressor.write(buffer, 0, read); } compressor.finish(); compressor.flush(); oldFile.delete(); gzippedFile.setLastModified(mtime); log.info("Compressed: "+gzippedFile); } catch (IOException e) { log.warning("Got '" + e + "' while compressing '" + oldFile.getPath() + "'."); } } long sumFileSizes() { long sum = 0; for (LogFile lf : knownFiles) { sum += lf.path.length(); } return sum; } private static final Pattern dateFormatRegexp = Pattern.compile(".*/" + "[0-9][0-9][0-9][0-9]/" + "[0-9][0-9]/" + "[0-9][0-9]/" + "[0-9][0-9]-" + "[0-9].*"); private static List<LogFile> scanDir(File top) { List<LogFile> retval = new ArrayList<>(); String[] names = top.list(); if (names != null) { for (String name : names) { File sub = new File(top, name); if (sub.isFile()) { String pathName = sub.toString(); if (dateFormatRegexp.matcher(pathName).matches()) { retval.add(new LogFile(sub)); } else { log.warning("skipping file not matching log archive pattern: "+pathName); } } else if (sub.isDirectory()) { retval.addAll(scanDir(sub)); } } } return retval; } static class LogFile { public final File path; public final String prefix; public final int generation; public final boolean zsuff; public boolean canCompress(long now) { if (zsuff) return false; if (! 
path.isFile()) return false; long diff = now - path.lastModified(); if (diff < compressAfterMillis) return false; return true; } private static int generationOf(String name) { int dash = name.lastIndexOf('-'); if (dash < 0) return 0; String suff = name.substring(dash + 1); int r = 0; for (char ch : suff.toCharArray()) { if (ch >= '0' && ch <= '9') { r *= 10; r += (ch - '0'); } else { break; } } return r; } private static String prefixOf(String name) { int dash = name.lastIndexOf('-'); if (dash < 0) return name; return name.substring(0, dash); } private static boolean zSuffix(String name) { if (name.endsWith(".gz")) return true; return false; } public LogFile(File path) { String name = path.toString(); this.path = path; this.prefix = prefixOf(name); this.generation = generationOf(name); this.zsuff = zSuffix(name); } public String toString() { return "FilesArchived.LogFile{name="+path+" prefix="+prefix+" gen="+generation+" z="+zsuff+"}"; } } }
class FilesArchived { private static final Logger log = Logger.getLogger(FilesArchived.class.getName()); /** * File instance representing root directory of archive */ private final File root; private final Object mutex = new Object(); private List<LogFile> knownFiles; public static final long compressAfterMillis = 2L * 3600 * 1000; private static final long maxAgeDays = 30; private static final long sizeLimit = 30L * (1L << 30); private void waitForTrigger(long milliS) throws InterruptedException { synchronized (mutex) { mutex.wait(milliS); } } /** * Creates an instance of FilesArchive managing the given directory */ public FilesArchived(File rootDir) { this.root = rootDir; rescan(); Thread thread = new Thread(this::run); thread.setDaemon(true); thread.setName("FilesArchived-maintainer"); thread.start(); } public String toString() { return FilesArchived.class.getName() + ": root=" + root; } public synchronized int highestGen(String prefix) { int gen = 0; for (LogFile lf : knownFiles) { if (prefix.equals(lf.prefix)) { gen = Math.max(gen, lf.generation); } } return gen; } public void triggerMaintenance() { synchronized (mutex) { mutex.notifyAll(); } } synchronized boolean maintenance() { boolean action = false; rescan(); if (removeOlderThan(maxAgeDays)) { action = true; rescan(); } if (compressOldFiles()) { action = true; rescan(); } long days = maxAgeDays; while (tooMuchDiskUsage() && (--days > 1)) { if (removeOlderThan(days)) { action = true; rescan(); } } return action; } private void rescan() { knownFiles = scanDir(root); } boolean tooMuchDiskUsage() { long sz = sumFileSizes(); return sz > sizeLimit; } private boolean olderThan(LogFile lf, long days, long now) { long mtime = lf.path.lastModified(); long diff = now - mtime; return (diff > days * 86400L * 1000L); } private boolean removeOlderThan(long days) { boolean action = false; long now = System.currentTimeMillis(); for (LogFile lf : knownFiles) { if (olderThan(lf, days, now)) { lf.path.delete(); 
log.info("Deleted: "+lf.path); action = true; } } return action; } private boolean compressOldFiles() { long now = System.currentTimeMillis(); int count = 0; for (LogFile lf : knownFiles) { if (lf.canCompress(now) && (count++ < 5)) { compress(lf.path); } } return count > 0; } private void compress(File oldFile) { File gzippedFile = new File(oldFile.getPath() + ".gz"); try (GZIPOutputStream compressor = new GZIPOutputStream(new FileOutputStream(gzippedFile), 0x100000); FileInputStream inputStream = new FileInputStream(oldFile)) { long mtime = oldFile.lastModified(); byte [] buffer = new byte[0x100000]; for (int read = inputStream.read(buffer); read > 0; read = inputStream.read(buffer)) { compressor.write(buffer, 0, read); } compressor.finish(); compressor.flush(); oldFile.delete(); gzippedFile.setLastModified(mtime); log.info("Compressed: "+gzippedFile); } catch (IOException e) { log.warning("Got '" + e + "' while compressing '" + oldFile.getPath() + "'."); } } long sumFileSizes() { long sum = 0; for (LogFile lf : knownFiles) { sum += lf.path.length(); } return sum; } private static final Pattern dateFormatRegexp = Pattern.compile(".*/" + "[0-9][0-9][0-9][0-9]/" + "[0-9][0-9]/" + "[0-9][0-9]/" + "[0-9][0-9]-" + "[0-9].*"); private static List<LogFile> scanDir(File top) { List<LogFile> retval = new ArrayList<>(); String[] names = top.list(); if (names != null) { for (String name : names) { File sub = new File(top, name); if (sub.isFile()) { String pathName = sub.toString(); if (dateFormatRegexp.matcher(pathName).matches()) { retval.add(new LogFile(sub)); } else { log.warning("skipping file not matching log archive pattern: "+pathName); } } else if (sub.isDirectory()) { retval.addAll(scanDir(sub)); } } } return retval; } static class LogFile { public final File path; public final String prefix; public final int generation; public final boolean zsuff; public boolean canCompress(long now) { if (zsuff) return false; if (! 
path.isFile()) return false; long diff = now - path.lastModified(); if (diff < compressAfterMillis) return false; return true; } private static int generationOf(String name) { int dash = name.lastIndexOf('-'); if (dash < 0) return 0; String suff = name.substring(dash + 1); int r = 0; for (char ch : suff.toCharArray()) { if (ch >= '0' && ch <= '9') { r *= 10; r += (ch - '0'); } else { break; } } return r; } private static String prefixOf(String name) { int dash = name.lastIndexOf('-'); if (dash < 0) return name; return name.substring(0, dash); } private static boolean zSuffix(String name) { if (name.endsWith(".gz")) return true; return false; } public LogFile(File path) { String name = path.toString(); this.path = path; this.prefix = prefixOf(name); this.generation = generationOf(name); this.zsuff = zSuffix(name); } public String toString() { return "FilesArchived.LogFile{name="+path+" prefix="+prefix+" gen="+generation+" z="+zsuff+"}"; } } }
Return expression with assignment as side-effect? 😒
/**
 * Resolves the target deployment zone from the given environment and region,
 * and remembers it in the {@code zone} field.
 *
 * @param environment the environment name, or null/blank to default to dev
 * @param region the region name, or null/blank to use the controller's default zone
 * @return the resolved zone
 * @throws IllegalArgumentException if a region is given without an environment
 */
protected ZoneId zoneOf(String environment, String region) {
    if (isNullOrBlank(region)) {
        // No region given: let the controller pick its default zone for the environment.
        zone = controller.defaultZone(isNullOrBlank(environment) ? Environment.dev
                                                                 : Environment.from(environment));
        return zone;
    }
    if (isNullOrBlank(environment))
        throw new IllegalArgumentException("Environment must be specified if region is specified");
    zone = ZoneId.from(environment, region);
    return zone;
}
return zone = controller.defaultZone(isNullOrBlank(environment) ? Environment.dev
/**
 * Resolves the target deployment zone from the given environment and region,
 * and remembers it in the {@code zone} field.
 *
 * @param environment the environment name, or null/blank to default to dev
 * @param region the region name, or null/blank to use the controller's default zone
 * @return the resolved zone
 * @throws IllegalArgumentException if a region is given without an environment
 */
protected ZoneId zoneOf(String environment, String region) {
    if (isNullOrBlank(region)) {
        // No region given: let the controller pick its default zone for the environment.
        zone = controller.defaultZone(isNullOrBlank(environment) ? Environment.dev
                                                                 : Environment.from(environment));
        return zone;
    }
    if (isNullOrBlank(environment))
        throw new IllegalArgumentException("Environment must be specified if region is specified");
    zone = ZoneId.from(environment, region);
    return zone;
}
// Base Mojo for Vespa deployment goals; holds the target zone parameters.
class AbstractVespaDeploymentMojo extends AbstractVespaMojo {

    // Target zone; null until resolved from the environment/region parameters.
    protected ZoneId zone;

    // Deployment environment name (e.g. "dev"); optional — defaults are applied elsewhere.
    @Parameter(property = "environment") protected String environment;

    // Deployment region name; presumably requires environment as well — see zoneOf().
    @Parameter(property = "region") protected String region;

    // Human-readable description of the deployment target, used in log output.
    @Override protected String name() {
        return super.name() + "." + instance + " in "
               + (zone != null ? zone.region() + " in " + zone.environment()
                               : (region == null ? "default region" : region) + " in "
                                 + (environment == null ? "default environment (dev)" : environment));
    }

}
// Base Mojo for Vespa deployment goals; holds the target zone parameters.
class AbstractVespaDeploymentMojo extends AbstractVespaMojo {

    // Target zone; null until resolved from the environment/region parameters.
    protected ZoneId zone;

    // Deployment environment name (e.g. "dev"); optional — defaults are applied elsewhere.
    @Parameter(property = "environment") protected String environment;

    // Deployment region name; presumably requires environment as well — see zoneOf().
    @Parameter(property = "region") protected String region;

    // Human-readable description of the deployment target, used in log output.
    @Override protected String name() {
        return super.name() + "." + instance + " in "
               + (zone != null ? zone.region() + " in " + zone.environment()
                               : (region == null ? "default region" : region) + " in "
                                 + (environment == null ? "default environment (dev)" : environment));
    }

}
`addSession` should perhaps also be named `putSession`?
// Deactivates the given remote session and stores the result back in the cache.
// NOTE(review): addSession is used as a "put" here — it presumably replaces the
// cached entry for the same session id (verify SessionCache semantics); renaming
// it to putSession would make that intent clearer.
public void deactivate(RemoteSession remoteSession) { remoteSessionCache.addSession(remoteSession.deactivate()); }
remoteSessionCache.addSession(remoteSession.deactivate());
// Deactivates the given remote session and stores the deactivated copy back in
// the cache. deactivated() presumably returns a new session instance in the
// deactivated state — TODO confirm against RemoteSession.
public void deactivate(RemoteSession remoteSession) { remoteSessionCache.putSession(remoteSession.deactivated()); }
class SessionRepository { private static final Logger log = Logger.getLogger(SessionRepository.class.getName()); private static final FilenameFilter sessionApplicationsFilter = (dir, name) -> name.matches("\\d+"); private static final long nonExistingActiveSessionId = 0; private final SessionCache<LocalSession> localSessionCache = new SessionCache<>(); private final SessionCache<RemoteSession> remoteSessionCache = new SessionCache<>(); private final Map<Long, SessionStateWatcher> sessionStateWatchers = new HashMap<>(); private final Duration sessionLifetime; private final Clock clock; private final Curator curator; private final Executor zkWatcherExecutor; private final TenantFileSystemDirs tenantFileSystemDirs; private final BooleanFlag distributeApplicationPackage; private final MetricUpdater metrics; private final Curator.DirectoryCache directoryCache; private final TenantApplications applicationRepo; private final SessionPreparer sessionPreparer; private final Path sessionsPath; private final TenantName tenantName; private final GlobalComponentRegistry componentRegistry; private final Path locksPath; public SessionRepository(TenantName tenantName, GlobalComponentRegistry componentRegistry, TenantApplications applicationRepo, FlagSource flagSource, SessionPreparer sessionPreparer) { this.tenantName = tenantName; this.componentRegistry = componentRegistry; this.sessionsPath = TenantRepository.getSessionsPath(tenantName); this.clock = componentRegistry.getClock(); this.curator = componentRegistry.getCurator(); this.sessionLifetime = Duration.ofSeconds(componentRegistry.getConfigserverConfig().sessionLifetime()); this.zkWatcherExecutor = command -> componentRegistry.getZkWatcherExecutor().execute(tenantName, command); this.tenantFileSystemDirs = new TenantFileSystemDirs(componentRegistry.getConfigServerDB(), tenantName); this.applicationRepo = applicationRepo; this.sessionPreparer = sessionPreparer; this.distributeApplicationPackage = 
Flags.CONFIGSERVER_DISTRIBUTE_APPLICATION_PACKAGE.bindTo(flagSource); this.metrics = componentRegistry.getMetrics().getOrCreateMetricUpdater(Metrics.createDimensions(tenantName)); this.locksPath = TenantRepository.getLocksPath(tenantName); loadSessions(); this.directoryCache = curator.createDirectoryCache(sessionsPath.getAbsolute(), false, false, componentRegistry.getZkCacheExecutor()); this.directoryCache.addListener(this::childEvent); this.directoryCache.start(); } private void loadSessions() { loadLocalSessions(); initializeRemoteSessions(); } public synchronized void addLocalSession(LocalSession session) { localSessionCache.addSession(session); long sessionId = session.getSessionId(); RemoteSession remoteSession = createRemoteSession(sessionId); addSessionStateWatcher(sessionId, remoteSession, Optional.of(session)); } public LocalSession getLocalSession(long sessionId) { return localSessionCache.getSession(sessionId); } public List<LocalSession> getLocalSessions() { return localSessionCache.getSessions(); } private void loadLocalSessions() { File[] sessions = tenantFileSystemDirs.sessionsPath().listFiles(sessionApplicationsFilter); if (sessions == null) return; for (File session : sessions) { try { addLocalSession(createSessionFromId(Long.parseLong(session.getName()))); } catch (IllegalArgumentException e) { log.log(Level.WARNING, "Could not load session '" + session.getAbsolutePath() + "':" + e.getMessage() + ", skipping it."); } } } public ConfigChangeActions prepareLocalSession(LocalSession session, DeployLogger logger, PrepareParams params, Optional<ApplicationSet> currentActiveApplicationSet, Path tenantPath, Instant now) { applicationRepo.createApplication(params.getApplicationId()); logger.log(Level.FINE, "Created application " + params.getApplicationId()); long sessionId = session.getSessionId(); SessionZooKeeperClient sessionZooKeeperClient = createSessionZooKeeperClient(sessionId); Curator.CompletionWaiter waiter = 
sessionZooKeeperClient.createPrepareWaiter(); ConfigChangeActions actions = sessionPreparer.prepare(applicationRepo.getHostValidator(), logger, params, currentActiveApplicationSet, tenantPath, now, getSessionAppDir(sessionId), session.getApplicationPackage(), sessionZooKeeperClient) .getConfigChangeActions(); setPrepared(session); waiter.awaitCompletion(params.getTimeoutBudget().timeLeft()); return actions; } public void deleteExpiredSessions(Map<ApplicationId, Long> activeSessions) { log.log(Level.FINE, "Purging old sessions for tenant '" + tenantName + "'"); try { for (LocalSession candidate : localSessionCache.getSessions()) { Instant createTime = candidate.getCreateTime(); log.log(Level.FINE, "Candidate session for deletion: " + candidate.getSessionId() + ", created: " + createTime); if (hasExpired(candidate) && !isActiveSession(candidate)) { deleteLocalSession(candidate); } else if (createTime.plus(Duration.ofDays(1)).isBefore(clock.instant())) { Optional<ApplicationId> applicationId = candidate.getOptionalApplicationId(); if (applicationId.isEmpty()) continue; Long activeSession = activeSessions.get(applicationId.get()); if (activeSession == null || activeSession != candidate.getSessionId()) { deleteLocalSession(candidate); log.log(Level.INFO, "Deleted inactive session " + candidate.getSessionId() + " created " + createTime + " for '" + applicationId + "'"); } } } } catch (Throwable e) { log.log(Level.WARNING, "Error when purging old sessions ", e); } log.log(Level.FINE, "Done purging old sessions"); } private boolean hasExpired(LocalSession candidate) { return (candidate.getCreateTime().plus(sessionLifetime).isBefore(clock.instant())); } private boolean isActiveSession(LocalSession candidate) { return candidate.getStatus() == Session.Status.ACTIVATE; } public void deleteLocalSession(LocalSession session) { long sessionId = session.getSessionId(); try (Lock lock = lock(sessionId)) { log.log(Level.FINE, "Deleting local session " + sessionId); 
SessionStateWatcher watcher = sessionStateWatchers.remove(sessionId); if (watcher != null) watcher.close(); localSessionCache.removeSession(sessionId); deletePersistentData(sessionId); } } private void deletePersistentData(long sessionId) { NestedTransaction transaction = new NestedTransaction(); SessionZooKeeperClient sessionZooKeeperClient = createSessionZooKeeperClient(sessionId); transaction.add(sessionZooKeeperClient.deleteTransaction(), FileTransaction.class); transaction.add(FileTransaction.from(FileOperations.delete(getSessionAppDir(sessionId).getAbsolutePath()))); transaction.commit(); } public void close() { deleteAllSessions(); tenantFileSystemDirs.delete(); try { if (directoryCache != null) { directoryCache.close(); } } catch (Exception e) { log.log(Level.WARNING, "Exception when closing path cache", e); } finally { checkForRemovedSessions(new ArrayList<>()); } } private void deleteAllSessions() { List<LocalSession> sessions = new ArrayList<>(localSessionCache.getSessions()); for (LocalSession session : sessions) { deleteLocalSession(session); } } public RemoteSession getRemoteSession(long sessionId) { return remoteSessionCache.getSession(sessionId); } public List<Long> getRemoteSessions() { return getSessionList(curator.getChildren(sessionsPath)); } public void addRemoteSession(RemoteSession session) { remoteSessionCache.addSession(session); metrics.incAddedSessions(); } public int deleteExpiredRemoteSessions(Clock clock, Duration expiryTime) { int deleted = 0; for (long sessionId : getRemoteSessions()) { RemoteSession session = remoteSessionCache.getSession(sessionId); if (session == null) continue; if (session.getStatus() == Session.Status.ACTIVATE) continue; if (sessionHasExpired(session.getCreateTime(), expiryTime, clock)) { log.log(Level.FINE, "Remote session " + sessionId + " for " + tenantName + " has expired, deleting it"); deleteSession(session); deleted++; } } return deleted; } public void deleteSession(RemoteSession session) { 
SessionZooKeeperClient sessionZooKeeperClient = createSessionZooKeeperClient(session.getSessionId()); Transaction transaction = sessionZooKeeperClient.deleteTransaction(); transaction.commit(); transaction.close(); } public int deleteExpiredLocks(Clock clock, Duration expiryTime) { int deleted = 0; for (var lock : curator.getChildren(locksPath)) { Path path = locksPath.append(lock); if (zooKeeperNodeCreated(path).orElse(clock.instant()).isBefore(clock.instant().minus(expiryTime))) { log.log(Level.FINE, "Lock " + path + " has expired, deleting it"); curator.delete(path); deleted++; } } return deleted; } private Optional<Instant> zooKeeperNodeCreated(Path path) { return curator.getStat(path).map(s -> Instant.ofEpochMilli(s.getCtime())); } private boolean sessionHasExpired(Instant created, Duration expiryTime, Clock clock) { return (created.plus(expiryTime).isBefore(clock.instant())); } private List<Long> getSessionListFromDirectoryCache(List<ChildData> children) { return getSessionList(children.stream() .map(child -> Path.fromString(child.getPath()).getName()) .collect(Collectors.toList())); } private List<Long> getSessionList(List<String> children) { return children.stream().map(Long::parseLong).collect(Collectors.toList()); } private void initializeRemoteSessions() throws NumberFormatException { getRemoteSessions().forEach(this::sessionAdded); } private synchronized void sessionsChanged() throws NumberFormatException { List<Long> sessions = getSessionListFromDirectoryCache(directoryCache.getCurrentData()); checkForRemovedSessions(sessions); checkForAddedSessions(sessions); } private void checkForRemovedSessions(List<Long> sessions) { for (RemoteSession session : remoteSessionCache.getSessions()) if ( ! 
sessions.contains(session.getSessionId())) sessionRemoved(session.getSessionId()); } private void checkForAddedSessions(List<Long> sessions) { for (Long sessionId : sessions) if (remoteSessionCache.getSession(sessionId) == null) sessionAdded(sessionId); } /** * A session for which we don't have a watcher, i.e. hitherto unknown to us. * * @param sessionId session id for the new session */ public void sessionAdded(long sessionId) { SessionZooKeeperClient sessionZKClient = createSessionZooKeeperClient(sessionId); if (sessionZKClient.readStatus().equals(Session.Status.DELETE)) return; log.log(Level.FINE, () -> "Adding remote session to SessionRepository: " + sessionId); RemoteSession remoteSession = createRemoteSession(sessionId); loadSessionIfActive(remoteSession); addRemoteSession(remoteSession); Optional<LocalSession> localSession = Optional.empty(); if (distributeApplicationPackage()) localSession = createLocalSessionUsingDistributedApplicationPackage(sessionId); addSessionStateWatcher(sessionId, remoteSession, localSession); } void activate(RemoteSession session) { long sessionId = session.getSessionId(); Curator.CompletionWaiter waiter = createSessionZooKeeperClient(sessionId).getActiveWaiter(); log.log(Level.FINE, () -> session.logPre() + "Getting session from repo: " + sessionId); ApplicationSet app = ensureApplicationLoaded(session); log.log(Level.FINE, () -> session.logPre() + "Reloading config for " + sessionId); applicationRepo.reloadConfig(app); log.log(Level.FINE, () -> session.logPre() + "Notifying " + waiter); notifyCompletion(waiter, session); log.log(Level.INFO, session.logPre() + "Session activated: " + sessionId); } public void deleteSession(RemoteSession remoteSession, Optional<LocalSession> localSession) { localSession.ifPresent(this::deleteLocalSession); remoteSession.deactivate(); } boolean distributeApplicationPackage() { return distributeApplicationPackage.value(); } private void sessionRemoved(long sessionId) { SessionStateWatcher watcher = 
sessionStateWatchers.remove(sessionId); if (watcher != null) watcher.close(); remoteSessionCache.removeSession(sessionId); metrics.incRemovedSessions(); } private void loadSessionIfActive(RemoteSession session) { for (ApplicationId applicationId : applicationRepo.activeApplications()) { if (applicationRepo.requireActiveSessionOf(applicationId) == session.getSessionId()) { log.log(Level.FINE, () -> "Found active application for session " + session.getSessionId() + " , loading it"); applicationRepo.reloadConfig(ensureApplicationLoaded(session)); log.log(Level.INFO, session.logPre() + "Application activated successfully: " + applicationId + " (generation " + session.getSessionId() + ")"); return; } } } void prepareRemoteSession(RemoteSession session) { SessionZooKeeperClient sessionZooKeeperClient = createSessionZooKeeperClient(session.getSessionId()); Curator.CompletionWaiter waiter = sessionZooKeeperClient.getPrepareWaiter(); ensureApplicationLoaded(session); notifyCompletion(waiter, session); } public synchronized ApplicationSet ensureApplicationLoaded(RemoteSession session) { Optional<ApplicationSet> applicationSet = session.applicationSet(); if (applicationSet.isPresent()) { return applicationSet.get(); } ApplicationSet newApplicationSet = loadApplication(session); RemoteSession newSession = new RemoteSession(session.getTenantName(), session.getSessionId(), session.getSessionZooKeeperClient(), Optional.of(newApplicationSet)); remoteSessionCache.addSession(newSession); return newApplicationSet; } void confirmUpload(RemoteSession session) { Curator.CompletionWaiter waiter = session.getSessionZooKeeperClient().getUploadWaiter(); long sessionId = session.getSessionId(); log.log(Level.FINE, "Notifying upload waiter for session " + sessionId); notifyCompletion(waiter, session); log.log(Level.FINE, "Done notifying upload for session " + sessionId); } void notifyCompletion(Curator.CompletionWaiter completionWaiter, RemoteSession session) { try { 
completionWaiter.notifyCompletion(); } catch (RuntimeException e) { Set<Class<? extends KeeperException>> acceptedExceptions = Set.of(KeeperException.NoNodeException.class, KeeperException.NodeExistsException.class); Class<? extends Throwable> exceptionClass = e.getCause().getClass(); if (acceptedExceptions.contains(exceptionClass)) log.log(Level.FINE, "Not able to notify completion for session " + session.getSessionId() + " (" + completionWaiter + ")," + " node " + (exceptionClass.equals(KeeperException.NoNodeException.class) ? "has been deleted" : "already exists")); else throw e; } } private ApplicationSet loadApplication(RemoteSession session) { SessionZooKeeperClient sessionZooKeeperClient = createSessionZooKeeperClient(session.getSessionId()); ApplicationPackage applicationPackage = sessionZooKeeperClient.loadApplicationPackage(); ActivatedModelsBuilder builder = new ActivatedModelsBuilder(session.getTenantName(), session.getSessionId(), sessionZooKeeperClient, componentRegistry); Optional<AllocatedHosts> allocatedHosts = applicationPackage.getAllocatedHosts(); return ApplicationSet.fromList(builder.buildModels(session.getApplicationId(), sessionZooKeeperClient.readDockerImageRepository(), sessionZooKeeperClient.readVespaVersion(), applicationPackage, new SettableOptional<>(allocatedHosts), clock.instant())); } private void nodeChanged() { zkWatcherExecutor.execute(() -> { Multiset<Session.Status> sessionMetrics = HashMultiset.create(); for (RemoteSession session : remoteSessionCache.getSessions()) { sessionMetrics.add(session.getStatus()); } metrics.setNewSessions(sessionMetrics.count(Session.Status.NEW)); metrics.setPreparedSessions(sessionMetrics.count(Session.Status.PREPARE)); metrics.setActivatedSessions(sessionMetrics.count(Session.Status.ACTIVATE)); metrics.setDeactivatedSessions(sessionMetrics.count(Session.Status.DEACTIVATE)); }); } @SuppressWarnings("unused") private void childEvent(CuratorFramework ignored, PathChildrenCacheEvent event) { 
zkWatcherExecutor.execute(() -> { log.log(Level.FINE, () -> "Got child event: " + event); switch (event.getType()) { case CHILD_ADDED: sessionsChanged(); synchronizeOnNew(getSessionListFromDirectoryCache(Collections.singletonList(event.getData()))); break; case CHILD_REMOVED: case CONNECTION_RECONNECTED: sessionsChanged(); break; } }); } private void synchronizeOnNew(List<Long> sessionList) { for (long sessionId : sessionList) { RemoteSession session = remoteSessionCache.getSession(sessionId); if (session == null) continue; log.log(Level.FINE, () -> session.logPre() + "Confirming upload for session " + sessionId); confirmUpload(session); } } /** * Creates a new deployment session from an application package. * * @param applicationDirectory a File pointing to an application. * @param applicationId application id for this new session. * @param timeoutBudget Timeout for creating session and waiting for other servers. * @return a new session */ public LocalSession createSession(File applicationDirectory, ApplicationId applicationId, TimeoutBudget timeoutBudget, Optional<Long> activeSessionId) { return create(applicationDirectory, applicationId, activeSessionId, false, timeoutBudget); } public RemoteSession createRemoteSession(long sessionId) { SessionZooKeeperClient sessionZKClient = createSessionZooKeeperClient(sessionId); return new RemoteSession(tenantName, sessionId, sessionZKClient); } private void ensureSessionPathDoesNotExist(long sessionId) { Path sessionPath = getSessionPath(sessionId); if (componentRegistry.getConfigCurator().exists(sessionPath.getAbsolute())) { throw new IllegalArgumentException("Path " + sessionPath.getAbsolute() + " already exists in ZooKeeper"); } } private ApplicationPackage createApplication(File userDir, File configApplicationDir, ApplicationId applicationId, long sessionId, Optional<Long> currentlyActiveSessionId, boolean internalRedeploy) { long deployTimestamp = System.currentTimeMillis(); String user = System.getenv("USER"); if 
(user == null) { user = "unknown"; } DeployData deployData = new DeployData(user, userDir.getAbsolutePath(), applicationId, deployTimestamp, internalRedeploy, sessionId, currentlyActiveSessionId.orElse(nonExistingActiveSessionId)); return FilesApplicationPackage.fromFileWithDeployData(configApplicationDir, deployData); } private LocalSession createSessionFromApplication(ApplicationPackage applicationPackage, long sessionId, TimeoutBudget timeoutBudget, Clock clock) { log.log(Level.FINE, TenantRepository.logPre(tenantName) + "Creating session " + sessionId + " in ZooKeeper"); SessionZooKeeperClient sessionZKClient = createSessionZooKeeperClient(sessionId); sessionZKClient.createNewSession(clock.instant()); Curator.CompletionWaiter waiter = sessionZKClient.getUploadWaiter(); LocalSession session = new LocalSession(tenantName, sessionId, applicationPackage, sessionZKClient); waiter.awaitCompletion(timeoutBudget.timeLeft()); return session; } /** * Creates a new deployment session from an already existing session. * * @param existingSession the session to use as base * @param logger a deploy logger where the deploy log will be written. * @param internalRedeploy whether this session is for a system internal redeploy — not an application package change * @param timeoutBudget timeout for creating session and waiting for other servers. 
* @return a new session */ public LocalSession createSessionFromExisting(Session existingSession, DeployLogger logger, boolean internalRedeploy, TimeoutBudget timeoutBudget) { File existingApp = getSessionAppDir(existingSession.getSessionId()); ApplicationId existingApplicationId = existingSession.getApplicationId(); Optional<Long> activeSessionId = getActiveSessionId(existingApplicationId); logger.log(Level.FINE, "Create new session for application id '" + existingApplicationId + "' from existing active session " + activeSessionId); LocalSession session = create(existingApp, existingApplicationId, activeSessionId, internalRedeploy, timeoutBudget); session.setApplicationId(existingApplicationId); if (distributeApplicationPackage() && existingSession.getApplicationPackageReference() != null) { session.setApplicationPackageReference(existingSession.getApplicationPackageReference()); } session.setVespaVersion(existingSession.getVespaVersion()); session.setDockerImageRepository(existingSession.getDockerImageRepository()); session.setAthenzDomain(existingSession.getAthenzDomain()); return session; } private LocalSession create(File applicationFile, ApplicationId applicationId, Optional<Long> currentlyActiveSessionId, boolean internalRedeploy, TimeoutBudget timeoutBudget) { long sessionId = getNextSessionId(); try { ensureSessionPathDoesNotExist(sessionId); ApplicationPackage app = createApplicationPackage(applicationFile, applicationId, sessionId, currentlyActiveSessionId, internalRedeploy); return createSessionFromApplication(app, sessionId, timeoutBudget, clock); } catch (Exception e) { throw new RuntimeException("Error creating session " + sessionId, e); } } /** * This method is used when creating a session based on a remote session and the distributed application package * It does not wait for session being created on other servers */ private LocalSession createLocalSession(File applicationFile, ApplicationId applicationId, long sessionId) { try { Optional<Long> 
currentlyActiveSessionId = getActiveSessionId(applicationId); ApplicationPackage applicationPackage = createApplicationPackage(applicationFile, applicationId, sessionId, currentlyActiveSessionId, false); SessionZooKeeperClient sessionZooKeeperClient = createSessionZooKeeperClient(sessionId); return new LocalSession(tenantName, sessionId, applicationPackage, sessionZooKeeperClient); } catch (Exception e) { throw new RuntimeException("Error creating session " + sessionId, e); } } private ApplicationPackage createApplicationPackage(File applicationFile, ApplicationId applicationId, long sessionId, Optional<Long> currentlyActiveSessionId, boolean internalRedeploy) throws IOException { File userApplicationDir = getSessionAppDir(sessionId); copyApp(applicationFile, userApplicationDir); ApplicationPackage applicationPackage = createApplication(applicationFile, userApplicationDir, applicationId, sessionId, currentlyActiveSessionId, internalRedeploy); applicationPackage.writeMetaData(); return applicationPackage; } private void copyApp(File sourceDir, File destinationDir) throws IOException { if (destinationDir.exists()) throw new RuntimeException("Destination dir " + destinationDir + " already exists"); if (! 
sourceDir.isDirectory()) throw new IllegalArgumentException(sourceDir.getAbsolutePath() + " is not a directory"); java.nio.file.Path tempDestinationDir = null; try { tempDestinationDir = Files.createTempDirectory(destinationDir.getParentFile().toPath(), "app-package"); log.log(Level.FINE, "Copying dir " + sourceDir.getAbsolutePath() + " to " + tempDestinationDir.toFile().getAbsolutePath()); IOUtils.copyDirectory(sourceDir, tempDestinationDir.toFile()); log.log(Level.FINE, "Moving " + tempDestinationDir + " to " + destinationDir.getAbsolutePath()); Files.move(tempDestinationDir, destinationDir.toPath(), StandardCopyOption.ATOMIC_MOVE); } finally { if (tempDestinationDir != null) IOUtils.recursiveDeleteDir(tempDestinationDir.toFile()); } } /** * Returns a new session instance for the given session id. */ LocalSession createSessionFromId(long sessionId) { File sessionDir = getAndValidateExistingSessionAppDir(sessionId); ApplicationPackage applicationPackage = FilesApplicationPackage.fromFile(sessionDir); SessionZooKeeperClient sessionZKClient = createSessionZooKeeperClient(sessionId); return new LocalSession(tenantName, sessionId, applicationPackage, sessionZKClient); } /** * Returns a new local session for the given session id if it does not already exist. 
* Will also add the session to the local session cache if necessary */ public Optional<LocalSession> createLocalSessionUsingDistributedApplicationPackage(long sessionId) { if (applicationRepo.hasLocalSession(sessionId)) { log.log(Level.FINE, "Local session for session id " + sessionId + " already exists"); return Optional.of(createSessionFromId(sessionId)); } SessionZooKeeperClient sessionZKClient = createSessionZooKeeperClient(sessionId); FileReference fileReference = sessionZKClient.readApplicationPackageReference(); log.log(Level.FINE, "File reference for session id " + sessionId + ": " + fileReference); if (fileReference != null) { File rootDir = new File(Defaults.getDefaults().underVespaHome(componentRegistry.getConfigserverConfig().fileReferencesDir())); File sessionDir; FileDirectory fileDirectory = new FileDirectory(rootDir); try { sessionDir = fileDirectory.getFile(fileReference); } catch (IllegalArgumentException e) { log.log(Level.INFO, "File reference for session id " + sessionId + ": " + fileReference + " not found in " + fileDirectory); return Optional.empty(); } ApplicationId applicationId = sessionZKClient.readApplicationId() .orElseThrow(() -> new RuntimeException("Could not find application id for session " + sessionId)); log.log(Level.INFO, "Creating local session for tenant '" + tenantName + "' with session id " + sessionId); LocalSession localSession = createLocalSession(sessionDir, applicationId, sessionId); addLocalSession(localSession); return Optional.of(localSession); } return Optional.empty(); } private Optional<Long> getActiveSessionId(ApplicationId applicationId) { List<ApplicationId> applicationIds = applicationRepo.activeApplications(); return applicationIds.contains(applicationId) ? 
Optional.of(applicationRepo.requireActiveSessionOf(applicationId)) : Optional.empty(); } private long getNextSessionId() { return new SessionCounter(componentRegistry.getConfigCurator(), tenantName).nextSessionId(); } public Path getSessionPath(long sessionId) { return sessionsPath.append(String.valueOf(sessionId)); } Path getSessionStatePath(long sessionId) { return getSessionPath(sessionId).append(ConfigCurator.SESSIONSTATE_ZK_SUBPATH); } private SessionZooKeeperClient createSessionZooKeeperClient(long sessionId) { String serverId = componentRegistry.getConfigserverConfig().serverId(); return new SessionZooKeeperClient(curator, componentRegistry.getConfigCurator(), tenantName, sessionId, serverId); } private File getAndValidateExistingSessionAppDir(long sessionId) { File appDir = getSessionAppDir(sessionId); if (!appDir.exists() || !appDir.isDirectory()) { throw new IllegalArgumentException("Unable to find correct application directory for session " + sessionId); } return appDir; } private File getSessionAppDir(long sessionId) { return new TenantFileSystemDirs(componentRegistry.getConfigServerDB(), tenantName).getUserApplicationDir(sessionId); } private void addSessionStateWatcher(long sessionId, RemoteSession remoteSession, Optional<LocalSession> localSession) { if (sessionStateWatchers.containsKey(sessionId)) { localSession.ifPresent(session -> sessionStateWatchers.get(sessionId).addLocalSession(session)); } else { Curator.FileCache fileCache = curator.createFileCache(getSessionStatePath(sessionId).getAbsolute(), false); fileCache.addListener(this::nodeChanged); sessionStateWatchers.put(sessionId, new SessionStateWatcher(fileCache, remoteSession, localSession, metrics, zkWatcherExecutor, this)); } } @Override public String toString() { return getLocalSessions().toString(); } /** Returns the lock for session operations for the given session id. 
*/ public Lock lock(long sessionId) { return curator.lock(lockPath(sessionId), Duration.ofMinutes(1)); } public Clock clock() { return clock; } private Path lockPath(long sessionId) { return locksPath.append(String.valueOf(sessionId)); } public Transaction createActivateTransaction(Session session) { Transaction transaction = createSetStatusTransaction(session, Session.Status.ACTIVATE); transaction.add(applicationRepo.createPutTransaction(session.getApplicationId(), session.getSessionId()).operations()); return transaction; } private Transaction createSetStatusTransaction(Session session, Session.Status status) { return session.sessionZooKeeperClient.createWriteStatusTransaction(status); } void setPrepared(Session session) { session.setStatus(Session.Status.PREPARE); } private static class FileTransaction extends AbstractTransaction { public static FileTransaction from(FileOperation operation) { FileTransaction transaction = new FileTransaction(); transaction.add(operation); return transaction; } @Override public void prepare() { } @Override public void commit() { for (Operation operation : operations()) ((FileOperation)operation).commit(); } } /** Factory for file operations */ private static class FileOperations { /** Creates an operation which recursively deletes the given path */ public static DeleteOperation delete(String pathToDelete) { return new DeleteOperation(pathToDelete); } } private interface FileOperation extends Transaction.Operation { void commit(); } /** * Recursively deletes this path and everything below. * Succeeds with no action if the path does not exist. */ private static class DeleteOperation implements FileOperation { private final String pathToDelete; DeleteOperation(String pathToDelete) { this.pathToDelete = pathToDelete; } @Override public void commit() { IOUtils.recursiveDeleteDir(new File(pathToDelete)); } } }
class SessionRepository { private static final Logger log = Logger.getLogger(SessionRepository.class.getName()); private static final FilenameFilter sessionApplicationsFilter = (dir, name) -> name.matches("\\d+"); private static final long nonExistingActiveSessionId = 0; private final SessionCache<LocalSession> localSessionCache = new SessionCache<>(); private final SessionCache<RemoteSession> remoteSessionCache = new SessionCache<>(); private final Map<Long, SessionStateWatcher> sessionStateWatchers = new HashMap<>(); private final Duration sessionLifetime; private final Clock clock; private final Curator curator; private final Executor zkWatcherExecutor; private final TenantFileSystemDirs tenantFileSystemDirs; private final BooleanFlag distributeApplicationPackage; private final MetricUpdater metrics; private final Curator.DirectoryCache directoryCache; private final TenantApplications applicationRepo; private final SessionPreparer sessionPreparer; private final Path sessionsPath; private final TenantName tenantName; private final GlobalComponentRegistry componentRegistry; private final Path locksPath; public SessionRepository(TenantName tenantName, GlobalComponentRegistry componentRegistry, TenantApplications applicationRepo, FlagSource flagSource, SessionPreparer sessionPreparer) { this.tenantName = tenantName; this.componentRegistry = componentRegistry; this.sessionsPath = TenantRepository.getSessionsPath(tenantName); this.clock = componentRegistry.getClock(); this.curator = componentRegistry.getCurator(); this.sessionLifetime = Duration.ofSeconds(componentRegistry.getConfigserverConfig().sessionLifetime()); this.zkWatcherExecutor = command -> componentRegistry.getZkWatcherExecutor().execute(tenantName, command); this.tenantFileSystemDirs = new TenantFileSystemDirs(componentRegistry.getConfigServerDB(), tenantName); this.applicationRepo = applicationRepo; this.sessionPreparer = sessionPreparer; this.distributeApplicationPackage = 
Flags.CONFIGSERVER_DISTRIBUTE_APPLICATION_PACKAGE.bindTo(flagSource); this.metrics = componentRegistry.getMetrics().getOrCreateMetricUpdater(Metrics.createDimensions(tenantName)); this.locksPath = TenantRepository.getLocksPath(tenantName); loadSessions(); this.directoryCache = curator.createDirectoryCache(sessionsPath.getAbsolute(), false, false, componentRegistry.getZkCacheExecutor()); this.directoryCache.addListener(this::childEvent); this.directoryCache.start(); } private void loadSessions() { loadLocalSessions(); initializeRemoteSessions(); } public synchronized void addLocalSession(LocalSession session) { localSessionCache.putSession(session); long sessionId = session.getSessionId(); RemoteSession remoteSession = createRemoteSession(sessionId); addSessionStateWatcher(sessionId, remoteSession, Optional.of(session)); } public LocalSession getLocalSession(long sessionId) { return localSessionCache.getSession(sessionId); } public List<LocalSession> getLocalSessions() { return localSessionCache.getSessions(); } private void loadLocalSessions() { File[] sessions = tenantFileSystemDirs.sessionsPath().listFiles(sessionApplicationsFilter); if (sessions == null) return; for (File session : sessions) { try { addLocalSession(createSessionFromId(Long.parseLong(session.getName()))); } catch (IllegalArgumentException e) { log.log(Level.WARNING, "Could not load session '" + session.getAbsolutePath() + "':" + e.getMessage() + ", skipping it."); } } } public ConfigChangeActions prepareLocalSession(LocalSession session, DeployLogger logger, PrepareParams params, Optional<ApplicationSet> currentActiveApplicationSet, Path tenantPath, Instant now) { applicationRepo.createApplication(params.getApplicationId()); logger.log(Level.FINE, "Created application " + params.getApplicationId()); long sessionId = session.getSessionId(); SessionZooKeeperClient sessionZooKeeperClient = createSessionZooKeeperClient(sessionId); Curator.CompletionWaiter waiter = 
sessionZooKeeperClient.createPrepareWaiter(); ConfigChangeActions actions = sessionPreparer.prepare(applicationRepo.getHostValidator(), logger, params, currentActiveApplicationSet, tenantPath, now, getSessionAppDir(sessionId), session.getApplicationPackage(), sessionZooKeeperClient) .getConfigChangeActions(); setPrepared(session); waiter.awaitCompletion(params.getTimeoutBudget().timeLeft()); return actions; } public void deleteExpiredSessions(Map<ApplicationId, Long> activeSessions) { log.log(Level.FINE, "Purging old sessions for tenant '" + tenantName + "'"); try { for (LocalSession candidate : localSessionCache.getSessions()) { Instant createTime = candidate.getCreateTime(); log.log(Level.FINE, "Candidate session for deletion: " + candidate.getSessionId() + ", created: " + createTime); if (hasExpired(candidate) && !isActiveSession(candidate)) { deleteLocalSession(candidate); } else if (createTime.plus(Duration.ofDays(1)).isBefore(clock.instant())) { Optional<ApplicationId> applicationId = candidate.getOptionalApplicationId(); if (applicationId.isEmpty()) continue; Long activeSession = activeSessions.get(applicationId.get()); if (activeSession == null || activeSession != candidate.getSessionId()) { deleteLocalSession(candidate); log.log(Level.INFO, "Deleted inactive session " + candidate.getSessionId() + " created " + createTime + " for '" + applicationId + "'"); } } } } catch (Throwable e) { log.log(Level.WARNING, "Error when purging old sessions ", e); } log.log(Level.FINE, "Done purging old sessions"); } private boolean hasExpired(LocalSession candidate) { return (candidate.getCreateTime().plus(sessionLifetime).isBefore(clock.instant())); } private boolean isActiveSession(LocalSession candidate) { return candidate.getStatus() == Session.Status.ACTIVATE; } public void deleteLocalSession(LocalSession session) { long sessionId = session.getSessionId(); try (Lock lock = lock(sessionId)) { log.log(Level.FINE, "Deleting local session " + sessionId); 
SessionStateWatcher watcher = sessionStateWatchers.remove(sessionId); if (watcher != null) watcher.close(); localSessionCache.removeSession(sessionId); deletePersistentData(sessionId); } } private void deletePersistentData(long sessionId) { NestedTransaction transaction = new NestedTransaction(); SessionZooKeeperClient sessionZooKeeperClient = createSessionZooKeeperClient(sessionId); transaction.add(sessionZooKeeperClient.deleteTransaction(), FileTransaction.class); transaction.add(FileTransaction.from(FileOperations.delete(getSessionAppDir(sessionId).getAbsolutePath()))); transaction.commit(); } public void close() { deleteAllSessions(); tenantFileSystemDirs.delete(); try { if (directoryCache != null) { directoryCache.close(); } } catch (Exception e) { log.log(Level.WARNING, "Exception when closing path cache", e); } finally { checkForRemovedSessions(new ArrayList<>()); } } private void deleteAllSessions() { List<LocalSession> sessions = new ArrayList<>(localSessionCache.getSessions()); for (LocalSession session : sessions) { deleteLocalSession(session); } } public RemoteSession getRemoteSession(long sessionId) { return remoteSessionCache.getSession(sessionId); } public List<Long> getRemoteSessions() { return getSessionList(curator.getChildren(sessionsPath)); } public void addRemoteSession(RemoteSession session) { remoteSessionCache.putSession(session); metrics.incAddedSessions(); } public int deleteExpiredRemoteSessions(Clock clock, Duration expiryTime) { int deleted = 0; for (long sessionId : getRemoteSessions()) { RemoteSession session = remoteSessionCache.getSession(sessionId); if (session == null) continue; if (session.getStatus() == Session.Status.ACTIVATE) continue; if (sessionHasExpired(session.getCreateTime(), expiryTime, clock)) { log.log(Level.FINE, "Remote session " + sessionId + " for " + tenantName + " has expired, deleting it"); deleteSession(session); deleted++; } } return deleted; } public void deleteSession(RemoteSession session) { 
SessionZooKeeperClient sessionZooKeeperClient = createSessionZooKeeperClient(session.getSessionId()); Transaction transaction = sessionZooKeeperClient.deleteTransaction(); transaction.commit(); transaction.close(); } public int deleteExpiredLocks(Clock clock, Duration expiryTime) { int deleted = 0; for (var lock : curator.getChildren(locksPath)) { Path path = locksPath.append(lock); if (zooKeeperNodeCreated(path).orElse(clock.instant()).isBefore(clock.instant().minus(expiryTime))) { log.log(Level.FINE, "Lock " + path + " has expired, deleting it"); curator.delete(path); deleted++; } } return deleted; } private Optional<Instant> zooKeeperNodeCreated(Path path) { return curator.getStat(path).map(s -> Instant.ofEpochMilli(s.getCtime())); } private boolean sessionHasExpired(Instant created, Duration expiryTime, Clock clock) { return (created.plus(expiryTime).isBefore(clock.instant())); } private List<Long> getSessionListFromDirectoryCache(List<ChildData> children) { return getSessionList(children.stream() .map(child -> Path.fromString(child.getPath()).getName()) .collect(Collectors.toList())); } private List<Long> getSessionList(List<String> children) { return children.stream().map(Long::parseLong).collect(Collectors.toList()); } private void initializeRemoteSessions() throws NumberFormatException { getRemoteSessions().forEach(this::sessionAdded); } private synchronized void sessionsChanged() throws NumberFormatException { List<Long> sessions = getSessionListFromDirectoryCache(directoryCache.getCurrentData()); checkForRemovedSessions(sessions); checkForAddedSessions(sessions); } private void checkForRemovedSessions(List<Long> sessions) { for (RemoteSession session : remoteSessionCache.getSessions()) if ( ! 
sessions.contains(session.getSessionId())) sessionRemoved(session.getSessionId()); } private void checkForAddedSessions(List<Long> sessions) { for (Long sessionId : sessions) if (remoteSessionCache.getSession(sessionId) == null) sessionAdded(sessionId); } /** * A session for which we don't have a watcher, i.e. hitherto unknown to us. * * @param sessionId session id for the new session */ public void sessionAdded(long sessionId) { SessionZooKeeperClient sessionZKClient = createSessionZooKeeperClient(sessionId); if (sessionZKClient.readStatus().equals(Session.Status.DELETE)) return; log.log(Level.FINE, () -> "Adding remote session to SessionRepository: " + sessionId); RemoteSession remoteSession = createRemoteSession(sessionId); loadSessionIfActive(remoteSession); addRemoteSession(remoteSession); Optional<LocalSession> localSession = Optional.empty(); if (distributeApplicationPackage()) localSession = createLocalSessionUsingDistributedApplicationPackage(sessionId); addSessionStateWatcher(sessionId, remoteSession, localSession); } void activate(RemoteSession session) { long sessionId = session.getSessionId(); Curator.CompletionWaiter waiter = createSessionZooKeeperClient(sessionId).getActiveWaiter(); log.log(Level.FINE, () -> session.logPre() + "Getting session from repo: " + sessionId); ApplicationSet app = ensureApplicationLoaded(session); log.log(Level.FINE, () -> session.logPre() + "Reloading config for " + sessionId); applicationRepo.reloadConfig(app); log.log(Level.FINE, () -> session.logPre() + "Notifying " + waiter); notifyCompletion(waiter, session); log.log(Level.INFO, session.logPre() + "Session activated: " + sessionId); } public void deleteSession(RemoteSession remoteSession, Optional<LocalSession> localSession) { localSession.ifPresent(this::deleteLocalSession); deactivate(remoteSession); } boolean distributeApplicationPackage() { return distributeApplicationPackage.value(); } private void sessionRemoved(long sessionId) { SessionStateWatcher watcher = 
sessionStateWatchers.remove(sessionId); if (watcher != null) watcher.close(); remoteSessionCache.removeSession(sessionId); metrics.incRemovedSessions(); } private void loadSessionIfActive(RemoteSession session) { for (ApplicationId applicationId : applicationRepo.activeApplications()) { if (applicationRepo.requireActiveSessionOf(applicationId) == session.getSessionId()) { log.log(Level.FINE, () -> "Found active application for session " + session.getSessionId() + " , loading it"); applicationRepo.reloadConfig(ensureApplicationLoaded(session)); log.log(Level.INFO, session.logPre() + "Application activated successfully: " + applicationId + " (generation " + session.getSessionId() + ")"); return; } } } void prepareRemoteSession(RemoteSession session) { SessionZooKeeperClient sessionZooKeeperClient = createSessionZooKeeperClient(session.getSessionId()); Curator.CompletionWaiter waiter = sessionZooKeeperClient.getPrepareWaiter(); ensureApplicationLoaded(session); notifyCompletion(waiter, session); } public synchronized ApplicationSet ensureApplicationLoaded(RemoteSession session) { Optional<ApplicationSet> applicationSet = session.applicationSet(); if (applicationSet.isPresent()) { return applicationSet.get(); } ApplicationSet newApplicationSet = loadApplication(session); RemoteSession newSession = new RemoteSession(session.getTenantName(), session.getSessionId(), session.getSessionZooKeeperClient(), Optional.of(newApplicationSet)); remoteSessionCache.putSession(newSession); return newApplicationSet; } void confirmUpload(RemoteSession session) { Curator.CompletionWaiter waiter = session.getSessionZooKeeperClient().getUploadWaiter(); long sessionId = session.getSessionId(); log.log(Level.FINE, "Notifying upload waiter for session " + sessionId); notifyCompletion(waiter, session); log.log(Level.FINE, "Done notifying upload for session " + sessionId); } void notifyCompletion(Curator.CompletionWaiter completionWaiter, RemoteSession session) { try { 
completionWaiter.notifyCompletion(); } catch (RuntimeException e) { Set<Class<? extends KeeperException>> acceptedExceptions = Set.of(KeeperException.NoNodeException.class, KeeperException.NodeExistsException.class); Class<? extends Throwable> exceptionClass = e.getCause().getClass(); if (acceptedExceptions.contains(exceptionClass)) log.log(Level.FINE, "Not able to notify completion for session " + session.getSessionId() + " (" + completionWaiter + ")," + " node " + (exceptionClass.equals(KeeperException.NoNodeException.class) ? "has been deleted" : "already exists")); else throw e; } } private ApplicationSet loadApplication(RemoteSession session) { SessionZooKeeperClient sessionZooKeeperClient = createSessionZooKeeperClient(session.getSessionId()); ApplicationPackage applicationPackage = sessionZooKeeperClient.loadApplicationPackage(); ActivatedModelsBuilder builder = new ActivatedModelsBuilder(session.getTenantName(), session.getSessionId(), sessionZooKeeperClient, componentRegistry); Optional<AllocatedHosts> allocatedHosts = applicationPackage.getAllocatedHosts(); return ApplicationSet.fromList(builder.buildModels(session.getApplicationId(), sessionZooKeeperClient.readDockerImageRepository(), sessionZooKeeperClient.readVespaVersion(), applicationPackage, new SettableOptional<>(allocatedHosts), clock.instant())); } private void nodeChanged() { zkWatcherExecutor.execute(() -> { Multiset<Session.Status> sessionMetrics = HashMultiset.create(); for (RemoteSession session : remoteSessionCache.getSessions()) { sessionMetrics.add(session.getStatus()); } metrics.setNewSessions(sessionMetrics.count(Session.Status.NEW)); metrics.setPreparedSessions(sessionMetrics.count(Session.Status.PREPARE)); metrics.setActivatedSessions(sessionMetrics.count(Session.Status.ACTIVATE)); metrics.setDeactivatedSessions(sessionMetrics.count(Session.Status.DEACTIVATE)); }); } @SuppressWarnings("unused") private void childEvent(CuratorFramework ignored, PathChildrenCacheEvent event) { 
zkWatcherExecutor.execute(() -> { log.log(Level.FINE, () -> "Got child event: " + event); switch (event.getType()) { case CHILD_ADDED: sessionsChanged(); synchronizeOnNew(getSessionListFromDirectoryCache(Collections.singletonList(event.getData()))); break; case CHILD_REMOVED: case CONNECTION_RECONNECTED: sessionsChanged(); break; } }); } private void synchronizeOnNew(List<Long> sessionList) { for (long sessionId : sessionList) { RemoteSession session = remoteSessionCache.getSession(sessionId); if (session == null) continue; log.log(Level.FINE, () -> session.logPre() + "Confirming upload for session " + sessionId); confirmUpload(session); } } /** * Creates a new deployment session from an application package. * * @param applicationDirectory a File pointing to an application. * @param applicationId application id for this new session. * @param timeoutBudget Timeout for creating session and waiting for other servers. * @return a new session */ public LocalSession createSession(File applicationDirectory, ApplicationId applicationId, TimeoutBudget timeoutBudget, Optional<Long> activeSessionId) { return create(applicationDirectory, applicationId, activeSessionId, false, timeoutBudget); } public RemoteSession createRemoteSession(long sessionId) { SessionZooKeeperClient sessionZKClient = createSessionZooKeeperClient(sessionId); return new RemoteSession(tenantName, sessionId, sessionZKClient); } private void ensureSessionPathDoesNotExist(long sessionId) { Path sessionPath = getSessionPath(sessionId); if (componentRegistry.getConfigCurator().exists(sessionPath.getAbsolute())) { throw new IllegalArgumentException("Path " + sessionPath.getAbsolute() + " already exists in ZooKeeper"); } } private ApplicationPackage createApplication(File userDir, File configApplicationDir, ApplicationId applicationId, long sessionId, Optional<Long> currentlyActiveSessionId, boolean internalRedeploy) { long deployTimestamp = System.currentTimeMillis(); String user = System.getenv("USER"); if 
(user == null) { user = "unknown"; } DeployData deployData = new DeployData(user, userDir.getAbsolutePath(), applicationId, deployTimestamp, internalRedeploy, sessionId, currentlyActiveSessionId.orElse(nonExistingActiveSessionId)); return FilesApplicationPackage.fromFileWithDeployData(configApplicationDir, deployData); } private LocalSession createSessionFromApplication(ApplicationPackage applicationPackage, long sessionId, TimeoutBudget timeoutBudget, Clock clock) { log.log(Level.FINE, TenantRepository.logPre(tenantName) + "Creating session " + sessionId + " in ZooKeeper"); SessionZooKeeperClient sessionZKClient = createSessionZooKeeperClient(sessionId); sessionZKClient.createNewSession(clock.instant()); Curator.CompletionWaiter waiter = sessionZKClient.getUploadWaiter(); LocalSession session = new LocalSession(tenantName, sessionId, applicationPackage, sessionZKClient); waiter.awaitCompletion(timeoutBudget.timeLeft()); return session; } /** * Creates a new deployment session from an already existing session. * * @param existingSession the session to use as base * @param logger a deploy logger where the deploy log will be written. * @param internalRedeploy whether this session is for a system internal redeploy — not an application package change * @param timeoutBudget timeout for creating session and waiting for other servers. 
* @return a new session */ public LocalSession createSessionFromExisting(Session existingSession, DeployLogger logger, boolean internalRedeploy, TimeoutBudget timeoutBudget) { File existingApp = getSessionAppDir(existingSession.getSessionId()); ApplicationId existingApplicationId = existingSession.getApplicationId(); Optional<Long> activeSessionId = getActiveSessionId(existingApplicationId); logger.log(Level.FINE, "Create new session for application id '" + existingApplicationId + "' from existing active session " + activeSessionId); LocalSession session = create(existingApp, existingApplicationId, activeSessionId, internalRedeploy, timeoutBudget); session.setApplicationId(existingApplicationId); if (distributeApplicationPackage() && existingSession.getApplicationPackageReference() != null) { session.setApplicationPackageReference(existingSession.getApplicationPackageReference()); } session.setVespaVersion(existingSession.getVespaVersion()); session.setDockerImageRepository(existingSession.getDockerImageRepository()); session.setAthenzDomain(existingSession.getAthenzDomain()); return session; } private LocalSession create(File applicationFile, ApplicationId applicationId, Optional<Long> currentlyActiveSessionId, boolean internalRedeploy, TimeoutBudget timeoutBudget) { long sessionId = getNextSessionId(); try { ensureSessionPathDoesNotExist(sessionId); ApplicationPackage app = createApplicationPackage(applicationFile, applicationId, sessionId, currentlyActiveSessionId, internalRedeploy); return createSessionFromApplication(app, sessionId, timeoutBudget, clock); } catch (Exception e) { throw new RuntimeException("Error creating session " + sessionId, e); } } /** * This method is used when creating a session based on a remote session and the distributed application package * It does not wait for session being created on other servers */ private LocalSession createLocalSession(File applicationFile, ApplicationId applicationId, long sessionId) { try { Optional<Long> 
currentlyActiveSessionId = getActiveSessionId(applicationId); ApplicationPackage applicationPackage = createApplicationPackage(applicationFile, applicationId, sessionId, currentlyActiveSessionId, false); SessionZooKeeperClient sessionZooKeeperClient = createSessionZooKeeperClient(sessionId); return new LocalSession(tenantName, sessionId, applicationPackage, sessionZooKeeperClient); } catch (Exception e) { throw new RuntimeException("Error creating session " + sessionId, e); } } private ApplicationPackage createApplicationPackage(File applicationFile, ApplicationId applicationId, long sessionId, Optional<Long> currentlyActiveSessionId, boolean internalRedeploy) throws IOException { File userApplicationDir = getSessionAppDir(sessionId); copyApp(applicationFile, userApplicationDir); ApplicationPackage applicationPackage = createApplication(applicationFile, userApplicationDir, applicationId, sessionId, currentlyActiveSessionId, internalRedeploy); applicationPackage.writeMetaData(); return applicationPackage; } private void copyApp(File sourceDir, File destinationDir) throws IOException { if (destinationDir.exists()) throw new RuntimeException("Destination dir " + destinationDir + " already exists"); if (! 
sourceDir.isDirectory()) throw new IllegalArgumentException(sourceDir.getAbsolutePath() + " is not a directory"); java.nio.file.Path tempDestinationDir = null; try { tempDestinationDir = Files.createTempDirectory(destinationDir.getParentFile().toPath(), "app-package"); log.log(Level.FINE, "Copying dir " + sourceDir.getAbsolutePath() + " to " + tempDestinationDir.toFile().getAbsolutePath()); IOUtils.copyDirectory(sourceDir, tempDestinationDir.toFile()); log.log(Level.FINE, "Moving " + tempDestinationDir + " to " + destinationDir.getAbsolutePath()); Files.move(tempDestinationDir, destinationDir.toPath(), StandardCopyOption.ATOMIC_MOVE); } finally { if (tempDestinationDir != null) IOUtils.recursiveDeleteDir(tempDestinationDir.toFile()); } } /** * Returns a new session instance for the given session id. */ LocalSession createSessionFromId(long sessionId) { File sessionDir = getAndValidateExistingSessionAppDir(sessionId); ApplicationPackage applicationPackage = FilesApplicationPackage.fromFile(sessionDir); SessionZooKeeperClient sessionZKClient = createSessionZooKeeperClient(sessionId); return new LocalSession(tenantName, sessionId, applicationPackage, sessionZKClient); } /** * Returns a new local session for the given session id if it does not already exist. 
* Will also add the session to the local session cache if necessary */ public Optional<LocalSession> createLocalSessionUsingDistributedApplicationPackage(long sessionId) { if (applicationRepo.hasLocalSession(sessionId)) { log.log(Level.FINE, "Local session for session id " + sessionId + " already exists"); return Optional.of(createSessionFromId(sessionId)); } SessionZooKeeperClient sessionZKClient = createSessionZooKeeperClient(sessionId); FileReference fileReference = sessionZKClient.readApplicationPackageReference(); log.log(Level.FINE, "File reference for session id " + sessionId + ": " + fileReference); if (fileReference != null) { File rootDir = new File(Defaults.getDefaults().underVespaHome(componentRegistry.getConfigserverConfig().fileReferencesDir())); File sessionDir; FileDirectory fileDirectory = new FileDirectory(rootDir); try { sessionDir = fileDirectory.getFile(fileReference); } catch (IllegalArgumentException e) { log.log(Level.INFO, "File reference for session id " + sessionId + ": " + fileReference + " not found in " + fileDirectory); return Optional.empty(); } ApplicationId applicationId = sessionZKClient.readApplicationId() .orElseThrow(() -> new RuntimeException("Could not find application id for session " + sessionId)); log.log(Level.INFO, "Creating local session for tenant '" + tenantName + "' with session id " + sessionId); LocalSession localSession = createLocalSession(sessionDir, applicationId, sessionId); addLocalSession(localSession); return Optional.of(localSession); } return Optional.empty(); } private Optional<Long> getActiveSessionId(ApplicationId applicationId) { List<ApplicationId> applicationIds = applicationRepo.activeApplications(); return applicationIds.contains(applicationId) ? 
Optional.of(applicationRepo.requireActiveSessionOf(applicationId)) : Optional.empty(); } private long getNextSessionId() { return new SessionCounter(componentRegistry.getConfigCurator(), tenantName).nextSessionId(); } public Path getSessionPath(long sessionId) { return sessionsPath.append(String.valueOf(sessionId)); } Path getSessionStatePath(long sessionId) { return getSessionPath(sessionId).append(ConfigCurator.SESSIONSTATE_ZK_SUBPATH); } private SessionZooKeeperClient createSessionZooKeeperClient(long sessionId) { String serverId = componentRegistry.getConfigserverConfig().serverId(); return new SessionZooKeeperClient(curator, componentRegistry.getConfigCurator(), tenantName, sessionId, serverId); } private File getAndValidateExistingSessionAppDir(long sessionId) { File appDir = getSessionAppDir(sessionId); if (!appDir.exists() || !appDir.isDirectory()) { throw new IllegalArgumentException("Unable to find correct application directory for session " + sessionId); } return appDir; } private File getSessionAppDir(long sessionId) { return new TenantFileSystemDirs(componentRegistry.getConfigServerDB(), tenantName).getUserApplicationDir(sessionId); } private void addSessionStateWatcher(long sessionId, RemoteSession remoteSession, Optional<LocalSession> localSession) { if (sessionStateWatchers.containsKey(sessionId)) { localSession.ifPresent(session -> sessionStateWatchers.get(sessionId).addLocalSession(session)); } else { Curator.FileCache fileCache = curator.createFileCache(getSessionStatePath(sessionId).getAbsolute(), false); fileCache.addListener(this::nodeChanged); sessionStateWatchers.put(sessionId, new SessionStateWatcher(fileCache, remoteSession, localSession, metrics, zkWatcherExecutor, this)); } } @Override public String toString() { return getLocalSessions().toString(); } /** Returns the lock for session operations for the given session id. 
*/ public Lock lock(long sessionId) { return curator.lock(lockPath(sessionId), Duration.ofMinutes(1)); } public Clock clock() { return clock; } private Path lockPath(long sessionId) { return locksPath.append(String.valueOf(sessionId)); } public Transaction createActivateTransaction(Session session) { Transaction transaction = createSetStatusTransaction(session, Session.Status.ACTIVATE); transaction.add(applicationRepo.createPutTransaction(session.getApplicationId(), session.getSessionId()).operations()); return transaction; } private Transaction createSetStatusTransaction(Session session, Session.Status status) { return session.sessionZooKeeperClient.createWriteStatusTransaction(status); } void setPrepared(Session session) { session.setStatus(Session.Status.PREPARE); } private static class FileTransaction extends AbstractTransaction { public static FileTransaction from(FileOperation operation) { FileTransaction transaction = new FileTransaction(); transaction.add(operation); return transaction; } @Override public void prepare() { } @Override public void commit() { for (Operation operation : operations()) ((FileOperation)operation).commit(); } } /** Factory for file operations */ private static class FileOperations { /** Creates an operation which recursively deletes the given path */ public static DeleteOperation delete(String pathToDelete) { return new DeleteOperation(pathToDelete); } } private interface FileOperation extends Transaction.Operation { void commit(); } /** * Recursively deletes this path and everything below. * Succeeds with no action if the path does not exist. */ private static class DeleteOperation implements FileOperation { private final String pathToDelete; DeleteOperation(String pathToDelete) { this.pathToDelete = pathToDelete; } @Override public void commit() { IOUtils.recursiveDeleteDir(new File(pathToDelete)); } } }
Shouldn't this call `deactivate(remoteSession)` instead? `remoteSession.deactivate()` returns a new, deactivated copy of the session and the result is discarded here, so this line has no effect — the deactivated session is never stored back in `remoteSessionCache`.
/**
 * Deletes the given session: removes the local session (if one exists) and marks the
 * remote session as deactivated.
 *
 * @param remoteSession the remote session to deactivate
 * @param localSession  the corresponding local session, if any; deleted when present
 */
public void deleteSession(RemoteSession remoteSession, Optional<LocalSession> localSession) {
    localSession.ifPresent(this::deleteLocalSession);
    // Bug fix: the original called remoteSession.deactivate() and discarded the result.
    // RemoteSession.deactivate() returns a new, deactivated copy rather than mutating the
    // receiver, so that call was a no-op. deactivate(remoteSession) stores the deactivated
    // copy back into remoteSessionCache, making the state change actually take effect.
    deactivate(remoteSession);
}
remoteSession.deactivate();
/**
 * Deletes the given session. Any existing local session is removed first, then the remote
 * session is deactivated (the deactivated copy replaces it in the remote session cache).
 *
 * @param remoteSession the remote session to deactivate
 * @param localSession  the corresponding local session, if any
 */
public void deleteSession(RemoteSession remoteSession, Optional<LocalSession> localSession) {
    if (localSession.isPresent()) {
        deleteLocalSession(localSession.get());
    }
    deactivate(remoteSession);
}
class SessionRepository { private static final Logger log = Logger.getLogger(SessionRepository.class.getName()); private static final FilenameFilter sessionApplicationsFilter = (dir, name) -> name.matches("\\d+"); private static final long nonExistingActiveSessionId = 0; private final SessionCache<LocalSession> localSessionCache = new SessionCache<>(); private final SessionCache<RemoteSession> remoteSessionCache = new SessionCache<>(); private final Map<Long, SessionStateWatcher> sessionStateWatchers = new HashMap<>(); private final Duration sessionLifetime; private final Clock clock; private final Curator curator; private final Executor zkWatcherExecutor; private final TenantFileSystemDirs tenantFileSystemDirs; private final BooleanFlag distributeApplicationPackage; private final MetricUpdater metrics; private final Curator.DirectoryCache directoryCache; private final TenantApplications applicationRepo; private final SessionPreparer sessionPreparer; private final Path sessionsPath; private final TenantName tenantName; private final GlobalComponentRegistry componentRegistry; private final Path locksPath; public SessionRepository(TenantName tenantName, GlobalComponentRegistry componentRegistry, TenantApplications applicationRepo, FlagSource flagSource, SessionPreparer sessionPreparer) { this.tenantName = tenantName; this.componentRegistry = componentRegistry; this.sessionsPath = TenantRepository.getSessionsPath(tenantName); this.clock = componentRegistry.getClock(); this.curator = componentRegistry.getCurator(); this.sessionLifetime = Duration.ofSeconds(componentRegistry.getConfigserverConfig().sessionLifetime()); this.zkWatcherExecutor = command -> componentRegistry.getZkWatcherExecutor().execute(tenantName, command); this.tenantFileSystemDirs = new TenantFileSystemDirs(componentRegistry.getConfigServerDB(), tenantName); this.applicationRepo = applicationRepo; this.sessionPreparer = sessionPreparer; this.distributeApplicationPackage = 
Flags.CONFIGSERVER_DISTRIBUTE_APPLICATION_PACKAGE.bindTo(flagSource); this.metrics = componentRegistry.getMetrics().getOrCreateMetricUpdater(Metrics.createDimensions(tenantName)); this.locksPath = TenantRepository.getLocksPath(tenantName); loadSessions(); this.directoryCache = curator.createDirectoryCache(sessionsPath.getAbsolute(), false, false, componentRegistry.getZkCacheExecutor()); this.directoryCache.addListener(this::childEvent); this.directoryCache.start(); } private void loadSessions() { loadLocalSessions(); initializeRemoteSessions(); } public synchronized void addLocalSession(LocalSession session) { localSessionCache.addSession(session); long sessionId = session.getSessionId(); RemoteSession remoteSession = createRemoteSession(sessionId); addSessionStateWatcher(sessionId, remoteSession, Optional.of(session)); } public LocalSession getLocalSession(long sessionId) { return localSessionCache.getSession(sessionId); } public List<LocalSession> getLocalSessions() { return localSessionCache.getSessions(); } private void loadLocalSessions() { File[] sessions = tenantFileSystemDirs.sessionsPath().listFiles(sessionApplicationsFilter); if (sessions == null) return; for (File session : sessions) { try { addLocalSession(createSessionFromId(Long.parseLong(session.getName()))); } catch (IllegalArgumentException e) { log.log(Level.WARNING, "Could not load session '" + session.getAbsolutePath() + "':" + e.getMessage() + ", skipping it."); } } } public ConfigChangeActions prepareLocalSession(LocalSession session, DeployLogger logger, PrepareParams params, Optional<ApplicationSet> currentActiveApplicationSet, Path tenantPath, Instant now) { applicationRepo.createApplication(params.getApplicationId()); logger.log(Level.FINE, "Created application " + params.getApplicationId()); long sessionId = session.getSessionId(); SessionZooKeeperClient sessionZooKeeperClient = createSessionZooKeeperClient(sessionId); Curator.CompletionWaiter waiter = 
sessionZooKeeperClient.createPrepareWaiter(); ConfigChangeActions actions = sessionPreparer.prepare(applicationRepo.getHostValidator(), logger, params, currentActiveApplicationSet, tenantPath, now, getSessionAppDir(sessionId), session.getApplicationPackage(), sessionZooKeeperClient) .getConfigChangeActions(); setPrepared(session); waiter.awaitCompletion(params.getTimeoutBudget().timeLeft()); return actions; } public void deleteExpiredSessions(Map<ApplicationId, Long> activeSessions) { log.log(Level.FINE, "Purging old sessions for tenant '" + tenantName + "'"); try { for (LocalSession candidate : localSessionCache.getSessions()) { Instant createTime = candidate.getCreateTime(); log.log(Level.FINE, "Candidate session for deletion: " + candidate.getSessionId() + ", created: " + createTime); if (hasExpired(candidate) && !isActiveSession(candidate)) { deleteLocalSession(candidate); } else if (createTime.plus(Duration.ofDays(1)).isBefore(clock.instant())) { Optional<ApplicationId> applicationId = candidate.getOptionalApplicationId(); if (applicationId.isEmpty()) continue; Long activeSession = activeSessions.get(applicationId.get()); if (activeSession == null || activeSession != candidate.getSessionId()) { deleteLocalSession(candidate); log.log(Level.INFO, "Deleted inactive session " + candidate.getSessionId() + " created " + createTime + " for '" + applicationId + "'"); } } } } catch (Throwable e) { log.log(Level.WARNING, "Error when purging old sessions ", e); } log.log(Level.FINE, "Done purging old sessions"); } private boolean hasExpired(LocalSession candidate) { return (candidate.getCreateTime().plus(sessionLifetime).isBefore(clock.instant())); } private boolean isActiveSession(LocalSession candidate) { return candidate.getStatus() == Session.Status.ACTIVATE; } public void deleteLocalSession(LocalSession session) { long sessionId = session.getSessionId(); try (Lock lock = lock(sessionId)) { log.log(Level.FINE, "Deleting local session " + sessionId); 
SessionStateWatcher watcher = sessionStateWatchers.remove(sessionId); if (watcher != null) watcher.close(); localSessionCache.removeSession(sessionId); deletePersistentData(sessionId); } } private void deletePersistentData(long sessionId) { NestedTransaction transaction = new NestedTransaction(); SessionZooKeeperClient sessionZooKeeperClient = createSessionZooKeeperClient(sessionId); transaction.add(sessionZooKeeperClient.deleteTransaction(), FileTransaction.class); transaction.add(FileTransaction.from(FileOperations.delete(getSessionAppDir(sessionId).getAbsolutePath()))); transaction.commit(); } public void close() { deleteAllSessions(); tenantFileSystemDirs.delete(); try { if (directoryCache != null) { directoryCache.close(); } } catch (Exception e) { log.log(Level.WARNING, "Exception when closing path cache", e); } finally { checkForRemovedSessions(new ArrayList<>()); } } private void deleteAllSessions() { List<LocalSession> sessions = new ArrayList<>(localSessionCache.getSessions()); for (LocalSession session : sessions) { deleteLocalSession(session); } } public RemoteSession getRemoteSession(long sessionId) { return remoteSessionCache.getSession(sessionId); } public List<Long> getRemoteSessions() { return getSessionList(curator.getChildren(sessionsPath)); } public void addRemoteSession(RemoteSession session) { remoteSessionCache.addSession(session); metrics.incAddedSessions(); } public int deleteExpiredRemoteSessions(Clock clock, Duration expiryTime) { int deleted = 0; for (long sessionId : getRemoteSessions()) { RemoteSession session = remoteSessionCache.getSession(sessionId); if (session == null) continue; if (session.getStatus() == Session.Status.ACTIVATE) continue; if (sessionHasExpired(session.getCreateTime(), expiryTime, clock)) { log.log(Level.FINE, "Remote session " + sessionId + " for " + tenantName + " has expired, deleting it"); deleteSession(session); deleted++; } } return deleted; } public void deactivate(RemoteSession remoteSession) { 
remoteSessionCache.addSession(remoteSession.deactivate()); } public void deleteSession(RemoteSession session) { SessionZooKeeperClient sessionZooKeeperClient = createSessionZooKeeperClient(session.getSessionId()); Transaction transaction = sessionZooKeeperClient.deleteTransaction(); transaction.commit(); transaction.close(); } public int deleteExpiredLocks(Clock clock, Duration expiryTime) { int deleted = 0; for (var lock : curator.getChildren(locksPath)) { Path path = locksPath.append(lock); if (zooKeeperNodeCreated(path).orElse(clock.instant()).isBefore(clock.instant().minus(expiryTime))) { log.log(Level.FINE, "Lock " + path + " has expired, deleting it"); curator.delete(path); deleted++; } } return deleted; } private Optional<Instant> zooKeeperNodeCreated(Path path) { return curator.getStat(path).map(s -> Instant.ofEpochMilli(s.getCtime())); } private boolean sessionHasExpired(Instant created, Duration expiryTime, Clock clock) { return (created.plus(expiryTime).isBefore(clock.instant())); } private List<Long> getSessionListFromDirectoryCache(List<ChildData> children) { return getSessionList(children.stream() .map(child -> Path.fromString(child.getPath()).getName()) .collect(Collectors.toList())); } private List<Long> getSessionList(List<String> children) { return children.stream().map(Long::parseLong).collect(Collectors.toList()); } private void initializeRemoteSessions() throws NumberFormatException { getRemoteSessions().forEach(this::sessionAdded); } private synchronized void sessionsChanged() throws NumberFormatException { List<Long> sessions = getSessionListFromDirectoryCache(directoryCache.getCurrentData()); checkForRemovedSessions(sessions); checkForAddedSessions(sessions); } private void checkForRemovedSessions(List<Long> sessions) { for (RemoteSession session : remoteSessionCache.getSessions()) if ( ! 
sessions.contains(session.getSessionId())) sessionRemoved(session.getSessionId()); } private void checkForAddedSessions(List<Long> sessions) { for (Long sessionId : sessions) if (remoteSessionCache.getSession(sessionId) == null) sessionAdded(sessionId); } /** * A session for which we don't have a watcher, i.e. hitherto unknown to us. * * @param sessionId session id for the new session */ public void sessionAdded(long sessionId) { SessionZooKeeperClient sessionZKClient = createSessionZooKeeperClient(sessionId); if (sessionZKClient.readStatus().equals(Session.Status.DELETE)) return; log.log(Level.FINE, () -> "Adding remote session to SessionRepository: " + sessionId); RemoteSession remoteSession = createRemoteSession(sessionId); loadSessionIfActive(remoteSession); addRemoteSession(remoteSession); Optional<LocalSession> localSession = Optional.empty(); if (distributeApplicationPackage()) localSession = createLocalSessionUsingDistributedApplicationPackage(sessionId); addSessionStateWatcher(sessionId, remoteSession, localSession); } void activate(RemoteSession session) { long sessionId = session.getSessionId(); Curator.CompletionWaiter waiter = createSessionZooKeeperClient(sessionId).getActiveWaiter(); log.log(Level.FINE, () -> session.logPre() + "Getting session from repo: " + sessionId); ApplicationSet app = ensureApplicationLoaded(session); log.log(Level.FINE, () -> session.logPre() + "Reloading config for " + sessionId); applicationRepo.reloadConfig(app); log.log(Level.FINE, () -> session.logPre() + "Notifying " + waiter); notifyCompletion(waiter, session); log.log(Level.INFO, session.logPre() + "Session activated: " + sessionId); } boolean distributeApplicationPackage() { return distributeApplicationPackage.value(); } private void sessionRemoved(long sessionId) { SessionStateWatcher watcher = sessionStateWatchers.remove(sessionId); if (watcher != null) watcher.close(); remoteSessionCache.removeSession(sessionId); metrics.incRemovedSessions(); } private void 
loadSessionIfActive(RemoteSession session) { for (ApplicationId applicationId : applicationRepo.activeApplications()) { if (applicationRepo.requireActiveSessionOf(applicationId) == session.getSessionId()) { log.log(Level.FINE, () -> "Found active application for session " + session.getSessionId() + " , loading it"); applicationRepo.reloadConfig(ensureApplicationLoaded(session)); log.log(Level.INFO, session.logPre() + "Application activated successfully: " + applicationId + " (generation " + session.getSessionId() + ")"); return; } } } void prepareRemoteSession(RemoteSession session) { SessionZooKeeperClient sessionZooKeeperClient = createSessionZooKeeperClient(session.getSessionId()); Curator.CompletionWaiter waiter = sessionZooKeeperClient.getPrepareWaiter(); ensureApplicationLoaded(session); notifyCompletion(waiter, session); } public synchronized ApplicationSet ensureApplicationLoaded(RemoteSession session) { Optional<ApplicationSet> applicationSet = session.applicationSet(); if (applicationSet.isPresent()) { return applicationSet.get(); } ApplicationSet newApplicationSet = loadApplication(session); RemoteSession newSession = new RemoteSession(session.getTenantName(), session.getSessionId(), session.getSessionZooKeeperClient(), Optional.of(newApplicationSet)); remoteSessionCache.addSession(newSession); return newApplicationSet; } void confirmUpload(RemoteSession session) { Curator.CompletionWaiter waiter = session.getSessionZooKeeperClient().getUploadWaiter(); long sessionId = session.getSessionId(); log.log(Level.FINE, "Notifying upload waiter for session " + sessionId); notifyCompletion(waiter, session); log.log(Level.FINE, "Done notifying upload for session " + sessionId); } void notifyCompletion(Curator.CompletionWaiter completionWaiter, RemoteSession session) { try { completionWaiter.notifyCompletion(); } catch (RuntimeException e) { Set<Class<? 
extends KeeperException>> acceptedExceptions = Set.of(KeeperException.NoNodeException.class, KeeperException.NodeExistsException.class); Class<? extends Throwable> exceptionClass = e.getCause().getClass(); if (acceptedExceptions.contains(exceptionClass)) log.log(Level.FINE, "Not able to notify completion for session " + session.getSessionId() + " (" + completionWaiter + ")," + " node " + (exceptionClass.equals(KeeperException.NoNodeException.class) ? "has been deleted" : "already exists")); else throw e; } } private ApplicationSet loadApplication(RemoteSession session) { SessionZooKeeperClient sessionZooKeeperClient = createSessionZooKeeperClient(session.getSessionId()); ApplicationPackage applicationPackage = sessionZooKeeperClient.loadApplicationPackage(); ActivatedModelsBuilder builder = new ActivatedModelsBuilder(session.getTenantName(), session.getSessionId(), sessionZooKeeperClient, componentRegistry); Optional<AllocatedHosts> allocatedHosts = applicationPackage.getAllocatedHosts(); return ApplicationSet.fromList(builder.buildModels(session.getApplicationId(), sessionZooKeeperClient.readDockerImageRepository(), sessionZooKeeperClient.readVespaVersion(), applicationPackage, new SettableOptional<>(allocatedHosts), clock.instant())); } private void nodeChanged() { zkWatcherExecutor.execute(() -> { Multiset<Session.Status> sessionMetrics = HashMultiset.create(); for (RemoteSession session : remoteSessionCache.getSessions()) { sessionMetrics.add(session.getStatus()); } metrics.setNewSessions(sessionMetrics.count(Session.Status.NEW)); metrics.setPreparedSessions(sessionMetrics.count(Session.Status.PREPARE)); metrics.setActivatedSessions(sessionMetrics.count(Session.Status.ACTIVATE)); metrics.setDeactivatedSessions(sessionMetrics.count(Session.Status.DEACTIVATE)); }); } @SuppressWarnings("unused") private void childEvent(CuratorFramework ignored, PathChildrenCacheEvent event) { zkWatcherExecutor.execute(() -> { log.log(Level.FINE, () -> "Got child event: " + event); 
switch (event.getType()) { case CHILD_ADDED: sessionsChanged(); synchronizeOnNew(getSessionListFromDirectoryCache(Collections.singletonList(event.getData()))); break; case CHILD_REMOVED: case CONNECTION_RECONNECTED: sessionsChanged(); break; } }); } private void synchronizeOnNew(List<Long> sessionList) { for (long sessionId : sessionList) { RemoteSession session = remoteSessionCache.getSession(sessionId); if (session == null) continue; log.log(Level.FINE, () -> session.logPre() + "Confirming upload for session " + sessionId); confirmUpload(session); } } /** * Creates a new deployment session from an application package. * * @param applicationDirectory a File pointing to an application. * @param applicationId application id for this new session. * @param timeoutBudget Timeout for creating session and waiting for other servers. * @return a new session */ public LocalSession createSession(File applicationDirectory, ApplicationId applicationId, TimeoutBudget timeoutBudget, Optional<Long> activeSessionId) { return create(applicationDirectory, applicationId, activeSessionId, false, timeoutBudget); } public RemoteSession createRemoteSession(long sessionId) { SessionZooKeeperClient sessionZKClient = createSessionZooKeeperClient(sessionId); return new RemoteSession(tenantName, sessionId, sessionZKClient); } private void ensureSessionPathDoesNotExist(long sessionId) { Path sessionPath = getSessionPath(sessionId); if (componentRegistry.getConfigCurator().exists(sessionPath.getAbsolute())) { throw new IllegalArgumentException("Path " + sessionPath.getAbsolute() + " already exists in ZooKeeper"); } } private ApplicationPackage createApplication(File userDir, File configApplicationDir, ApplicationId applicationId, long sessionId, Optional<Long> currentlyActiveSessionId, boolean internalRedeploy) { long deployTimestamp = System.currentTimeMillis(); String user = System.getenv("USER"); if (user == null) { user = "unknown"; } DeployData deployData = new DeployData(user, 
userDir.getAbsolutePath(), applicationId, deployTimestamp, internalRedeploy, sessionId, currentlyActiveSessionId.orElse(nonExistingActiveSessionId)); return FilesApplicationPackage.fromFileWithDeployData(configApplicationDir, deployData); } private LocalSession createSessionFromApplication(ApplicationPackage applicationPackage, long sessionId, TimeoutBudget timeoutBudget, Clock clock) { log.log(Level.FINE, TenantRepository.logPre(tenantName) + "Creating session " + sessionId + " in ZooKeeper"); SessionZooKeeperClient sessionZKClient = createSessionZooKeeperClient(sessionId); sessionZKClient.createNewSession(clock.instant()); Curator.CompletionWaiter waiter = sessionZKClient.getUploadWaiter(); LocalSession session = new LocalSession(tenantName, sessionId, applicationPackage, sessionZKClient); waiter.awaitCompletion(timeoutBudget.timeLeft()); return session; } /** * Creates a new deployment session from an already existing session. * * @param existingSession the session to use as base * @param logger a deploy logger where the deploy log will be written. * @param internalRedeploy whether this session is for a system internal redeploy — not an application package change * @param timeoutBudget timeout for creating session and waiting for other servers. 
* @return a new session */ public LocalSession createSessionFromExisting(Session existingSession, DeployLogger logger, boolean internalRedeploy, TimeoutBudget timeoutBudget) { File existingApp = getSessionAppDir(existingSession.getSessionId()); ApplicationId existingApplicationId = existingSession.getApplicationId(); Optional<Long> activeSessionId = getActiveSessionId(existingApplicationId); logger.log(Level.FINE, "Create new session for application id '" + existingApplicationId + "' from existing active session " + activeSessionId); LocalSession session = create(existingApp, existingApplicationId, activeSessionId, internalRedeploy, timeoutBudget); session.setApplicationId(existingApplicationId); if (distributeApplicationPackage() && existingSession.getApplicationPackageReference() != null) { session.setApplicationPackageReference(existingSession.getApplicationPackageReference()); } session.setVespaVersion(existingSession.getVespaVersion()); session.setDockerImageRepository(existingSession.getDockerImageRepository()); session.setAthenzDomain(existingSession.getAthenzDomain()); return session; } private LocalSession create(File applicationFile, ApplicationId applicationId, Optional<Long> currentlyActiveSessionId, boolean internalRedeploy, TimeoutBudget timeoutBudget) { long sessionId = getNextSessionId(); try { ensureSessionPathDoesNotExist(sessionId); ApplicationPackage app = createApplicationPackage(applicationFile, applicationId, sessionId, currentlyActiveSessionId, internalRedeploy); return createSessionFromApplication(app, sessionId, timeoutBudget, clock); } catch (Exception e) { throw new RuntimeException("Error creating session " + sessionId, e); } } /** * This method is used when creating a session based on a remote session and the distributed application package * It does not wait for session being created on other servers */ private LocalSession createLocalSession(File applicationFile, ApplicationId applicationId, long sessionId) { try { Optional<Long> 
currentlyActiveSessionId = getActiveSessionId(applicationId); ApplicationPackage applicationPackage = createApplicationPackage(applicationFile, applicationId, sessionId, currentlyActiveSessionId, false); SessionZooKeeperClient sessionZooKeeperClient = createSessionZooKeeperClient(sessionId); return new LocalSession(tenantName, sessionId, applicationPackage, sessionZooKeeperClient); } catch (Exception e) { throw new RuntimeException("Error creating session " + sessionId, e); } } private ApplicationPackage createApplicationPackage(File applicationFile, ApplicationId applicationId, long sessionId, Optional<Long> currentlyActiveSessionId, boolean internalRedeploy) throws IOException { File userApplicationDir = getSessionAppDir(sessionId); copyApp(applicationFile, userApplicationDir); ApplicationPackage applicationPackage = createApplication(applicationFile, userApplicationDir, applicationId, sessionId, currentlyActiveSessionId, internalRedeploy); applicationPackage.writeMetaData(); return applicationPackage; } private void copyApp(File sourceDir, File destinationDir) throws IOException { if (destinationDir.exists()) throw new RuntimeException("Destination dir " + destinationDir + " already exists"); if (! 
sourceDir.isDirectory()) throw new IllegalArgumentException(sourceDir.getAbsolutePath() + " is not a directory"); java.nio.file.Path tempDestinationDir = null; try { tempDestinationDir = Files.createTempDirectory(destinationDir.getParentFile().toPath(), "app-package"); log.log(Level.FINE, "Copying dir " + sourceDir.getAbsolutePath() + " to " + tempDestinationDir.toFile().getAbsolutePath()); IOUtils.copyDirectory(sourceDir, tempDestinationDir.toFile()); log.log(Level.FINE, "Moving " + tempDestinationDir + " to " + destinationDir.getAbsolutePath()); Files.move(tempDestinationDir, destinationDir.toPath(), StandardCopyOption.ATOMIC_MOVE); } finally { if (tempDestinationDir != null) IOUtils.recursiveDeleteDir(tempDestinationDir.toFile()); } } /** * Returns a new session instance for the given session id. */ LocalSession createSessionFromId(long sessionId) { File sessionDir = getAndValidateExistingSessionAppDir(sessionId); ApplicationPackage applicationPackage = FilesApplicationPackage.fromFile(sessionDir); SessionZooKeeperClient sessionZKClient = createSessionZooKeeperClient(sessionId); return new LocalSession(tenantName, sessionId, applicationPackage, sessionZKClient); } /** * Returns a new local session for the given session id if it does not already exist. 
* Will also add the session to the local session cache if necessary */ public Optional<LocalSession> createLocalSessionUsingDistributedApplicationPackage(long sessionId) { if (applicationRepo.hasLocalSession(sessionId)) { log.log(Level.FINE, "Local session for session id " + sessionId + " already exists"); return Optional.of(createSessionFromId(sessionId)); } SessionZooKeeperClient sessionZKClient = createSessionZooKeeperClient(sessionId); FileReference fileReference = sessionZKClient.readApplicationPackageReference(); log.log(Level.FINE, "File reference for session id " + sessionId + ": " + fileReference); if (fileReference != null) { File rootDir = new File(Defaults.getDefaults().underVespaHome(componentRegistry.getConfigserverConfig().fileReferencesDir())); File sessionDir; FileDirectory fileDirectory = new FileDirectory(rootDir); try { sessionDir = fileDirectory.getFile(fileReference); } catch (IllegalArgumentException e) { log.log(Level.INFO, "File reference for session id " + sessionId + ": " + fileReference + " not found in " + fileDirectory); return Optional.empty(); } ApplicationId applicationId = sessionZKClient.readApplicationId() .orElseThrow(() -> new RuntimeException("Could not find application id for session " + sessionId)); log.log(Level.INFO, "Creating local session for tenant '" + tenantName + "' with session id " + sessionId); LocalSession localSession = createLocalSession(sessionDir, applicationId, sessionId); addLocalSession(localSession); return Optional.of(localSession); } return Optional.empty(); } private Optional<Long> getActiveSessionId(ApplicationId applicationId) { List<ApplicationId> applicationIds = applicationRepo.activeApplications(); return applicationIds.contains(applicationId) ? 
Optional.of(applicationRepo.requireActiveSessionOf(applicationId)) : Optional.empty(); } private long getNextSessionId() { return new SessionCounter(componentRegistry.getConfigCurator(), tenantName).nextSessionId(); } public Path getSessionPath(long sessionId) { return sessionsPath.append(String.valueOf(sessionId)); } Path getSessionStatePath(long sessionId) { return getSessionPath(sessionId).append(ConfigCurator.SESSIONSTATE_ZK_SUBPATH); } private SessionZooKeeperClient createSessionZooKeeperClient(long sessionId) { String serverId = componentRegistry.getConfigserverConfig().serverId(); return new SessionZooKeeperClient(curator, componentRegistry.getConfigCurator(), tenantName, sessionId, serverId); } private File getAndValidateExistingSessionAppDir(long sessionId) { File appDir = getSessionAppDir(sessionId); if (!appDir.exists() || !appDir.isDirectory()) { throw new IllegalArgumentException("Unable to find correct application directory for session " + sessionId); } return appDir; } private File getSessionAppDir(long sessionId) { return new TenantFileSystemDirs(componentRegistry.getConfigServerDB(), tenantName).getUserApplicationDir(sessionId); } private void addSessionStateWatcher(long sessionId, RemoteSession remoteSession, Optional<LocalSession> localSession) { if (sessionStateWatchers.containsKey(sessionId)) { localSession.ifPresent(session -> sessionStateWatchers.get(sessionId).addLocalSession(session)); } else { Curator.FileCache fileCache = curator.createFileCache(getSessionStatePath(sessionId).getAbsolute(), false); fileCache.addListener(this::nodeChanged); sessionStateWatchers.put(sessionId, new SessionStateWatcher(fileCache, remoteSession, localSession, metrics, zkWatcherExecutor, this)); } } @Override public String toString() { return getLocalSessions().toString(); } /** Returns the lock for session operations for the given session id. 
*/ public Lock lock(long sessionId) { return curator.lock(lockPath(sessionId), Duration.ofMinutes(1)); } public Clock clock() { return clock; } private Path lockPath(long sessionId) { return locksPath.append(String.valueOf(sessionId)); } public Transaction createActivateTransaction(Session session) { Transaction transaction = createSetStatusTransaction(session, Session.Status.ACTIVATE); transaction.add(applicationRepo.createPutTransaction(session.getApplicationId(), session.getSessionId()).operations()); return transaction; } private Transaction createSetStatusTransaction(Session session, Session.Status status) { return session.sessionZooKeeperClient.createWriteStatusTransaction(status); } void setPrepared(Session session) { session.setStatus(Session.Status.PREPARE); } private static class FileTransaction extends AbstractTransaction { public static FileTransaction from(FileOperation operation) { FileTransaction transaction = new FileTransaction(); transaction.add(operation); return transaction; } @Override public void prepare() { } @Override public void commit() { for (Operation operation : operations()) ((FileOperation)operation).commit(); } } /** Factory for file operations */ private static class FileOperations { /** Creates an operation which recursively deletes the given path */ public static DeleteOperation delete(String pathToDelete) { return new DeleteOperation(pathToDelete); } } private interface FileOperation extends Transaction.Operation { void commit(); } /** * Recursively deletes this path and everything below. * Succeeds with no action if the path does not exist. */ private static class DeleteOperation implements FileOperation { private final String pathToDelete; DeleteOperation(String pathToDelete) { this.pathToDelete = pathToDelete; } @Override public void commit() { IOUtils.recursiveDeleteDir(new File(pathToDelete)); } } }
class SessionRepository { private static final Logger log = Logger.getLogger(SessionRepository.class.getName()); private static final FilenameFilter sessionApplicationsFilter = (dir, name) -> name.matches("\\d+"); private static final long nonExistingActiveSessionId = 0; private final SessionCache<LocalSession> localSessionCache = new SessionCache<>(); private final SessionCache<RemoteSession> remoteSessionCache = new SessionCache<>(); private final Map<Long, SessionStateWatcher> sessionStateWatchers = new HashMap<>(); private final Duration sessionLifetime; private final Clock clock; private final Curator curator; private final Executor zkWatcherExecutor; private final TenantFileSystemDirs tenantFileSystemDirs; private final BooleanFlag distributeApplicationPackage; private final MetricUpdater metrics; private final Curator.DirectoryCache directoryCache; private final TenantApplications applicationRepo; private final SessionPreparer sessionPreparer; private final Path sessionsPath; private final TenantName tenantName; private final GlobalComponentRegistry componentRegistry; private final Path locksPath; public SessionRepository(TenantName tenantName, GlobalComponentRegistry componentRegistry, TenantApplications applicationRepo, FlagSource flagSource, SessionPreparer sessionPreparer) { this.tenantName = tenantName; this.componentRegistry = componentRegistry; this.sessionsPath = TenantRepository.getSessionsPath(tenantName); this.clock = componentRegistry.getClock(); this.curator = componentRegistry.getCurator(); this.sessionLifetime = Duration.ofSeconds(componentRegistry.getConfigserverConfig().sessionLifetime()); this.zkWatcherExecutor = command -> componentRegistry.getZkWatcherExecutor().execute(tenantName, command); this.tenantFileSystemDirs = new TenantFileSystemDirs(componentRegistry.getConfigServerDB(), tenantName); this.applicationRepo = applicationRepo; this.sessionPreparer = sessionPreparer; this.distributeApplicationPackage = 
Flags.CONFIGSERVER_DISTRIBUTE_APPLICATION_PACKAGE.bindTo(flagSource); this.metrics = componentRegistry.getMetrics().getOrCreateMetricUpdater(Metrics.createDimensions(tenantName)); this.locksPath = TenantRepository.getLocksPath(tenantName); loadSessions(); this.directoryCache = curator.createDirectoryCache(sessionsPath.getAbsolute(), false, false, componentRegistry.getZkCacheExecutor()); this.directoryCache.addListener(this::childEvent); this.directoryCache.start(); } private void loadSessions() { loadLocalSessions(); initializeRemoteSessions(); } public synchronized void addLocalSession(LocalSession session) { localSessionCache.putSession(session); long sessionId = session.getSessionId(); RemoteSession remoteSession = createRemoteSession(sessionId); addSessionStateWatcher(sessionId, remoteSession, Optional.of(session)); } public LocalSession getLocalSession(long sessionId) { return localSessionCache.getSession(sessionId); } public List<LocalSession> getLocalSessions() { return localSessionCache.getSessions(); } private void loadLocalSessions() { File[] sessions = tenantFileSystemDirs.sessionsPath().listFiles(sessionApplicationsFilter); if (sessions == null) return; for (File session : sessions) { try { addLocalSession(createSessionFromId(Long.parseLong(session.getName()))); } catch (IllegalArgumentException e) { log.log(Level.WARNING, "Could not load session '" + session.getAbsolutePath() + "':" + e.getMessage() + ", skipping it."); } } } public ConfigChangeActions prepareLocalSession(LocalSession session, DeployLogger logger, PrepareParams params, Optional<ApplicationSet> currentActiveApplicationSet, Path tenantPath, Instant now) { applicationRepo.createApplication(params.getApplicationId()); logger.log(Level.FINE, "Created application " + params.getApplicationId()); long sessionId = session.getSessionId(); SessionZooKeeperClient sessionZooKeeperClient = createSessionZooKeeperClient(sessionId); Curator.CompletionWaiter waiter = 
sessionZooKeeperClient.createPrepareWaiter(); ConfigChangeActions actions = sessionPreparer.prepare(applicationRepo.getHostValidator(), logger, params, currentActiveApplicationSet, tenantPath, now, getSessionAppDir(sessionId), session.getApplicationPackage(), sessionZooKeeperClient) .getConfigChangeActions(); setPrepared(session); waiter.awaitCompletion(params.getTimeoutBudget().timeLeft()); return actions; } public void deleteExpiredSessions(Map<ApplicationId, Long> activeSessions) { log.log(Level.FINE, "Purging old sessions for tenant '" + tenantName + "'"); try { for (LocalSession candidate : localSessionCache.getSessions()) { Instant createTime = candidate.getCreateTime(); log.log(Level.FINE, "Candidate session for deletion: " + candidate.getSessionId() + ", created: " + createTime); if (hasExpired(candidate) && !isActiveSession(candidate)) { deleteLocalSession(candidate); } else if (createTime.plus(Duration.ofDays(1)).isBefore(clock.instant())) { Optional<ApplicationId> applicationId = candidate.getOptionalApplicationId(); if (applicationId.isEmpty()) continue; Long activeSession = activeSessions.get(applicationId.get()); if (activeSession == null || activeSession != candidate.getSessionId()) { deleteLocalSession(candidate); log.log(Level.INFO, "Deleted inactive session " + candidate.getSessionId() + " created " + createTime + " for '" + applicationId + "'"); } } } } catch (Throwable e) { log.log(Level.WARNING, "Error when purging old sessions ", e); } log.log(Level.FINE, "Done purging old sessions"); } private boolean hasExpired(LocalSession candidate) { return (candidate.getCreateTime().plus(sessionLifetime).isBefore(clock.instant())); } private boolean isActiveSession(LocalSession candidate) { return candidate.getStatus() == Session.Status.ACTIVATE; } public void deleteLocalSession(LocalSession session) { long sessionId = session.getSessionId(); try (Lock lock = lock(sessionId)) { log.log(Level.FINE, "Deleting local session " + sessionId); 
SessionStateWatcher watcher = sessionStateWatchers.remove(sessionId); if (watcher != null) watcher.close(); localSessionCache.removeSession(sessionId); deletePersistentData(sessionId); } } private void deletePersistentData(long sessionId) { NestedTransaction transaction = new NestedTransaction(); SessionZooKeeperClient sessionZooKeeperClient = createSessionZooKeeperClient(sessionId); transaction.add(sessionZooKeeperClient.deleteTransaction(), FileTransaction.class); transaction.add(FileTransaction.from(FileOperations.delete(getSessionAppDir(sessionId).getAbsolutePath()))); transaction.commit(); } public void close() { deleteAllSessions(); tenantFileSystemDirs.delete(); try { if (directoryCache != null) { directoryCache.close(); } } catch (Exception e) { log.log(Level.WARNING, "Exception when closing path cache", e); } finally { checkForRemovedSessions(new ArrayList<>()); } } private void deleteAllSessions() { List<LocalSession> sessions = new ArrayList<>(localSessionCache.getSessions()); for (LocalSession session : sessions) { deleteLocalSession(session); } } public RemoteSession getRemoteSession(long sessionId) { return remoteSessionCache.getSession(sessionId); } public List<Long> getRemoteSessions() { return getSessionList(curator.getChildren(sessionsPath)); } public void addRemoteSession(RemoteSession session) { remoteSessionCache.putSession(session); metrics.incAddedSessions(); } public int deleteExpiredRemoteSessions(Clock clock, Duration expiryTime) { int deleted = 0; for (long sessionId : getRemoteSessions()) { RemoteSession session = remoteSessionCache.getSession(sessionId); if (session == null) continue; if (session.getStatus() == Session.Status.ACTIVATE) continue; if (sessionHasExpired(session.getCreateTime(), expiryTime, clock)) { log.log(Level.FINE, "Remote session " + sessionId + " for " + tenantName + " has expired, deleting it"); deleteSession(session); deleted++; } } return deleted; } public void deactivate(RemoteSession remoteSession) { 
remoteSessionCache.putSession(remoteSession.deactivated()); } public void deleteSession(RemoteSession session) { SessionZooKeeperClient sessionZooKeeperClient = createSessionZooKeeperClient(session.getSessionId()); Transaction transaction = sessionZooKeeperClient.deleteTransaction(); transaction.commit(); transaction.close(); } public int deleteExpiredLocks(Clock clock, Duration expiryTime) { int deleted = 0; for (var lock : curator.getChildren(locksPath)) { Path path = locksPath.append(lock); if (zooKeeperNodeCreated(path).orElse(clock.instant()).isBefore(clock.instant().minus(expiryTime))) { log.log(Level.FINE, "Lock " + path + " has expired, deleting it"); curator.delete(path); deleted++; } } return deleted; } private Optional<Instant> zooKeeperNodeCreated(Path path) { return curator.getStat(path).map(s -> Instant.ofEpochMilli(s.getCtime())); } private boolean sessionHasExpired(Instant created, Duration expiryTime, Clock clock) { return (created.plus(expiryTime).isBefore(clock.instant())); } private List<Long> getSessionListFromDirectoryCache(List<ChildData> children) { return getSessionList(children.stream() .map(child -> Path.fromString(child.getPath()).getName()) .collect(Collectors.toList())); } private List<Long> getSessionList(List<String> children) { return children.stream().map(Long::parseLong).collect(Collectors.toList()); } private void initializeRemoteSessions() throws NumberFormatException { getRemoteSessions().forEach(this::sessionAdded); } private synchronized void sessionsChanged() throws NumberFormatException { List<Long> sessions = getSessionListFromDirectoryCache(directoryCache.getCurrentData()); checkForRemovedSessions(sessions); checkForAddedSessions(sessions); } private void checkForRemovedSessions(List<Long> sessions) { for (RemoteSession session : remoteSessionCache.getSessions()) if ( ! 
sessions.contains(session.getSessionId())) sessionRemoved(session.getSessionId()); } private void checkForAddedSessions(List<Long> sessions) { for (Long sessionId : sessions) if (remoteSessionCache.getSession(sessionId) == null) sessionAdded(sessionId); } /** * A session for which we don't have a watcher, i.e. hitherto unknown to us. * * @param sessionId session id for the new session */ public void sessionAdded(long sessionId) { SessionZooKeeperClient sessionZKClient = createSessionZooKeeperClient(sessionId); if (sessionZKClient.readStatus().equals(Session.Status.DELETE)) return; log.log(Level.FINE, () -> "Adding remote session to SessionRepository: " + sessionId); RemoteSession remoteSession = createRemoteSession(sessionId); loadSessionIfActive(remoteSession); addRemoteSession(remoteSession); Optional<LocalSession> localSession = Optional.empty(); if (distributeApplicationPackage()) localSession = createLocalSessionUsingDistributedApplicationPackage(sessionId); addSessionStateWatcher(sessionId, remoteSession, localSession); } void activate(RemoteSession session) { long sessionId = session.getSessionId(); Curator.CompletionWaiter waiter = createSessionZooKeeperClient(sessionId).getActiveWaiter(); log.log(Level.FINE, () -> session.logPre() + "Getting session from repo: " + sessionId); ApplicationSet app = ensureApplicationLoaded(session); log.log(Level.FINE, () -> session.logPre() + "Reloading config for " + sessionId); applicationRepo.reloadConfig(app); log.log(Level.FINE, () -> session.logPre() + "Notifying " + waiter); notifyCompletion(waiter, session); log.log(Level.INFO, session.logPre() + "Session activated: " + sessionId); } boolean distributeApplicationPackage() { return distributeApplicationPackage.value(); } private void sessionRemoved(long sessionId) { SessionStateWatcher watcher = sessionStateWatchers.remove(sessionId); if (watcher != null) watcher.close(); remoteSessionCache.removeSession(sessionId); metrics.incRemovedSessions(); } private void 
loadSessionIfActive(RemoteSession session) { for (ApplicationId applicationId : applicationRepo.activeApplications()) { if (applicationRepo.requireActiveSessionOf(applicationId) == session.getSessionId()) { log.log(Level.FINE, () -> "Found active application for session " + session.getSessionId() + " , loading it"); applicationRepo.reloadConfig(ensureApplicationLoaded(session)); log.log(Level.INFO, session.logPre() + "Application activated successfully: " + applicationId + " (generation " + session.getSessionId() + ")"); return; } } } void prepareRemoteSession(RemoteSession session) { SessionZooKeeperClient sessionZooKeeperClient = createSessionZooKeeperClient(session.getSessionId()); Curator.CompletionWaiter waiter = sessionZooKeeperClient.getPrepareWaiter(); ensureApplicationLoaded(session); notifyCompletion(waiter, session); } public synchronized ApplicationSet ensureApplicationLoaded(RemoteSession session) { Optional<ApplicationSet> applicationSet = session.applicationSet(); if (applicationSet.isPresent()) { return applicationSet.get(); } ApplicationSet newApplicationSet = loadApplication(session); RemoteSession newSession = new RemoteSession(session.getTenantName(), session.getSessionId(), session.getSessionZooKeeperClient(), Optional.of(newApplicationSet)); remoteSessionCache.putSession(newSession); return newApplicationSet; } void confirmUpload(RemoteSession session) { Curator.CompletionWaiter waiter = session.getSessionZooKeeperClient().getUploadWaiter(); long sessionId = session.getSessionId(); log.log(Level.FINE, "Notifying upload waiter for session " + sessionId); notifyCompletion(waiter, session); log.log(Level.FINE, "Done notifying upload for session " + sessionId); } void notifyCompletion(Curator.CompletionWaiter completionWaiter, RemoteSession session) { try { completionWaiter.notifyCompletion(); } catch (RuntimeException e) { Set<Class<? 
extends KeeperException>> acceptedExceptions = Set.of(KeeperException.NoNodeException.class, KeeperException.NodeExistsException.class); Class<? extends Throwable> exceptionClass = e.getCause().getClass(); if (acceptedExceptions.contains(exceptionClass)) log.log(Level.FINE, "Not able to notify completion for session " + session.getSessionId() + " (" + completionWaiter + ")," + " node " + (exceptionClass.equals(KeeperException.NoNodeException.class) ? "has been deleted" : "already exists")); else throw e; } } private ApplicationSet loadApplication(RemoteSession session) { SessionZooKeeperClient sessionZooKeeperClient = createSessionZooKeeperClient(session.getSessionId()); ApplicationPackage applicationPackage = sessionZooKeeperClient.loadApplicationPackage(); ActivatedModelsBuilder builder = new ActivatedModelsBuilder(session.getTenantName(), session.getSessionId(), sessionZooKeeperClient, componentRegistry); Optional<AllocatedHosts> allocatedHosts = applicationPackage.getAllocatedHosts(); return ApplicationSet.fromList(builder.buildModels(session.getApplicationId(), sessionZooKeeperClient.readDockerImageRepository(), sessionZooKeeperClient.readVespaVersion(), applicationPackage, new SettableOptional<>(allocatedHosts), clock.instant())); } private void nodeChanged() { zkWatcherExecutor.execute(() -> { Multiset<Session.Status> sessionMetrics = HashMultiset.create(); for (RemoteSession session : remoteSessionCache.getSessions()) { sessionMetrics.add(session.getStatus()); } metrics.setNewSessions(sessionMetrics.count(Session.Status.NEW)); metrics.setPreparedSessions(sessionMetrics.count(Session.Status.PREPARE)); metrics.setActivatedSessions(sessionMetrics.count(Session.Status.ACTIVATE)); metrics.setDeactivatedSessions(sessionMetrics.count(Session.Status.DEACTIVATE)); }); } @SuppressWarnings("unused") private void childEvent(CuratorFramework ignored, PathChildrenCacheEvent event) { zkWatcherExecutor.execute(() -> { log.log(Level.FINE, () -> "Got child event: " + event); 
switch (event.getType()) { case CHILD_ADDED: sessionsChanged(); synchronizeOnNew(getSessionListFromDirectoryCache(Collections.singletonList(event.getData()))); break; case CHILD_REMOVED: case CONNECTION_RECONNECTED: sessionsChanged(); break; } }); } private void synchronizeOnNew(List<Long> sessionList) { for (long sessionId : sessionList) { RemoteSession session = remoteSessionCache.getSession(sessionId); if (session == null) continue; log.log(Level.FINE, () -> session.logPre() + "Confirming upload for session " + sessionId); confirmUpload(session); } } /** * Creates a new deployment session from an application package. * * @param applicationDirectory a File pointing to an application. * @param applicationId application id for this new session. * @param timeoutBudget Timeout for creating session and waiting for other servers. * @return a new session */ public LocalSession createSession(File applicationDirectory, ApplicationId applicationId, TimeoutBudget timeoutBudget, Optional<Long> activeSessionId) { return create(applicationDirectory, applicationId, activeSessionId, false, timeoutBudget); } public RemoteSession createRemoteSession(long sessionId) { SessionZooKeeperClient sessionZKClient = createSessionZooKeeperClient(sessionId); return new RemoteSession(tenantName, sessionId, sessionZKClient); } private void ensureSessionPathDoesNotExist(long sessionId) { Path sessionPath = getSessionPath(sessionId); if (componentRegistry.getConfigCurator().exists(sessionPath.getAbsolute())) { throw new IllegalArgumentException("Path " + sessionPath.getAbsolute() + " already exists in ZooKeeper"); } } private ApplicationPackage createApplication(File userDir, File configApplicationDir, ApplicationId applicationId, long sessionId, Optional<Long> currentlyActiveSessionId, boolean internalRedeploy) { long deployTimestamp = System.currentTimeMillis(); String user = System.getenv("USER"); if (user == null) { user = "unknown"; } DeployData deployData = new DeployData(user, 
userDir.getAbsolutePath(), applicationId, deployTimestamp, internalRedeploy, sessionId, currentlyActiveSessionId.orElse(nonExistingActiveSessionId)); return FilesApplicationPackage.fromFileWithDeployData(configApplicationDir, deployData); } private LocalSession createSessionFromApplication(ApplicationPackage applicationPackage, long sessionId, TimeoutBudget timeoutBudget, Clock clock) { log.log(Level.FINE, TenantRepository.logPre(tenantName) + "Creating session " + sessionId + " in ZooKeeper"); SessionZooKeeperClient sessionZKClient = createSessionZooKeeperClient(sessionId); sessionZKClient.createNewSession(clock.instant()); Curator.CompletionWaiter waiter = sessionZKClient.getUploadWaiter(); LocalSession session = new LocalSession(tenantName, sessionId, applicationPackage, sessionZKClient); waiter.awaitCompletion(timeoutBudget.timeLeft()); return session; } /** * Creates a new deployment session from an already existing session. * * @param existingSession the session to use as base * @param logger a deploy logger where the deploy log will be written. * @param internalRedeploy whether this session is for a system internal redeploy — not an application package change * @param timeoutBudget timeout for creating session and waiting for other servers. 
* @return a new session */ public LocalSession createSessionFromExisting(Session existingSession, DeployLogger logger, boolean internalRedeploy, TimeoutBudget timeoutBudget) { File existingApp = getSessionAppDir(existingSession.getSessionId()); ApplicationId existingApplicationId = existingSession.getApplicationId(); Optional<Long> activeSessionId = getActiveSessionId(existingApplicationId); logger.log(Level.FINE, "Create new session for application id '" + existingApplicationId + "' from existing active session " + activeSessionId); LocalSession session = create(existingApp, existingApplicationId, activeSessionId, internalRedeploy, timeoutBudget); session.setApplicationId(existingApplicationId); if (distributeApplicationPackage() && existingSession.getApplicationPackageReference() != null) { session.setApplicationPackageReference(existingSession.getApplicationPackageReference()); } session.setVespaVersion(existingSession.getVespaVersion()); session.setDockerImageRepository(existingSession.getDockerImageRepository()); session.setAthenzDomain(existingSession.getAthenzDomain()); return session; } private LocalSession create(File applicationFile, ApplicationId applicationId, Optional<Long> currentlyActiveSessionId, boolean internalRedeploy, TimeoutBudget timeoutBudget) { long sessionId = getNextSessionId(); try { ensureSessionPathDoesNotExist(sessionId); ApplicationPackage app = createApplicationPackage(applicationFile, applicationId, sessionId, currentlyActiveSessionId, internalRedeploy); return createSessionFromApplication(app, sessionId, timeoutBudget, clock); } catch (Exception e) { throw new RuntimeException("Error creating session " + sessionId, e); } } /** * This method is used when creating a session based on a remote session and the distributed application package * It does not wait for session being created on other servers */ private LocalSession createLocalSession(File applicationFile, ApplicationId applicationId, long sessionId) { try { Optional<Long> 
currentlyActiveSessionId = getActiveSessionId(applicationId); ApplicationPackage applicationPackage = createApplicationPackage(applicationFile, applicationId, sessionId, currentlyActiveSessionId, false); SessionZooKeeperClient sessionZooKeeperClient = createSessionZooKeeperClient(sessionId); return new LocalSession(tenantName, sessionId, applicationPackage, sessionZooKeeperClient); } catch (Exception e) { throw new RuntimeException("Error creating session " + sessionId, e); } } private ApplicationPackage createApplicationPackage(File applicationFile, ApplicationId applicationId, long sessionId, Optional<Long> currentlyActiveSessionId, boolean internalRedeploy) throws IOException { File userApplicationDir = getSessionAppDir(sessionId); copyApp(applicationFile, userApplicationDir); ApplicationPackage applicationPackage = createApplication(applicationFile, userApplicationDir, applicationId, sessionId, currentlyActiveSessionId, internalRedeploy); applicationPackage.writeMetaData(); return applicationPackage; } private void copyApp(File sourceDir, File destinationDir) throws IOException { if (destinationDir.exists()) throw new RuntimeException("Destination dir " + destinationDir + " already exists"); if (! 
sourceDir.isDirectory()) throw new IllegalArgumentException(sourceDir.getAbsolutePath() + " is not a directory"); java.nio.file.Path tempDestinationDir = null; try { tempDestinationDir = Files.createTempDirectory(destinationDir.getParentFile().toPath(), "app-package"); log.log(Level.FINE, "Copying dir " + sourceDir.getAbsolutePath() + " to " + tempDestinationDir.toFile().getAbsolutePath()); IOUtils.copyDirectory(sourceDir, tempDestinationDir.toFile()); log.log(Level.FINE, "Moving " + tempDestinationDir + " to " + destinationDir.getAbsolutePath()); Files.move(tempDestinationDir, destinationDir.toPath(), StandardCopyOption.ATOMIC_MOVE); } finally { if (tempDestinationDir != null) IOUtils.recursiveDeleteDir(tempDestinationDir.toFile()); } } /** * Returns a new session instance for the given session id. */ LocalSession createSessionFromId(long sessionId) { File sessionDir = getAndValidateExistingSessionAppDir(sessionId); ApplicationPackage applicationPackage = FilesApplicationPackage.fromFile(sessionDir); SessionZooKeeperClient sessionZKClient = createSessionZooKeeperClient(sessionId); return new LocalSession(tenantName, sessionId, applicationPackage, sessionZKClient); } /** * Returns a new local session for the given session id if it does not already exist. 
* Will also add the session to the local session cache if necessary */ public Optional<LocalSession> createLocalSessionUsingDistributedApplicationPackage(long sessionId) { if (applicationRepo.hasLocalSession(sessionId)) { log.log(Level.FINE, "Local session for session id " + sessionId + " already exists"); return Optional.of(createSessionFromId(sessionId)); } SessionZooKeeperClient sessionZKClient = createSessionZooKeeperClient(sessionId); FileReference fileReference = sessionZKClient.readApplicationPackageReference(); log.log(Level.FINE, "File reference for session id " + sessionId + ": " + fileReference); if (fileReference != null) { File rootDir = new File(Defaults.getDefaults().underVespaHome(componentRegistry.getConfigserverConfig().fileReferencesDir())); File sessionDir; FileDirectory fileDirectory = new FileDirectory(rootDir); try { sessionDir = fileDirectory.getFile(fileReference); } catch (IllegalArgumentException e) { log.log(Level.INFO, "File reference for session id " + sessionId + ": " + fileReference + " not found in " + fileDirectory); return Optional.empty(); } ApplicationId applicationId = sessionZKClient.readApplicationId() .orElseThrow(() -> new RuntimeException("Could not find application id for session " + sessionId)); log.log(Level.INFO, "Creating local session for tenant '" + tenantName + "' with session id " + sessionId); LocalSession localSession = createLocalSession(sessionDir, applicationId, sessionId); addLocalSession(localSession); return Optional.of(localSession); } return Optional.empty(); } private Optional<Long> getActiveSessionId(ApplicationId applicationId) { List<ApplicationId> applicationIds = applicationRepo.activeApplications(); return applicationIds.contains(applicationId) ? 
Optional.of(applicationRepo.requireActiveSessionOf(applicationId)) : Optional.empty(); } private long getNextSessionId() { return new SessionCounter(componentRegistry.getConfigCurator(), tenantName).nextSessionId(); } public Path getSessionPath(long sessionId) { return sessionsPath.append(String.valueOf(sessionId)); } Path getSessionStatePath(long sessionId) { return getSessionPath(sessionId).append(ConfigCurator.SESSIONSTATE_ZK_SUBPATH); } private SessionZooKeeperClient createSessionZooKeeperClient(long sessionId) { String serverId = componentRegistry.getConfigserverConfig().serverId(); return new SessionZooKeeperClient(curator, componentRegistry.getConfigCurator(), tenantName, sessionId, serverId); } private File getAndValidateExistingSessionAppDir(long sessionId) { File appDir = getSessionAppDir(sessionId); if (!appDir.exists() || !appDir.isDirectory()) { throw new IllegalArgumentException("Unable to find correct application directory for session " + sessionId); } return appDir; } private File getSessionAppDir(long sessionId) { return new TenantFileSystemDirs(componentRegistry.getConfigServerDB(), tenantName).getUserApplicationDir(sessionId); } private void addSessionStateWatcher(long sessionId, RemoteSession remoteSession, Optional<LocalSession> localSession) { if (sessionStateWatchers.containsKey(sessionId)) { localSession.ifPresent(session -> sessionStateWatchers.get(sessionId).addLocalSession(session)); } else { Curator.FileCache fileCache = curator.createFileCache(getSessionStatePath(sessionId).getAbsolute(), false); fileCache.addListener(this::nodeChanged); sessionStateWatchers.put(sessionId, new SessionStateWatcher(fileCache, remoteSession, localSession, metrics, zkWatcherExecutor, this)); } } @Override public String toString() { return getLocalSessions().toString(); } /** Returns the lock for session operations for the given session id. 
*/ public Lock lock(long sessionId) { return curator.lock(lockPath(sessionId), Duration.ofMinutes(1)); } public Clock clock() { return clock; } private Path lockPath(long sessionId) { return locksPath.append(String.valueOf(sessionId)); } public Transaction createActivateTransaction(Session session) { Transaction transaction = createSetStatusTransaction(session, Session.Status.ACTIVATE); transaction.add(applicationRepo.createPutTransaction(session.getApplicationId(), session.getSessionId()).operations()); return transaction; } private Transaction createSetStatusTransaction(Session session, Session.Status status) { return session.sessionZooKeeperClient.createWriteStatusTransaction(status); } void setPrepared(Session session) { session.setStatus(Session.Status.PREPARE); } private static class FileTransaction extends AbstractTransaction { public static FileTransaction from(FileOperation operation) { FileTransaction transaction = new FileTransaction(); transaction.add(operation); return transaction; } @Override public void prepare() { } @Override public void commit() { for (Operation operation : operations()) ((FileOperation)operation).commit(); } } /** Factory for file operations */ private static class FileOperations { /** Creates an operation which recursively deletes the given path */ public static DeleteOperation delete(String pathToDelete) { return new DeleteOperation(pathToDelete); } } private interface FileOperation extends Transaction.Operation { void commit(); } /** * Recursively deletes this path and everything below. * Succeeds with no action if the path does not exist. */ private static class DeleteOperation implements FileOperation { private final String pathToDelete; DeleteOperation(String pathToDelete) { this.pathToDelete = pathToDelete; } @Override public void commit() { IOUtils.recursiveDeleteDir(new File(pathToDelete)); } } }
Yes, both are used. I want to clean that up; will do it in a later PR.
void confirmUpload(RemoteSession session) { Curator.CompletionWaiter waiter = session.getSessionZooKeeperClient().getUploadWaiter(); long sessionId = session.getSessionId(); log.log(Level.FINE, "Notifying upload waiter for session " + sessionId); notifyCompletion(waiter, session); log.log(Level.FINE, "Done notifying upload for session " + sessionId); }
Curator.CompletionWaiter waiter = session.getSessionZooKeeperClient().getUploadWaiter();
void confirmUpload(RemoteSession session) { Curator.CompletionWaiter waiter = session.getSessionZooKeeperClient().getUploadWaiter(); long sessionId = session.getSessionId(); log.log(Level.FINE, "Notifying upload waiter for session " + sessionId); notifyCompletion(waiter, session); log.log(Level.FINE, "Done notifying upload for session " + sessionId); }
class SessionRepository { private static final Logger log = Logger.getLogger(SessionRepository.class.getName()); private static final FilenameFilter sessionApplicationsFilter = (dir, name) -> name.matches("\\d+"); private static final long nonExistingActiveSessionId = 0; private final SessionCache<LocalSession> localSessionCache = new SessionCache<>(); private final SessionCache<RemoteSession> remoteSessionCache = new SessionCache<>(); private final Map<Long, SessionStateWatcher> sessionStateWatchers = new HashMap<>(); private final Duration sessionLifetime; private final Clock clock; private final Curator curator; private final Executor zkWatcherExecutor; private final TenantFileSystemDirs tenantFileSystemDirs; private final BooleanFlag distributeApplicationPackage; private final MetricUpdater metrics; private final Curator.DirectoryCache directoryCache; private final TenantApplications applicationRepo; private final SessionPreparer sessionPreparer; private final Path sessionsPath; private final TenantName tenantName; private final GlobalComponentRegistry componentRegistry; private final Path locksPath; public SessionRepository(TenantName tenantName, GlobalComponentRegistry componentRegistry, TenantApplications applicationRepo, FlagSource flagSource, SessionPreparer sessionPreparer) { this.tenantName = tenantName; this.componentRegistry = componentRegistry; this.sessionsPath = TenantRepository.getSessionsPath(tenantName); this.clock = componentRegistry.getClock(); this.curator = componentRegistry.getCurator(); this.sessionLifetime = Duration.ofSeconds(componentRegistry.getConfigserverConfig().sessionLifetime()); this.zkWatcherExecutor = command -> componentRegistry.getZkWatcherExecutor().execute(tenantName, command); this.tenantFileSystemDirs = new TenantFileSystemDirs(componentRegistry.getConfigServerDB(), tenantName); this.applicationRepo = applicationRepo; this.sessionPreparer = sessionPreparer; this.distributeApplicationPackage = 
Flags.CONFIGSERVER_DISTRIBUTE_APPLICATION_PACKAGE.bindTo(flagSource); this.metrics = componentRegistry.getMetrics().getOrCreateMetricUpdater(Metrics.createDimensions(tenantName)); this.locksPath = TenantRepository.getLocksPath(tenantName); loadSessions(); this.directoryCache = curator.createDirectoryCache(sessionsPath.getAbsolute(), false, false, componentRegistry.getZkCacheExecutor()); this.directoryCache.addListener(this::childEvent); this.directoryCache.start(); } private void loadSessions() { loadLocalSessions(); initializeRemoteSessions(); } public synchronized void addLocalSession(LocalSession session) { localSessionCache.addSession(session); long sessionId = session.getSessionId(); RemoteSession remoteSession = createRemoteSession(sessionId); addSessionStateWatcher(sessionId, remoteSession, Optional.of(session)); } public LocalSession getLocalSession(long sessionId) { return localSessionCache.getSession(sessionId); } public List<LocalSession> getLocalSessions() { return localSessionCache.getSessions(); } private void loadLocalSessions() { File[] sessions = tenantFileSystemDirs.sessionsPath().listFiles(sessionApplicationsFilter); if (sessions == null) return; for (File session : sessions) { try { addLocalSession(createSessionFromId(Long.parseLong(session.getName()))); } catch (IllegalArgumentException e) { log.log(Level.WARNING, "Could not load session '" + session.getAbsolutePath() + "':" + e.getMessage() + ", skipping it."); } } } public ConfigChangeActions prepareLocalSession(LocalSession session, DeployLogger logger, PrepareParams params, Optional<ApplicationSet> currentActiveApplicationSet, Path tenantPath, Instant now) { applicationRepo.createApplication(params.getApplicationId()); logger.log(Level.FINE, "Created application " + params.getApplicationId()); long sessionId = session.getSessionId(); SessionZooKeeperClient sessionZooKeeperClient = createSessionZooKeeperClient(sessionId); Curator.CompletionWaiter waiter = 
sessionZooKeeperClient.createPrepareWaiter(); ConfigChangeActions actions = sessionPreparer.prepare(applicationRepo.getHostValidator(), logger, params, currentActiveApplicationSet, tenantPath, now, getSessionAppDir(sessionId), session.getApplicationPackage(), sessionZooKeeperClient) .getConfigChangeActions(); setPrepared(session); waiter.awaitCompletion(params.getTimeoutBudget().timeLeft()); return actions; } public void deleteExpiredSessions(Map<ApplicationId, Long> activeSessions) { log.log(Level.FINE, "Purging old sessions for tenant '" + tenantName + "'"); try { for (LocalSession candidate : localSessionCache.getSessions()) { Instant createTime = candidate.getCreateTime(); log.log(Level.FINE, "Candidate session for deletion: " + candidate.getSessionId() + ", created: " + createTime); if (hasExpired(candidate) && !isActiveSession(candidate)) { deleteLocalSession(candidate); } else if (createTime.plus(Duration.ofDays(1)).isBefore(clock.instant())) { Optional<ApplicationId> applicationId = candidate.getOptionalApplicationId(); if (applicationId.isEmpty()) continue; Long activeSession = activeSessions.get(applicationId.get()); if (activeSession == null || activeSession != candidate.getSessionId()) { deleteLocalSession(candidate); log.log(Level.INFO, "Deleted inactive session " + candidate.getSessionId() + " created " + createTime + " for '" + applicationId + "'"); } } } } catch (Throwable e) { log.log(Level.WARNING, "Error when purging old sessions ", e); } log.log(Level.FINE, "Done purging old sessions"); } private boolean hasExpired(LocalSession candidate) { return (candidate.getCreateTime().plus(sessionLifetime).isBefore(clock.instant())); } private boolean isActiveSession(LocalSession candidate) { return candidate.getStatus() == Session.Status.ACTIVATE; } public void deleteLocalSession(LocalSession session) { long sessionId = session.getSessionId(); try (Lock lock = lock(sessionId)) { log.log(Level.FINE, "Deleting local session " + sessionId); 
SessionStateWatcher watcher = sessionStateWatchers.remove(sessionId); if (watcher != null) watcher.close(); localSessionCache.removeSession(sessionId); deletePersistentData(sessionId); } } private void deletePersistentData(long sessionId) { NestedTransaction transaction = new NestedTransaction(); SessionZooKeeperClient sessionZooKeeperClient = createSessionZooKeeperClient(sessionId); transaction.add(sessionZooKeeperClient.deleteTransaction(), FileTransaction.class); transaction.add(FileTransaction.from(FileOperations.delete(getSessionAppDir(sessionId).getAbsolutePath()))); transaction.commit(); } public void close() { deleteAllSessions(); tenantFileSystemDirs.delete(); try { if (directoryCache != null) { directoryCache.close(); } } catch (Exception e) { log.log(Level.WARNING, "Exception when closing path cache", e); } finally { checkForRemovedSessions(new ArrayList<>()); } } private void deleteAllSessions() { List<LocalSession> sessions = new ArrayList<>(localSessionCache.getSessions()); for (LocalSession session : sessions) { deleteLocalSession(session); } } public RemoteSession getRemoteSession(long sessionId) { return remoteSessionCache.getSession(sessionId); } public List<Long> getRemoteSessions() { return getSessionList(curator.getChildren(sessionsPath)); } public void addRemoteSession(RemoteSession session) { remoteSessionCache.addSession(session); metrics.incAddedSessions(); } public int deleteExpiredRemoteSessions(Clock clock, Duration expiryTime) { int deleted = 0; for (long sessionId : getRemoteSessions()) { RemoteSession session = remoteSessionCache.getSession(sessionId); if (session == null) continue; if (session.getStatus() == Session.Status.ACTIVATE) continue; if (sessionHasExpired(session.getCreateTime(), expiryTime, clock)) { log.log(Level.FINE, "Remote session " + sessionId + " for " + tenantName + " has expired, deleting it"); deleteSession(session); deleted++; } } return deleted; } public void deactivate(RemoteSession remoteSession) { 
remoteSessionCache.addSession(remoteSession.deactivate()); } public void deleteSession(RemoteSession session) { SessionZooKeeperClient sessionZooKeeperClient = createSessionZooKeeperClient(session.getSessionId()); Transaction transaction = sessionZooKeeperClient.deleteTransaction(); transaction.commit(); transaction.close(); } public int deleteExpiredLocks(Clock clock, Duration expiryTime) { int deleted = 0; for (var lock : curator.getChildren(locksPath)) { Path path = locksPath.append(lock); if (zooKeeperNodeCreated(path).orElse(clock.instant()).isBefore(clock.instant().minus(expiryTime))) { log.log(Level.FINE, "Lock " + path + " has expired, deleting it"); curator.delete(path); deleted++; } } return deleted; } private Optional<Instant> zooKeeperNodeCreated(Path path) { return curator.getStat(path).map(s -> Instant.ofEpochMilli(s.getCtime())); } private boolean sessionHasExpired(Instant created, Duration expiryTime, Clock clock) { return (created.plus(expiryTime).isBefore(clock.instant())); } private List<Long> getSessionListFromDirectoryCache(List<ChildData> children) { return getSessionList(children.stream() .map(child -> Path.fromString(child.getPath()).getName()) .collect(Collectors.toList())); } private List<Long> getSessionList(List<String> children) { return children.stream().map(Long::parseLong).collect(Collectors.toList()); } private void initializeRemoteSessions() throws NumberFormatException { getRemoteSessions().forEach(this::sessionAdded); } private synchronized void sessionsChanged() throws NumberFormatException { List<Long> sessions = getSessionListFromDirectoryCache(directoryCache.getCurrentData()); checkForRemovedSessions(sessions); checkForAddedSessions(sessions); } private void checkForRemovedSessions(List<Long> sessions) { for (RemoteSession session : remoteSessionCache.getSessions()) if ( ! 
sessions.contains(session.getSessionId())) sessionRemoved(session.getSessionId()); } private void checkForAddedSessions(List<Long> sessions) { for (Long sessionId : sessions) if (remoteSessionCache.getSession(sessionId) == null) sessionAdded(sessionId); } /** * A session for which we don't have a watcher, i.e. hitherto unknown to us. * * @param sessionId session id for the new session */ public void sessionAdded(long sessionId) { SessionZooKeeperClient sessionZKClient = createSessionZooKeeperClient(sessionId); if (sessionZKClient.readStatus().equals(Session.Status.DELETE)) return; log.log(Level.FINE, () -> "Adding remote session to SessionRepository: " + sessionId); RemoteSession remoteSession = createRemoteSession(sessionId); loadSessionIfActive(remoteSession); addRemoteSession(remoteSession); Optional<LocalSession> localSession = Optional.empty(); if (distributeApplicationPackage()) localSession = createLocalSessionUsingDistributedApplicationPackage(sessionId); addSessionStateWatcher(sessionId, remoteSession, localSession); } void activate(RemoteSession session) { long sessionId = session.getSessionId(); Curator.CompletionWaiter waiter = createSessionZooKeeperClient(sessionId).getActiveWaiter(); log.log(Level.FINE, () -> session.logPre() + "Getting session from repo: " + sessionId); ApplicationSet app = ensureApplicationLoaded(session); log.log(Level.FINE, () -> session.logPre() + "Reloading config for " + sessionId); applicationRepo.reloadConfig(app); log.log(Level.FINE, () -> session.logPre() + "Notifying " + waiter); notifyCompletion(waiter, session); log.log(Level.INFO, session.logPre() + "Session activated: " + sessionId); } public void deleteSession(RemoteSession remoteSession, Optional<LocalSession> localSession) { localSession.ifPresent(this::deleteLocalSession); remoteSession.deactivate(); } boolean distributeApplicationPackage() { return distributeApplicationPackage.value(); } private void sessionRemoved(long sessionId) { SessionStateWatcher watcher = 
sessionStateWatchers.remove(sessionId); if (watcher != null) watcher.close(); remoteSessionCache.removeSession(sessionId); metrics.incRemovedSessions(); } private void loadSessionIfActive(RemoteSession session) { for (ApplicationId applicationId : applicationRepo.activeApplications()) { if (applicationRepo.requireActiveSessionOf(applicationId) == session.getSessionId()) { log.log(Level.FINE, () -> "Found active application for session " + session.getSessionId() + " , loading it"); applicationRepo.reloadConfig(ensureApplicationLoaded(session)); log.log(Level.INFO, session.logPre() + "Application activated successfully: " + applicationId + " (generation " + session.getSessionId() + ")"); return; } } } void prepareRemoteSession(RemoteSession session) { SessionZooKeeperClient sessionZooKeeperClient = createSessionZooKeeperClient(session.getSessionId()); Curator.CompletionWaiter waiter = sessionZooKeeperClient.getPrepareWaiter(); ensureApplicationLoaded(session); notifyCompletion(waiter, session); } public synchronized ApplicationSet ensureApplicationLoaded(RemoteSession session) { Optional<ApplicationSet> applicationSet = session.applicationSet(); if (applicationSet.isPresent()) { return applicationSet.get(); } ApplicationSet newApplicationSet = loadApplication(session); RemoteSession newSession = new RemoteSession(session.getTenantName(), session.getSessionId(), session.getSessionZooKeeperClient(), Optional.of(newApplicationSet)); remoteSessionCache.addSession(newSession); return newApplicationSet; } void notifyCompletion(Curator.CompletionWaiter completionWaiter, RemoteSession session) { try { completionWaiter.notifyCompletion(); } catch (RuntimeException e) { Set<Class<? extends KeeperException>> acceptedExceptions = Set.of(KeeperException.NoNodeException.class, KeeperException.NodeExistsException.class); Class<? 
extends Throwable> exceptionClass = e.getCause().getClass(); if (acceptedExceptions.contains(exceptionClass)) log.log(Level.FINE, "Not able to notify completion for session " + session.getSessionId() + " (" + completionWaiter + ")," + " node " + (exceptionClass.equals(KeeperException.NoNodeException.class) ? "has been deleted" : "already exists")); else throw e; } } private ApplicationSet loadApplication(RemoteSession session) { SessionZooKeeperClient sessionZooKeeperClient = createSessionZooKeeperClient(session.getSessionId()); ApplicationPackage applicationPackage = sessionZooKeeperClient.loadApplicationPackage(); ActivatedModelsBuilder builder = new ActivatedModelsBuilder(session.getTenantName(), session.getSessionId(), sessionZooKeeperClient, componentRegistry); Optional<AllocatedHosts> allocatedHosts = applicationPackage.getAllocatedHosts(); return ApplicationSet.fromList(builder.buildModels(session.getApplicationId(), sessionZooKeeperClient.readDockerImageRepository(), sessionZooKeeperClient.readVespaVersion(), applicationPackage, new SettableOptional<>(allocatedHosts), clock.instant())); } private void nodeChanged() { zkWatcherExecutor.execute(() -> { Multiset<Session.Status> sessionMetrics = HashMultiset.create(); for (RemoteSession session : remoteSessionCache.getSessions()) { sessionMetrics.add(session.getStatus()); } metrics.setNewSessions(sessionMetrics.count(Session.Status.NEW)); metrics.setPreparedSessions(sessionMetrics.count(Session.Status.PREPARE)); metrics.setActivatedSessions(sessionMetrics.count(Session.Status.ACTIVATE)); metrics.setDeactivatedSessions(sessionMetrics.count(Session.Status.DEACTIVATE)); }); } @SuppressWarnings("unused") private void childEvent(CuratorFramework ignored, PathChildrenCacheEvent event) { zkWatcherExecutor.execute(() -> { log.log(Level.FINE, () -> "Got child event: " + event); switch (event.getType()) { case CHILD_ADDED: sessionsChanged(); 
synchronizeOnNew(getSessionListFromDirectoryCache(Collections.singletonList(event.getData()))); break; case CHILD_REMOVED: case CONNECTION_RECONNECTED: sessionsChanged(); break; } }); } private void synchronizeOnNew(List<Long> sessionList) { for (long sessionId : sessionList) { RemoteSession session = remoteSessionCache.getSession(sessionId); if (session == null) continue; log.log(Level.FINE, () -> session.logPre() + "Confirming upload for session " + sessionId); confirmUpload(session); } } /** * Creates a new deployment session from an application package. * * @param applicationDirectory a File pointing to an application. * @param applicationId application id for this new session. * @param timeoutBudget Timeout for creating session and waiting for other servers. * @return a new session */ public LocalSession createSession(File applicationDirectory, ApplicationId applicationId, TimeoutBudget timeoutBudget, Optional<Long> activeSessionId) { return create(applicationDirectory, applicationId, activeSessionId, false, timeoutBudget); } public RemoteSession createRemoteSession(long sessionId) { SessionZooKeeperClient sessionZKClient = createSessionZooKeeperClient(sessionId); return new RemoteSession(tenantName, sessionId, sessionZKClient); } private void ensureSessionPathDoesNotExist(long sessionId) { Path sessionPath = getSessionPath(sessionId); if (componentRegistry.getConfigCurator().exists(sessionPath.getAbsolute())) { throw new IllegalArgumentException("Path " + sessionPath.getAbsolute() + " already exists in ZooKeeper"); } } private ApplicationPackage createApplication(File userDir, File configApplicationDir, ApplicationId applicationId, long sessionId, Optional<Long> currentlyActiveSessionId, boolean internalRedeploy) { long deployTimestamp = System.currentTimeMillis(); String user = System.getenv("USER"); if (user == null) { user = "unknown"; } DeployData deployData = new DeployData(user, userDir.getAbsolutePath(), applicationId, deployTimestamp, 
internalRedeploy, sessionId, currentlyActiveSessionId.orElse(nonExistingActiveSessionId)); return FilesApplicationPackage.fromFileWithDeployData(configApplicationDir, deployData); } private LocalSession createSessionFromApplication(ApplicationPackage applicationPackage, long sessionId, TimeoutBudget timeoutBudget, Clock clock) { log.log(Level.FINE, TenantRepository.logPre(tenantName) + "Creating session " + sessionId + " in ZooKeeper"); SessionZooKeeperClient sessionZKClient = createSessionZooKeeperClient(sessionId); sessionZKClient.createNewSession(clock.instant()); Curator.CompletionWaiter waiter = sessionZKClient.getUploadWaiter(); LocalSession session = new LocalSession(tenantName, sessionId, applicationPackage, sessionZKClient); waiter.awaitCompletion(timeoutBudget.timeLeft()); return session; } /** * Creates a new deployment session from an already existing session. * * @param existingSession the session to use as base * @param logger a deploy logger where the deploy log will be written. * @param internalRedeploy whether this session is for a system internal redeploy — not an application package change * @param timeoutBudget timeout for creating session and waiting for other servers. 
* @return a new session */ public LocalSession createSessionFromExisting(Session existingSession, DeployLogger logger, boolean internalRedeploy, TimeoutBudget timeoutBudget) { File existingApp = getSessionAppDir(existingSession.getSessionId()); ApplicationId existingApplicationId = existingSession.getApplicationId(); Optional<Long> activeSessionId = getActiveSessionId(existingApplicationId); logger.log(Level.FINE, "Create new session for application id '" + existingApplicationId + "' from existing active session " + activeSessionId); LocalSession session = create(existingApp, existingApplicationId, activeSessionId, internalRedeploy, timeoutBudget); session.setApplicationId(existingApplicationId); if (distributeApplicationPackage() && existingSession.getApplicationPackageReference() != null) { session.setApplicationPackageReference(existingSession.getApplicationPackageReference()); } session.setVespaVersion(existingSession.getVespaVersion()); session.setDockerImageRepository(existingSession.getDockerImageRepository()); session.setAthenzDomain(existingSession.getAthenzDomain()); return session; } private LocalSession create(File applicationFile, ApplicationId applicationId, Optional<Long> currentlyActiveSessionId, boolean internalRedeploy, TimeoutBudget timeoutBudget) { long sessionId = getNextSessionId(); try { ensureSessionPathDoesNotExist(sessionId); ApplicationPackage app = createApplicationPackage(applicationFile, applicationId, sessionId, currentlyActiveSessionId, internalRedeploy); return createSessionFromApplication(app, sessionId, timeoutBudget, clock); } catch (Exception e) { throw new RuntimeException("Error creating session " + sessionId, e); } } /** * This method is used when creating a session based on a remote session and the distributed application package * It does not wait for session being created on other servers */ private LocalSession createLocalSession(File applicationFile, ApplicationId applicationId, long sessionId) { try { Optional<Long> 
currentlyActiveSessionId = getActiveSessionId(applicationId); ApplicationPackage applicationPackage = createApplicationPackage(applicationFile, applicationId, sessionId, currentlyActiveSessionId, false); SessionZooKeeperClient sessionZooKeeperClient = createSessionZooKeeperClient(sessionId); return new LocalSession(tenantName, sessionId, applicationPackage, sessionZooKeeperClient); } catch (Exception e) { throw new RuntimeException("Error creating session " + sessionId, e); } } private ApplicationPackage createApplicationPackage(File applicationFile, ApplicationId applicationId, long sessionId, Optional<Long> currentlyActiveSessionId, boolean internalRedeploy) throws IOException { File userApplicationDir = getSessionAppDir(sessionId); copyApp(applicationFile, userApplicationDir); ApplicationPackage applicationPackage = createApplication(applicationFile, userApplicationDir, applicationId, sessionId, currentlyActiveSessionId, internalRedeploy); applicationPackage.writeMetaData(); return applicationPackage; } private void copyApp(File sourceDir, File destinationDir) throws IOException { if (destinationDir.exists()) throw new RuntimeException("Destination dir " + destinationDir + " already exists"); if (! 
sourceDir.isDirectory()) throw new IllegalArgumentException(sourceDir.getAbsolutePath() + " is not a directory"); java.nio.file.Path tempDestinationDir = null; try { tempDestinationDir = Files.createTempDirectory(destinationDir.getParentFile().toPath(), "app-package"); log.log(Level.FINE, "Copying dir " + sourceDir.getAbsolutePath() + " to " + tempDestinationDir.toFile().getAbsolutePath()); IOUtils.copyDirectory(sourceDir, tempDestinationDir.toFile()); log.log(Level.FINE, "Moving " + tempDestinationDir + " to " + destinationDir.getAbsolutePath()); Files.move(tempDestinationDir, destinationDir.toPath(), StandardCopyOption.ATOMIC_MOVE); } finally { if (tempDestinationDir != null) IOUtils.recursiveDeleteDir(tempDestinationDir.toFile()); } } /** * Returns a new session instance for the given session id. */ LocalSession createSessionFromId(long sessionId) { File sessionDir = getAndValidateExistingSessionAppDir(sessionId); ApplicationPackage applicationPackage = FilesApplicationPackage.fromFile(sessionDir); SessionZooKeeperClient sessionZKClient = createSessionZooKeeperClient(sessionId); return new LocalSession(tenantName, sessionId, applicationPackage, sessionZKClient); } /** * Returns a new local session for the given session id if it does not already exist. 
* Will also add the session to the local session cache if necessary */ public Optional<LocalSession> createLocalSessionUsingDistributedApplicationPackage(long sessionId) { if (applicationRepo.hasLocalSession(sessionId)) { log.log(Level.FINE, "Local session for session id " + sessionId + " already exists"); return Optional.of(createSessionFromId(sessionId)); } SessionZooKeeperClient sessionZKClient = createSessionZooKeeperClient(sessionId); FileReference fileReference = sessionZKClient.readApplicationPackageReference(); log.log(Level.FINE, "File reference for session id " + sessionId + ": " + fileReference); if (fileReference != null) { File rootDir = new File(Defaults.getDefaults().underVespaHome(componentRegistry.getConfigserverConfig().fileReferencesDir())); File sessionDir; FileDirectory fileDirectory = new FileDirectory(rootDir); try { sessionDir = fileDirectory.getFile(fileReference); } catch (IllegalArgumentException e) { log.log(Level.INFO, "File reference for session id " + sessionId + ": " + fileReference + " not found in " + fileDirectory); return Optional.empty(); } ApplicationId applicationId = sessionZKClient.readApplicationId() .orElseThrow(() -> new RuntimeException("Could not find application id for session " + sessionId)); log.log(Level.INFO, "Creating local session for tenant '" + tenantName + "' with session id " + sessionId); LocalSession localSession = createLocalSession(sessionDir, applicationId, sessionId); addLocalSession(localSession); return Optional.of(localSession); } return Optional.empty(); } private Optional<Long> getActiveSessionId(ApplicationId applicationId) { List<ApplicationId> applicationIds = applicationRepo.activeApplications(); return applicationIds.contains(applicationId) ? 
Optional.of(applicationRepo.requireActiveSessionOf(applicationId)) : Optional.empty(); } private long getNextSessionId() { return new SessionCounter(componentRegistry.getConfigCurator(), tenantName).nextSessionId(); } public Path getSessionPath(long sessionId) { return sessionsPath.append(String.valueOf(sessionId)); } Path getSessionStatePath(long sessionId) { return getSessionPath(sessionId).append(ConfigCurator.SESSIONSTATE_ZK_SUBPATH); } private SessionZooKeeperClient createSessionZooKeeperClient(long sessionId) { String serverId = componentRegistry.getConfigserverConfig().serverId(); return new SessionZooKeeperClient(curator, componentRegistry.getConfigCurator(), tenantName, sessionId, serverId); } private File getAndValidateExistingSessionAppDir(long sessionId) { File appDir = getSessionAppDir(sessionId); if (!appDir.exists() || !appDir.isDirectory()) { throw new IllegalArgumentException("Unable to find correct application directory for session " + sessionId); } return appDir; } private File getSessionAppDir(long sessionId) { return new TenantFileSystemDirs(componentRegistry.getConfigServerDB(), tenantName).getUserApplicationDir(sessionId); } private void addSessionStateWatcher(long sessionId, RemoteSession remoteSession, Optional<LocalSession> localSession) { if (sessionStateWatchers.containsKey(sessionId)) { localSession.ifPresent(session -> sessionStateWatchers.get(sessionId).addLocalSession(session)); } else { Curator.FileCache fileCache = curator.createFileCache(getSessionStatePath(sessionId).getAbsolute(), false); fileCache.addListener(this::nodeChanged); sessionStateWatchers.put(sessionId, new SessionStateWatcher(fileCache, remoteSession, localSession, metrics, zkWatcherExecutor, this)); } } @Override public String toString() { return getLocalSessions().toString(); } /** Returns the lock for session operations for the given session id. 
*/ public Lock lock(long sessionId) { return curator.lock(lockPath(sessionId), Duration.ofMinutes(1)); } public Clock clock() { return clock; } private Path lockPath(long sessionId) { return locksPath.append(String.valueOf(sessionId)); } public Transaction createActivateTransaction(Session session) { Transaction transaction = createSetStatusTransaction(session, Session.Status.ACTIVATE); transaction.add(applicationRepo.createPutTransaction(session.getApplicationId(), session.getSessionId()).operations()); return transaction; } private Transaction createSetStatusTransaction(Session session, Session.Status status) { return session.sessionZooKeeperClient.createWriteStatusTransaction(status); } void setPrepared(Session session) { session.setStatus(Session.Status.PREPARE); } private static class FileTransaction extends AbstractTransaction { public static FileTransaction from(FileOperation operation) { FileTransaction transaction = new FileTransaction(); transaction.add(operation); return transaction; } @Override public void prepare() { } @Override public void commit() { for (Operation operation : operations()) ((FileOperation)operation).commit(); } } /** Factory for file operations */ private static class FileOperations { /** Creates an operation which recursively deletes the given path */ public static DeleteOperation delete(String pathToDelete) { return new DeleteOperation(pathToDelete); } } private interface FileOperation extends Transaction.Operation { void commit(); } /** * Recursively deletes this path and everything below. * Succeeds with no action if the path does not exist. */ private static class DeleteOperation implements FileOperation { private final String pathToDelete; DeleteOperation(String pathToDelete) { this.pathToDelete = pathToDelete; } @Override public void commit() { IOUtils.recursiveDeleteDir(new File(pathToDelete)); } } }
class SessionRepository { private static final Logger log = Logger.getLogger(SessionRepository.class.getName()); private static final FilenameFilter sessionApplicationsFilter = (dir, name) -> name.matches("\\d+"); private static final long nonExistingActiveSessionId = 0; private final SessionCache<LocalSession> localSessionCache = new SessionCache<>(); private final SessionCache<RemoteSession> remoteSessionCache = new SessionCache<>(); private final Map<Long, SessionStateWatcher> sessionStateWatchers = new HashMap<>(); private final Duration sessionLifetime; private final Clock clock; private final Curator curator; private final Executor zkWatcherExecutor; private final TenantFileSystemDirs tenantFileSystemDirs; private final BooleanFlag distributeApplicationPackage; private final MetricUpdater metrics; private final Curator.DirectoryCache directoryCache; private final TenantApplications applicationRepo; private final SessionPreparer sessionPreparer; private final Path sessionsPath; private final TenantName tenantName; private final GlobalComponentRegistry componentRegistry; private final Path locksPath; public SessionRepository(TenantName tenantName, GlobalComponentRegistry componentRegistry, TenantApplications applicationRepo, FlagSource flagSource, SessionPreparer sessionPreparer) { this.tenantName = tenantName; this.componentRegistry = componentRegistry; this.sessionsPath = TenantRepository.getSessionsPath(tenantName); this.clock = componentRegistry.getClock(); this.curator = componentRegistry.getCurator(); this.sessionLifetime = Duration.ofSeconds(componentRegistry.getConfigserverConfig().sessionLifetime()); this.zkWatcherExecutor = command -> componentRegistry.getZkWatcherExecutor().execute(tenantName, command); this.tenantFileSystemDirs = new TenantFileSystemDirs(componentRegistry.getConfigServerDB(), tenantName); this.applicationRepo = applicationRepo; this.sessionPreparer = sessionPreparer; this.distributeApplicationPackage = 
Flags.CONFIGSERVER_DISTRIBUTE_APPLICATION_PACKAGE.bindTo(flagSource); this.metrics = componentRegistry.getMetrics().getOrCreateMetricUpdater(Metrics.createDimensions(tenantName)); this.locksPath = TenantRepository.getLocksPath(tenantName); loadSessions(); this.directoryCache = curator.createDirectoryCache(sessionsPath.getAbsolute(), false, false, componentRegistry.getZkCacheExecutor()); this.directoryCache.addListener(this::childEvent); this.directoryCache.start(); } private void loadSessions() { loadLocalSessions(); initializeRemoteSessions(); } public synchronized void addLocalSession(LocalSession session) { localSessionCache.putSession(session); long sessionId = session.getSessionId(); RemoteSession remoteSession = createRemoteSession(sessionId); addSessionStateWatcher(sessionId, remoteSession, Optional.of(session)); } public LocalSession getLocalSession(long sessionId) { return localSessionCache.getSession(sessionId); } public List<LocalSession> getLocalSessions() { return localSessionCache.getSessions(); } private void loadLocalSessions() { File[] sessions = tenantFileSystemDirs.sessionsPath().listFiles(sessionApplicationsFilter); if (sessions == null) return; for (File session : sessions) { try { addLocalSession(createSessionFromId(Long.parseLong(session.getName()))); } catch (IllegalArgumentException e) { log.log(Level.WARNING, "Could not load session '" + session.getAbsolutePath() + "':" + e.getMessage() + ", skipping it."); } } } public ConfigChangeActions prepareLocalSession(LocalSession session, DeployLogger logger, PrepareParams params, Optional<ApplicationSet> currentActiveApplicationSet, Path tenantPath, Instant now) { applicationRepo.createApplication(params.getApplicationId()); logger.log(Level.FINE, "Created application " + params.getApplicationId()); long sessionId = session.getSessionId(); SessionZooKeeperClient sessionZooKeeperClient = createSessionZooKeeperClient(sessionId); Curator.CompletionWaiter waiter = 
sessionZooKeeperClient.createPrepareWaiter(); ConfigChangeActions actions = sessionPreparer.prepare(applicationRepo.getHostValidator(), logger, params, currentActiveApplicationSet, tenantPath, now, getSessionAppDir(sessionId), session.getApplicationPackage(), sessionZooKeeperClient) .getConfigChangeActions(); setPrepared(session); waiter.awaitCompletion(params.getTimeoutBudget().timeLeft()); return actions; } public void deleteExpiredSessions(Map<ApplicationId, Long> activeSessions) { log.log(Level.FINE, "Purging old sessions for tenant '" + tenantName + "'"); try { for (LocalSession candidate : localSessionCache.getSessions()) { Instant createTime = candidate.getCreateTime(); log.log(Level.FINE, "Candidate session for deletion: " + candidate.getSessionId() + ", created: " + createTime); if (hasExpired(candidate) && !isActiveSession(candidate)) { deleteLocalSession(candidate); } else if (createTime.plus(Duration.ofDays(1)).isBefore(clock.instant())) { Optional<ApplicationId> applicationId = candidate.getOptionalApplicationId(); if (applicationId.isEmpty()) continue; Long activeSession = activeSessions.get(applicationId.get()); if (activeSession == null || activeSession != candidate.getSessionId()) { deleteLocalSession(candidate); log.log(Level.INFO, "Deleted inactive session " + candidate.getSessionId() + " created " + createTime + " for '" + applicationId + "'"); } } } } catch (Throwable e) { log.log(Level.WARNING, "Error when purging old sessions ", e); } log.log(Level.FINE, "Done purging old sessions"); } private boolean hasExpired(LocalSession candidate) { return (candidate.getCreateTime().plus(sessionLifetime).isBefore(clock.instant())); } private boolean isActiveSession(LocalSession candidate) { return candidate.getStatus() == Session.Status.ACTIVATE; } public void deleteLocalSession(LocalSession session) { long sessionId = session.getSessionId(); try (Lock lock = lock(sessionId)) { log.log(Level.FINE, "Deleting local session " + sessionId); 
SessionStateWatcher watcher = sessionStateWatchers.remove(sessionId); if (watcher != null) watcher.close(); localSessionCache.removeSession(sessionId); deletePersistentData(sessionId); } } private void deletePersistentData(long sessionId) { NestedTransaction transaction = new NestedTransaction(); SessionZooKeeperClient sessionZooKeeperClient = createSessionZooKeeperClient(sessionId); transaction.add(sessionZooKeeperClient.deleteTransaction(), FileTransaction.class); transaction.add(FileTransaction.from(FileOperations.delete(getSessionAppDir(sessionId).getAbsolutePath()))); transaction.commit(); } public void close() { deleteAllSessions(); tenantFileSystemDirs.delete(); try { if (directoryCache != null) { directoryCache.close(); } } catch (Exception e) { log.log(Level.WARNING, "Exception when closing path cache", e); } finally { checkForRemovedSessions(new ArrayList<>()); } } private void deleteAllSessions() { List<LocalSession> sessions = new ArrayList<>(localSessionCache.getSessions()); for (LocalSession session : sessions) { deleteLocalSession(session); } } public RemoteSession getRemoteSession(long sessionId) { return remoteSessionCache.getSession(sessionId); } public List<Long> getRemoteSessions() { return getSessionList(curator.getChildren(sessionsPath)); } public void addRemoteSession(RemoteSession session) { remoteSessionCache.putSession(session); metrics.incAddedSessions(); } public int deleteExpiredRemoteSessions(Clock clock, Duration expiryTime) { int deleted = 0; for (long sessionId : getRemoteSessions()) { RemoteSession session = remoteSessionCache.getSession(sessionId); if (session == null) continue; if (session.getStatus() == Session.Status.ACTIVATE) continue; if (sessionHasExpired(session.getCreateTime(), expiryTime, clock)) { log.log(Level.FINE, "Remote session " + sessionId + " for " + tenantName + " has expired, deleting it"); deleteSession(session); deleted++; } } return deleted; } public void deactivate(RemoteSession remoteSession) { 
remoteSessionCache.putSession(remoteSession.deactivated()); } public void deleteSession(RemoteSession session) { SessionZooKeeperClient sessionZooKeeperClient = createSessionZooKeeperClient(session.getSessionId()); Transaction transaction = sessionZooKeeperClient.deleteTransaction(); transaction.commit(); transaction.close(); } public int deleteExpiredLocks(Clock clock, Duration expiryTime) { int deleted = 0; for (var lock : curator.getChildren(locksPath)) { Path path = locksPath.append(lock); if (zooKeeperNodeCreated(path).orElse(clock.instant()).isBefore(clock.instant().minus(expiryTime))) { log.log(Level.FINE, "Lock " + path + " has expired, deleting it"); curator.delete(path); deleted++; } } return deleted; } private Optional<Instant> zooKeeperNodeCreated(Path path) { return curator.getStat(path).map(s -> Instant.ofEpochMilli(s.getCtime())); } private boolean sessionHasExpired(Instant created, Duration expiryTime, Clock clock) { return (created.plus(expiryTime).isBefore(clock.instant())); } private List<Long> getSessionListFromDirectoryCache(List<ChildData> children) { return getSessionList(children.stream() .map(child -> Path.fromString(child.getPath()).getName()) .collect(Collectors.toList())); } private List<Long> getSessionList(List<String> children) { return children.stream().map(Long::parseLong).collect(Collectors.toList()); } private void initializeRemoteSessions() throws NumberFormatException { getRemoteSessions().forEach(this::sessionAdded); } private synchronized void sessionsChanged() throws NumberFormatException { List<Long> sessions = getSessionListFromDirectoryCache(directoryCache.getCurrentData()); checkForRemovedSessions(sessions); checkForAddedSessions(sessions); } private void checkForRemovedSessions(List<Long> sessions) { for (RemoteSession session : remoteSessionCache.getSessions()) if ( ! 
sessions.contains(session.getSessionId())) sessionRemoved(session.getSessionId()); } private void checkForAddedSessions(List<Long> sessions) { for (Long sessionId : sessions) if (remoteSessionCache.getSession(sessionId) == null) sessionAdded(sessionId); } /** * A session for which we don't have a watcher, i.e. hitherto unknown to us. * * @param sessionId session id for the new session */ public void sessionAdded(long sessionId) { SessionZooKeeperClient sessionZKClient = createSessionZooKeeperClient(sessionId); if (sessionZKClient.readStatus().equals(Session.Status.DELETE)) return; log.log(Level.FINE, () -> "Adding remote session to SessionRepository: " + sessionId); RemoteSession remoteSession = createRemoteSession(sessionId); loadSessionIfActive(remoteSession); addRemoteSession(remoteSession); Optional<LocalSession> localSession = Optional.empty(); if (distributeApplicationPackage()) localSession = createLocalSessionUsingDistributedApplicationPackage(sessionId); addSessionStateWatcher(sessionId, remoteSession, localSession); } void activate(RemoteSession session) { long sessionId = session.getSessionId(); Curator.CompletionWaiter waiter = createSessionZooKeeperClient(sessionId).getActiveWaiter(); log.log(Level.FINE, () -> session.logPre() + "Getting session from repo: " + sessionId); ApplicationSet app = ensureApplicationLoaded(session); log.log(Level.FINE, () -> session.logPre() + "Reloading config for " + sessionId); applicationRepo.reloadConfig(app); log.log(Level.FINE, () -> session.logPre() + "Notifying " + waiter); notifyCompletion(waiter, session); log.log(Level.INFO, session.logPre() + "Session activated: " + sessionId); } public void deleteSession(RemoteSession remoteSession, Optional<LocalSession> localSession) { localSession.ifPresent(this::deleteLocalSession); deactivate(remoteSession); } boolean distributeApplicationPackage() { return distributeApplicationPackage.value(); } private void sessionRemoved(long sessionId) { SessionStateWatcher watcher = 
sessionStateWatchers.remove(sessionId); if (watcher != null) watcher.close(); remoteSessionCache.removeSession(sessionId); metrics.incRemovedSessions(); } private void loadSessionIfActive(RemoteSession session) { for (ApplicationId applicationId : applicationRepo.activeApplications()) { if (applicationRepo.requireActiveSessionOf(applicationId) == session.getSessionId()) { log.log(Level.FINE, () -> "Found active application for session " + session.getSessionId() + " , loading it"); applicationRepo.reloadConfig(ensureApplicationLoaded(session)); log.log(Level.INFO, session.logPre() + "Application activated successfully: " + applicationId + " (generation " + session.getSessionId() + ")"); return; } } } void prepareRemoteSession(RemoteSession session) { SessionZooKeeperClient sessionZooKeeperClient = createSessionZooKeeperClient(session.getSessionId()); Curator.CompletionWaiter waiter = sessionZooKeeperClient.getPrepareWaiter(); ensureApplicationLoaded(session); notifyCompletion(waiter, session); } public synchronized ApplicationSet ensureApplicationLoaded(RemoteSession session) { Optional<ApplicationSet> applicationSet = session.applicationSet(); if (applicationSet.isPresent()) { return applicationSet.get(); } ApplicationSet newApplicationSet = loadApplication(session); RemoteSession newSession = new RemoteSession(session.getTenantName(), session.getSessionId(), session.getSessionZooKeeperClient(), Optional.of(newApplicationSet)); remoteSessionCache.putSession(newSession); return newApplicationSet; } void notifyCompletion(Curator.CompletionWaiter completionWaiter, RemoteSession session) { try { completionWaiter.notifyCompletion(); } catch (RuntimeException e) { Set<Class<? extends KeeperException>> acceptedExceptions = Set.of(KeeperException.NoNodeException.class, KeeperException.NodeExistsException.class); Class<? 
extends Throwable> exceptionClass = e.getCause().getClass(); if (acceptedExceptions.contains(exceptionClass)) log.log(Level.FINE, "Not able to notify completion for session " + session.getSessionId() + " (" + completionWaiter + ")," + " node " + (exceptionClass.equals(KeeperException.NoNodeException.class) ? "has been deleted" : "already exists")); else throw e; } } private ApplicationSet loadApplication(RemoteSession session) { SessionZooKeeperClient sessionZooKeeperClient = createSessionZooKeeperClient(session.getSessionId()); ApplicationPackage applicationPackage = sessionZooKeeperClient.loadApplicationPackage(); ActivatedModelsBuilder builder = new ActivatedModelsBuilder(session.getTenantName(), session.getSessionId(), sessionZooKeeperClient, componentRegistry); Optional<AllocatedHosts> allocatedHosts = applicationPackage.getAllocatedHosts(); return ApplicationSet.fromList(builder.buildModels(session.getApplicationId(), sessionZooKeeperClient.readDockerImageRepository(), sessionZooKeeperClient.readVespaVersion(), applicationPackage, new SettableOptional<>(allocatedHosts), clock.instant())); } private void nodeChanged() { zkWatcherExecutor.execute(() -> { Multiset<Session.Status> sessionMetrics = HashMultiset.create(); for (RemoteSession session : remoteSessionCache.getSessions()) { sessionMetrics.add(session.getStatus()); } metrics.setNewSessions(sessionMetrics.count(Session.Status.NEW)); metrics.setPreparedSessions(sessionMetrics.count(Session.Status.PREPARE)); metrics.setActivatedSessions(sessionMetrics.count(Session.Status.ACTIVATE)); metrics.setDeactivatedSessions(sessionMetrics.count(Session.Status.DEACTIVATE)); }); } @SuppressWarnings("unused") private void childEvent(CuratorFramework ignored, PathChildrenCacheEvent event) { zkWatcherExecutor.execute(() -> { log.log(Level.FINE, () -> "Got child event: " + event); switch (event.getType()) { case CHILD_ADDED: sessionsChanged(); 
synchronizeOnNew(getSessionListFromDirectoryCache(Collections.singletonList(event.getData()))); break; case CHILD_REMOVED: case CONNECTION_RECONNECTED: sessionsChanged(); break; } }); } private void synchronizeOnNew(List<Long> sessionList) { for (long sessionId : sessionList) { RemoteSession session = remoteSessionCache.getSession(sessionId); if (session == null) continue; log.log(Level.FINE, () -> session.logPre() + "Confirming upload for session " + sessionId); confirmUpload(session); } } /** * Creates a new deployment session from an application package. * * @param applicationDirectory a File pointing to an application. * @param applicationId application id for this new session. * @param timeoutBudget Timeout for creating session and waiting for other servers. * @return a new session */ public LocalSession createSession(File applicationDirectory, ApplicationId applicationId, TimeoutBudget timeoutBudget, Optional<Long> activeSessionId) { return create(applicationDirectory, applicationId, activeSessionId, false, timeoutBudget); } public RemoteSession createRemoteSession(long sessionId) { SessionZooKeeperClient sessionZKClient = createSessionZooKeeperClient(sessionId); return new RemoteSession(tenantName, sessionId, sessionZKClient); } private void ensureSessionPathDoesNotExist(long sessionId) { Path sessionPath = getSessionPath(sessionId); if (componentRegistry.getConfigCurator().exists(sessionPath.getAbsolute())) { throw new IllegalArgumentException("Path " + sessionPath.getAbsolute() + " already exists in ZooKeeper"); } } private ApplicationPackage createApplication(File userDir, File configApplicationDir, ApplicationId applicationId, long sessionId, Optional<Long> currentlyActiveSessionId, boolean internalRedeploy) { long deployTimestamp = System.currentTimeMillis(); String user = System.getenv("USER"); if (user == null) { user = "unknown"; } DeployData deployData = new DeployData(user, userDir.getAbsolutePath(), applicationId, deployTimestamp, 
internalRedeploy, sessionId, currentlyActiveSessionId.orElse(nonExistingActiveSessionId)); return FilesApplicationPackage.fromFileWithDeployData(configApplicationDir, deployData); } private LocalSession createSessionFromApplication(ApplicationPackage applicationPackage, long sessionId, TimeoutBudget timeoutBudget, Clock clock) { log.log(Level.FINE, TenantRepository.logPre(tenantName) + "Creating session " + sessionId + " in ZooKeeper"); SessionZooKeeperClient sessionZKClient = createSessionZooKeeperClient(sessionId); sessionZKClient.createNewSession(clock.instant()); Curator.CompletionWaiter waiter = sessionZKClient.getUploadWaiter(); LocalSession session = new LocalSession(tenantName, sessionId, applicationPackage, sessionZKClient); waiter.awaitCompletion(timeoutBudget.timeLeft()); return session; } /** * Creates a new deployment session from an already existing session. * * @param existingSession the session to use as base * @param logger a deploy logger where the deploy log will be written. * @param internalRedeploy whether this session is for a system internal redeploy — not an application package change * @param timeoutBudget timeout for creating session and waiting for other servers. 
* @return a new session */ public LocalSession createSessionFromExisting(Session existingSession, DeployLogger logger, boolean internalRedeploy, TimeoutBudget timeoutBudget) { File existingApp = getSessionAppDir(existingSession.getSessionId()); ApplicationId existingApplicationId = existingSession.getApplicationId(); Optional<Long> activeSessionId = getActiveSessionId(existingApplicationId); logger.log(Level.FINE, "Create new session for application id '" + existingApplicationId + "' from existing active session " + activeSessionId); LocalSession session = create(existingApp, existingApplicationId, activeSessionId, internalRedeploy, timeoutBudget); session.setApplicationId(existingApplicationId); if (distributeApplicationPackage() && existingSession.getApplicationPackageReference() != null) { session.setApplicationPackageReference(existingSession.getApplicationPackageReference()); } session.setVespaVersion(existingSession.getVespaVersion()); session.setDockerImageRepository(existingSession.getDockerImageRepository()); session.setAthenzDomain(existingSession.getAthenzDomain()); return session; } private LocalSession create(File applicationFile, ApplicationId applicationId, Optional<Long> currentlyActiveSessionId, boolean internalRedeploy, TimeoutBudget timeoutBudget) { long sessionId = getNextSessionId(); try { ensureSessionPathDoesNotExist(sessionId); ApplicationPackage app = createApplicationPackage(applicationFile, applicationId, sessionId, currentlyActiveSessionId, internalRedeploy); return createSessionFromApplication(app, sessionId, timeoutBudget, clock); } catch (Exception e) { throw new RuntimeException("Error creating session " + sessionId, e); } } /** * This method is used when creating a session based on a remote session and the distributed application package * It does not wait for session being created on other servers */ private LocalSession createLocalSession(File applicationFile, ApplicationId applicationId, long sessionId) { try { Optional<Long> 
currentlyActiveSessionId = getActiveSessionId(applicationId); ApplicationPackage applicationPackage = createApplicationPackage(applicationFile, applicationId, sessionId, currentlyActiveSessionId, false); SessionZooKeeperClient sessionZooKeeperClient = createSessionZooKeeperClient(sessionId); return new LocalSession(tenantName, sessionId, applicationPackage, sessionZooKeeperClient); } catch (Exception e) { throw new RuntimeException("Error creating session " + sessionId, e); } } private ApplicationPackage createApplicationPackage(File applicationFile, ApplicationId applicationId, long sessionId, Optional<Long> currentlyActiveSessionId, boolean internalRedeploy) throws IOException { File userApplicationDir = getSessionAppDir(sessionId); copyApp(applicationFile, userApplicationDir); ApplicationPackage applicationPackage = createApplication(applicationFile, userApplicationDir, applicationId, sessionId, currentlyActiveSessionId, internalRedeploy); applicationPackage.writeMetaData(); return applicationPackage; } private void copyApp(File sourceDir, File destinationDir) throws IOException { if (destinationDir.exists()) throw new RuntimeException("Destination dir " + destinationDir + " already exists"); if (! 
sourceDir.isDirectory()) throw new IllegalArgumentException(sourceDir.getAbsolutePath() + " is not a directory"); java.nio.file.Path tempDestinationDir = null; try { tempDestinationDir = Files.createTempDirectory(destinationDir.getParentFile().toPath(), "app-package"); log.log(Level.FINE, "Copying dir " + sourceDir.getAbsolutePath() + " to " + tempDestinationDir.toFile().getAbsolutePath()); IOUtils.copyDirectory(sourceDir, tempDestinationDir.toFile()); log.log(Level.FINE, "Moving " + tempDestinationDir + " to " + destinationDir.getAbsolutePath()); Files.move(tempDestinationDir, destinationDir.toPath(), StandardCopyOption.ATOMIC_MOVE); } finally { if (tempDestinationDir != null) IOUtils.recursiveDeleteDir(tempDestinationDir.toFile()); } } /** * Returns a new session instance for the given session id. */ LocalSession createSessionFromId(long sessionId) { File sessionDir = getAndValidateExistingSessionAppDir(sessionId); ApplicationPackage applicationPackage = FilesApplicationPackage.fromFile(sessionDir); SessionZooKeeperClient sessionZKClient = createSessionZooKeeperClient(sessionId); return new LocalSession(tenantName, sessionId, applicationPackage, sessionZKClient); } /** * Returns a new local session for the given session id if it does not already exist. 
* Will also add the session to the local session cache if necessary */ public Optional<LocalSession> createLocalSessionUsingDistributedApplicationPackage(long sessionId) { if (applicationRepo.hasLocalSession(sessionId)) { log.log(Level.FINE, "Local session for session id " + sessionId + " already exists"); return Optional.of(createSessionFromId(sessionId)); } SessionZooKeeperClient sessionZKClient = createSessionZooKeeperClient(sessionId); FileReference fileReference = sessionZKClient.readApplicationPackageReference(); log.log(Level.FINE, "File reference for session id " + sessionId + ": " + fileReference); if (fileReference != null) { File rootDir = new File(Defaults.getDefaults().underVespaHome(componentRegistry.getConfigserverConfig().fileReferencesDir())); File sessionDir; FileDirectory fileDirectory = new FileDirectory(rootDir); try { sessionDir = fileDirectory.getFile(fileReference); } catch (IllegalArgumentException e) { log.log(Level.INFO, "File reference for session id " + sessionId + ": " + fileReference + " not found in " + fileDirectory); return Optional.empty(); } ApplicationId applicationId = sessionZKClient.readApplicationId() .orElseThrow(() -> new RuntimeException("Could not find application id for session " + sessionId)); log.log(Level.INFO, "Creating local session for tenant '" + tenantName + "' with session id " + sessionId); LocalSession localSession = createLocalSession(sessionDir, applicationId, sessionId); addLocalSession(localSession); return Optional.of(localSession); } return Optional.empty(); } private Optional<Long> getActiveSessionId(ApplicationId applicationId) { List<ApplicationId> applicationIds = applicationRepo.activeApplications(); return applicationIds.contains(applicationId) ? 
Optional.of(applicationRepo.requireActiveSessionOf(applicationId)) : Optional.empty(); } private long getNextSessionId() { return new SessionCounter(componentRegistry.getConfigCurator(), tenantName).nextSessionId(); } public Path getSessionPath(long sessionId) { return sessionsPath.append(String.valueOf(sessionId)); } Path getSessionStatePath(long sessionId) { return getSessionPath(sessionId).append(ConfigCurator.SESSIONSTATE_ZK_SUBPATH); } private SessionZooKeeperClient createSessionZooKeeperClient(long sessionId) { String serverId = componentRegistry.getConfigserverConfig().serverId(); return new SessionZooKeeperClient(curator, componentRegistry.getConfigCurator(), tenantName, sessionId, serverId); } private File getAndValidateExistingSessionAppDir(long sessionId) { File appDir = getSessionAppDir(sessionId); if (!appDir.exists() || !appDir.isDirectory()) { throw new IllegalArgumentException("Unable to find correct application directory for session " + sessionId); } return appDir; } private File getSessionAppDir(long sessionId) { return new TenantFileSystemDirs(componentRegistry.getConfigServerDB(), tenantName).getUserApplicationDir(sessionId); } private void addSessionStateWatcher(long sessionId, RemoteSession remoteSession, Optional<LocalSession> localSession) { if (sessionStateWatchers.containsKey(sessionId)) { localSession.ifPresent(session -> sessionStateWatchers.get(sessionId).addLocalSession(session)); } else { Curator.FileCache fileCache = curator.createFileCache(getSessionStatePath(sessionId).getAbsolute(), false); fileCache.addListener(this::nodeChanged); sessionStateWatchers.put(sessionId, new SessionStateWatcher(fileCache, remoteSession, localSession, metrics, zkWatcherExecutor, this)); } } @Override public String toString() { return getLocalSessions().toString(); } /** Returns the lock for session operations for the given session id. 
*/ public Lock lock(long sessionId) { return curator.lock(lockPath(sessionId), Duration.ofMinutes(1)); } public Clock clock() { return clock; } private Path lockPath(long sessionId) { return locksPath.append(String.valueOf(sessionId)); } public Transaction createActivateTransaction(Session session) { Transaction transaction = createSetStatusTransaction(session, Session.Status.ACTIVATE); transaction.add(applicationRepo.createPutTransaction(session.getApplicationId(), session.getSessionId()).operations()); return transaction; } private Transaction createSetStatusTransaction(Session session, Session.Status status) { return session.sessionZooKeeperClient.createWriteStatusTransaction(status); } void setPrepared(Session session) { session.setStatus(Session.Status.PREPARE); } private static class FileTransaction extends AbstractTransaction { public static FileTransaction from(FileOperation operation) { FileTransaction transaction = new FileTransaction(); transaction.add(operation); return transaction; } @Override public void prepare() { } @Override public void commit() { for (Operation operation : operations()) ((FileOperation)operation).commit(); } } /** Factory for file operations */ private static class FileOperations { /** Creates an operation which recursively deletes the given path */ public static DeleteOperation delete(String pathToDelete) { return new DeleteOperation(pathToDelete); } } private interface FileOperation extends Transaction.Operation { void commit(); } /** * Recursively deletes this path and everything below. * Succeeds with no action if the path does not exist. */ private static class DeleteOperation implements FileOperation { private final String pathToDelete; DeleteOperation(String pathToDelete) { this.pathToDelete = pathToDelete; } @Override public void commit() { IOUtils.recursiveDeleteDir(new File(pathToDelete)); } } }
Agreed — will fix.
/**
 * Marks the given remote session as deactivated by replacing its cache entry
 * with the deactivated copy.
 *
 * @param remoteSession the session to deactivate; its {@code deactivated()}
 *                      copy — presumably a new instance with status
 *                      DEACTIVATE (confirm in RemoteSession) — replaces the
 *                      existing cache entry
 */
public void deactivate(RemoteSession remoteSession) {
    // Fix: the cache API used everywhere else in this class is putSession
    // (see addRemoteSession / ensureApplicationLoaded), not addSession, and
    // the copy accessor on RemoteSession is deactivated(), not deactivate().
    remoteSessionCache.putSession(remoteSession.deactivated());
}
remoteSessionCache.addSession(remoteSession.deactivate());
/** Replaces the cached entry for this remote session with its deactivated copy — deactivated() presumably returns a new instance with status DEACTIVATE (confirm in RemoteSession); putSession overwrites the existing cache entry. */
public void deactivate(RemoteSession remoteSession) { remoteSessionCache.putSession(remoteSession.deactivated()); }
class SessionRepository { private static final Logger log = Logger.getLogger(SessionRepository.class.getName()); private static final FilenameFilter sessionApplicationsFilter = (dir, name) -> name.matches("\\d+"); private static final long nonExistingActiveSessionId = 0; private final SessionCache<LocalSession> localSessionCache = new SessionCache<>(); private final SessionCache<RemoteSession> remoteSessionCache = new SessionCache<>(); private final Map<Long, SessionStateWatcher> sessionStateWatchers = new HashMap<>(); private final Duration sessionLifetime; private final Clock clock; private final Curator curator; private final Executor zkWatcherExecutor; private final TenantFileSystemDirs tenantFileSystemDirs; private final BooleanFlag distributeApplicationPackage; private final MetricUpdater metrics; private final Curator.DirectoryCache directoryCache; private final TenantApplications applicationRepo; private final SessionPreparer sessionPreparer; private final Path sessionsPath; private final TenantName tenantName; private final GlobalComponentRegistry componentRegistry; private final Path locksPath; public SessionRepository(TenantName tenantName, GlobalComponentRegistry componentRegistry, TenantApplications applicationRepo, FlagSource flagSource, SessionPreparer sessionPreparer) { this.tenantName = tenantName; this.componentRegistry = componentRegistry; this.sessionsPath = TenantRepository.getSessionsPath(tenantName); this.clock = componentRegistry.getClock(); this.curator = componentRegistry.getCurator(); this.sessionLifetime = Duration.ofSeconds(componentRegistry.getConfigserverConfig().sessionLifetime()); this.zkWatcherExecutor = command -> componentRegistry.getZkWatcherExecutor().execute(tenantName, command); this.tenantFileSystemDirs = new TenantFileSystemDirs(componentRegistry.getConfigServerDB(), tenantName); this.applicationRepo = applicationRepo; this.sessionPreparer = sessionPreparer; this.distributeApplicationPackage = 
Flags.CONFIGSERVER_DISTRIBUTE_APPLICATION_PACKAGE.bindTo(flagSource); this.metrics = componentRegistry.getMetrics().getOrCreateMetricUpdater(Metrics.createDimensions(tenantName)); this.locksPath = TenantRepository.getLocksPath(tenantName); loadSessions(); this.directoryCache = curator.createDirectoryCache(sessionsPath.getAbsolute(), false, false, componentRegistry.getZkCacheExecutor()); this.directoryCache.addListener(this::childEvent); this.directoryCache.start(); } private void loadSessions() { loadLocalSessions(); initializeRemoteSessions(); } public synchronized void addLocalSession(LocalSession session) { localSessionCache.addSession(session); long sessionId = session.getSessionId(); RemoteSession remoteSession = createRemoteSession(sessionId); addSessionStateWatcher(sessionId, remoteSession, Optional.of(session)); } public LocalSession getLocalSession(long sessionId) { return localSessionCache.getSession(sessionId); } public List<LocalSession> getLocalSessions() { return localSessionCache.getSessions(); } private void loadLocalSessions() { File[] sessions = tenantFileSystemDirs.sessionsPath().listFiles(sessionApplicationsFilter); if (sessions == null) return; for (File session : sessions) { try { addLocalSession(createSessionFromId(Long.parseLong(session.getName()))); } catch (IllegalArgumentException e) { log.log(Level.WARNING, "Could not load session '" + session.getAbsolutePath() + "':" + e.getMessage() + ", skipping it."); } } } public ConfigChangeActions prepareLocalSession(LocalSession session, DeployLogger logger, PrepareParams params, Optional<ApplicationSet> currentActiveApplicationSet, Path tenantPath, Instant now) { applicationRepo.createApplication(params.getApplicationId()); logger.log(Level.FINE, "Created application " + params.getApplicationId()); long sessionId = session.getSessionId(); SessionZooKeeperClient sessionZooKeeperClient = createSessionZooKeeperClient(sessionId); Curator.CompletionWaiter waiter = 
sessionZooKeeperClient.createPrepareWaiter(); ConfigChangeActions actions = sessionPreparer.prepare(applicationRepo.getHostValidator(), logger, params, currentActiveApplicationSet, tenantPath, now, getSessionAppDir(sessionId), session.getApplicationPackage(), sessionZooKeeperClient) .getConfigChangeActions(); setPrepared(session); waiter.awaitCompletion(params.getTimeoutBudget().timeLeft()); return actions; } public void deleteExpiredSessions(Map<ApplicationId, Long> activeSessions) { log.log(Level.FINE, "Purging old sessions for tenant '" + tenantName + "'"); try { for (LocalSession candidate : localSessionCache.getSessions()) { Instant createTime = candidate.getCreateTime(); log.log(Level.FINE, "Candidate session for deletion: " + candidate.getSessionId() + ", created: " + createTime); if (hasExpired(candidate) && !isActiveSession(candidate)) { deleteLocalSession(candidate); } else if (createTime.plus(Duration.ofDays(1)).isBefore(clock.instant())) { Optional<ApplicationId> applicationId = candidate.getOptionalApplicationId(); if (applicationId.isEmpty()) continue; Long activeSession = activeSessions.get(applicationId.get()); if (activeSession == null || activeSession != candidate.getSessionId()) { deleteLocalSession(candidate); log.log(Level.INFO, "Deleted inactive session " + candidate.getSessionId() + " created " + createTime + " for '" + applicationId + "'"); } } } } catch (Throwable e) { log.log(Level.WARNING, "Error when purging old sessions ", e); } log.log(Level.FINE, "Done purging old sessions"); } private boolean hasExpired(LocalSession candidate) { return (candidate.getCreateTime().plus(sessionLifetime).isBefore(clock.instant())); } private boolean isActiveSession(LocalSession candidate) { return candidate.getStatus() == Session.Status.ACTIVATE; } public void deleteLocalSession(LocalSession session) { long sessionId = session.getSessionId(); try (Lock lock = lock(sessionId)) { log.log(Level.FINE, "Deleting local session " + sessionId); 
SessionStateWatcher watcher = sessionStateWatchers.remove(sessionId); if (watcher != null) watcher.close(); localSessionCache.removeSession(sessionId); deletePersistentData(sessionId); } } private void deletePersistentData(long sessionId) { NestedTransaction transaction = new NestedTransaction(); SessionZooKeeperClient sessionZooKeeperClient = createSessionZooKeeperClient(sessionId); transaction.add(sessionZooKeeperClient.deleteTransaction(), FileTransaction.class); transaction.add(FileTransaction.from(FileOperations.delete(getSessionAppDir(sessionId).getAbsolutePath()))); transaction.commit(); } public void close() { deleteAllSessions(); tenantFileSystemDirs.delete(); try { if (directoryCache != null) { directoryCache.close(); } } catch (Exception e) { log.log(Level.WARNING, "Exception when closing path cache", e); } finally { checkForRemovedSessions(new ArrayList<>()); } } private void deleteAllSessions() { List<LocalSession> sessions = new ArrayList<>(localSessionCache.getSessions()); for (LocalSession session : sessions) { deleteLocalSession(session); } } public RemoteSession getRemoteSession(long sessionId) { return remoteSessionCache.getSession(sessionId); } public List<Long> getRemoteSessions() { return getSessionList(curator.getChildren(sessionsPath)); } public void addRemoteSession(RemoteSession session) { remoteSessionCache.addSession(session); metrics.incAddedSessions(); } public int deleteExpiredRemoteSessions(Clock clock, Duration expiryTime) { int deleted = 0; for (long sessionId : getRemoteSessions()) { RemoteSession session = remoteSessionCache.getSession(sessionId); if (session == null) continue; if (session.getStatus() == Session.Status.ACTIVATE) continue; if (sessionHasExpired(session.getCreateTime(), expiryTime, clock)) { log.log(Level.FINE, "Remote session " + sessionId + " for " + tenantName + " has expired, deleting it"); deleteSession(session); deleted++; } } return deleted; } public void deleteSession(RemoteSession session) { 
SessionZooKeeperClient sessionZooKeeperClient = createSessionZooKeeperClient(session.getSessionId()); Transaction transaction = sessionZooKeeperClient.deleteTransaction(); transaction.commit(); transaction.close(); } public int deleteExpiredLocks(Clock clock, Duration expiryTime) { int deleted = 0; for (var lock : curator.getChildren(locksPath)) { Path path = locksPath.append(lock); if (zooKeeperNodeCreated(path).orElse(clock.instant()).isBefore(clock.instant().minus(expiryTime))) { log.log(Level.FINE, "Lock " + path + " has expired, deleting it"); curator.delete(path); deleted++; } } return deleted; } private Optional<Instant> zooKeeperNodeCreated(Path path) { return curator.getStat(path).map(s -> Instant.ofEpochMilli(s.getCtime())); } private boolean sessionHasExpired(Instant created, Duration expiryTime, Clock clock) { return (created.plus(expiryTime).isBefore(clock.instant())); } private List<Long> getSessionListFromDirectoryCache(List<ChildData> children) { return getSessionList(children.stream() .map(child -> Path.fromString(child.getPath()).getName()) .collect(Collectors.toList())); } private List<Long> getSessionList(List<String> children) { return children.stream().map(Long::parseLong).collect(Collectors.toList()); } private void initializeRemoteSessions() throws NumberFormatException { getRemoteSessions().forEach(this::sessionAdded); } private synchronized void sessionsChanged() throws NumberFormatException { List<Long> sessions = getSessionListFromDirectoryCache(directoryCache.getCurrentData()); checkForRemovedSessions(sessions); checkForAddedSessions(sessions); } private void checkForRemovedSessions(List<Long> sessions) { for (RemoteSession session : remoteSessionCache.getSessions()) if ( ! 
sessions.contains(session.getSessionId())) sessionRemoved(session.getSessionId()); } private void checkForAddedSessions(List<Long> sessions) { for (Long sessionId : sessions) if (remoteSessionCache.getSession(sessionId) == null) sessionAdded(sessionId); } /** * A session for which we don't have a watcher, i.e. hitherto unknown to us. * * @param sessionId session id for the new session */ public void sessionAdded(long sessionId) { SessionZooKeeperClient sessionZKClient = createSessionZooKeeperClient(sessionId); if (sessionZKClient.readStatus().equals(Session.Status.DELETE)) return; log.log(Level.FINE, () -> "Adding remote session to SessionRepository: " + sessionId); RemoteSession remoteSession = createRemoteSession(sessionId); loadSessionIfActive(remoteSession); addRemoteSession(remoteSession); Optional<LocalSession> localSession = Optional.empty(); if (distributeApplicationPackage()) localSession = createLocalSessionUsingDistributedApplicationPackage(sessionId); addSessionStateWatcher(sessionId, remoteSession, localSession); } void activate(RemoteSession session) { long sessionId = session.getSessionId(); Curator.CompletionWaiter waiter = createSessionZooKeeperClient(sessionId).getActiveWaiter(); log.log(Level.FINE, () -> session.logPre() + "Getting session from repo: " + sessionId); ApplicationSet app = ensureApplicationLoaded(session); log.log(Level.FINE, () -> session.logPre() + "Reloading config for " + sessionId); applicationRepo.reloadConfig(app); log.log(Level.FINE, () -> session.logPre() + "Notifying " + waiter); notifyCompletion(waiter, session); log.log(Level.INFO, session.logPre() + "Session activated: " + sessionId); } public void deleteSession(RemoteSession remoteSession, Optional<LocalSession> localSession) { localSession.ifPresent(this::deleteLocalSession); remoteSession.deactivate(); } boolean distributeApplicationPackage() { return distributeApplicationPackage.value(); } private void sessionRemoved(long sessionId) { SessionStateWatcher watcher = 
sessionStateWatchers.remove(sessionId); if (watcher != null) watcher.close(); remoteSessionCache.removeSession(sessionId); metrics.incRemovedSessions(); } private void loadSessionIfActive(RemoteSession session) { for (ApplicationId applicationId : applicationRepo.activeApplications()) { if (applicationRepo.requireActiveSessionOf(applicationId) == session.getSessionId()) { log.log(Level.FINE, () -> "Found active application for session " + session.getSessionId() + " , loading it"); applicationRepo.reloadConfig(ensureApplicationLoaded(session)); log.log(Level.INFO, session.logPre() + "Application activated successfully: " + applicationId + " (generation " + session.getSessionId() + ")"); return; } } } void prepareRemoteSession(RemoteSession session) { SessionZooKeeperClient sessionZooKeeperClient = createSessionZooKeeperClient(session.getSessionId()); Curator.CompletionWaiter waiter = sessionZooKeeperClient.getPrepareWaiter(); ensureApplicationLoaded(session); notifyCompletion(waiter, session); } public synchronized ApplicationSet ensureApplicationLoaded(RemoteSession session) { Optional<ApplicationSet> applicationSet = session.applicationSet(); if (applicationSet.isPresent()) { return applicationSet.get(); } ApplicationSet newApplicationSet = loadApplication(session); RemoteSession newSession = new RemoteSession(session.getTenantName(), session.getSessionId(), session.getSessionZooKeeperClient(), Optional.of(newApplicationSet)); remoteSessionCache.addSession(newSession); return newApplicationSet; } void confirmUpload(RemoteSession session) { Curator.CompletionWaiter waiter = session.getSessionZooKeeperClient().getUploadWaiter(); long sessionId = session.getSessionId(); log.log(Level.FINE, "Notifying upload waiter for session " + sessionId); notifyCompletion(waiter, session); log.log(Level.FINE, "Done notifying upload for session " + sessionId); } void notifyCompletion(Curator.CompletionWaiter completionWaiter, RemoteSession session) { try { 
completionWaiter.notifyCompletion(); } catch (RuntimeException e) { Set<Class<? extends KeeperException>> acceptedExceptions = Set.of(KeeperException.NoNodeException.class, KeeperException.NodeExistsException.class); Class<? extends Throwable> exceptionClass = e.getCause().getClass(); if (acceptedExceptions.contains(exceptionClass)) log.log(Level.FINE, "Not able to notify completion for session " + session.getSessionId() + " (" + completionWaiter + ")," + " node " + (exceptionClass.equals(KeeperException.NoNodeException.class) ? "has been deleted" : "already exists")); else throw e; } } private ApplicationSet loadApplication(RemoteSession session) { SessionZooKeeperClient sessionZooKeeperClient = createSessionZooKeeperClient(session.getSessionId()); ApplicationPackage applicationPackage = sessionZooKeeperClient.loadApplicationPackage(); ActivatedModelsBuilder builder = new ActivatedModelsBuilder(session.getTenantName(), session.getSessionId(), sessionZooKeeperClient, componentRegistry); Optional<AllocatedHosts> allocatedHosts = applicationPackage.getAllocatedHosts(); return ApplicationSet.fromList(builder.buildModels(session.getApplicationId(), sessionZooKeeperClient.readDockerImageRepository(), sessionZooKeeperClient.readVespaVersion(), applicationPackage, new SettableOptional<>(allocatedHosts), clock.instant())); } private void nodeChanged() { zkWatcherExecutor.execute(() -> { Multiset<Session.Status> sessionMetrics = HashMultiset.create(); for (RemoteSession session : remoteSessionCache.getSessions()) { sessionMetrics.add(session.getStatus()); } metrics.setNewSessions(sessionMetrics.count(Session.Status.NEW)); metrics.setPreparedSessions(sessionMetrics.count(Session.Status.PREPARE)); metrics.setActivatedSessions(sessionMetrics.count(Session.Status.ACTIVATE)); metrics.setDeactivatedSessions(sessionMetrics.count(Session.Status.DEACTIVATE)); }); } @SuppressWarnings("unused") private void childEvent(CuratorFramework ignored, PathChildrenCacheEvent event) { 
zkWatcherExecutor.execute(() -> { log.log(Level.FINE, () -> "Got child event: " + event); switch (event.getType()) { case CHILD_ADDED: sessionsChanged(); synchronizeOnNew(getSessionListFromDirectoryCache(Collections.singletonList(event.getData()))); break; case CHILD_REMOVED: case CONNECTION_RECONNECTED: sessionsChanged(); break; } }); } private void synchronizeOnNew(List<Long> sessionList) { for (long sessionId : sessionList) { RemoteSession session = remoteSessionCache.getSession(sessionId); if (session == null) continue; log.log(Level.FINE, () -> session.logPre() + "Confirming upload for session " + sessionId); confirmUpload(session); } } /** * Creates a new deployment session from an application package. * * @param applicationDirectory a File pointing to an application. * @param applicationId application id for this new session. * @param timeoutBudget Timeout for creating session and waiting for other servers. * @return a new session */ public LocalSession createSession(File applicationDirectory, ApplicationId applicationId, TimeoutBudget timeoutBudget, Optional<Long> activeSessionId) { return create(applicationDirectory, applicationId, activeSessionId, false, timeoutBudget); } public RemoteSession createRemoteSession(long sessionId) { SessionZooKeeperClient sessionZKClient = createSessionZooKeeperClient(sessionId); return new RemoteSession(tenantName, sessionId, sessionZKClient); } private void ensureSessionPathDoesNotExist(long sessionId) { Path sessionPath = getSessionPath(sessionId); if (componentRegistry.getConfigCurator().exists(sessionPath.getAbsolute())) { throw new IllegalArgumentException("Path " + sessionPath.getAbsolute() + " already exists in ZooKeeper"); } } private ApplicationPackage createApplication(File userDir, File configApplicationDir, ApplicationId applicationId, long sessionId, Optional<Long> currentlyActiveSessionId, boolean internalRedeploy) { long deployTimestamp = System.currentTimeMillis(); String user = System.getenv("USER"); if 
(user == null) { user = "unknown"; } DeployData deployData = new DeployData(user, userDir.getAbsolutePath(), applicationId, deployTimestamp, internalRedeploy, sessionId, currentlyActiveSessionId.orElse(nonExistingActiveSessionId)); return FilesApplicationPackage.fromFileWithDeployData(configApplicationDir, deployData); } private LocalSession createSessionFromApplication(ApplicationPackage applicationPackage, long sessionId, TimeoutBudget timeoutBudget, Clock clock) { log.log(Level.FINE, TenantRepository.logPre(tenantName) + "Creating session " + sessionId + " in ZooKeeper"); SessionZooKeeperClient sessionZKClient = createSessionZooKeeperClient(sessionId); sessionZKClient.createNewSession(clock.instant()); Curator.CompletionWaiter waiter = sessionZKClient.getUploadWaiter(); LocalSession session = new LocalSession(tenantName, sessionId, applicationPackage, sessionZKClient); waiter.awaitCompletion(timeoutBudget.timeLeft()); return session; } /** * Creates a new deployment session from an already existing session. * * @param existingSession the session to use as base * @param logger a deploy logger where the deploy log will be written. * @param internalRedeploy whether this session is for a system internal redeploy — not an application package change * @param timeoutBudget timeout for creating session and waiting for other servers. 
* @return a new session */ public LocalSession createSessionFromExisting(Session existingSession, DeployLogger logger, boolean internalRedeploy, TimeoutBudget timeoutBudget) { File existingApp = getSessionAppDir(existingSession.getSessionId()); ApplicationId existingApplicationId = existingSession.getApplicationId(); Optional<Long> activeSessionId = getActiveSessionId(existingApplicationId); logger.log(Level.FINE, "Create new session for application id '" + existingApplicationId + "' from existing active session " + activeSessionId); LocalSession session = create(existingApp, existingApplicationId, activeSessionId, internalRedeploy, timeoutBudget); session.setApplicationId(existingApplicationId); if (distributeApplicationPackage() && existingSession.getApplicationPackageReference() != null) { session.setApplicationPackageReference(existingSession.getApplicationPackageReference()); } session.setVespaVersion(existingSession.getVespaVersion()); session.setDockerImageRepository(existingSession.getDockerImageRepository()); session.setAthenzDomain(existingSession.getAthenzDomain()); return session; } private LocalSession create(File applicationFile, ApplicationId applicationId, Optional<Long> currentlyActiveSessionId, boolean internalRedeploy, TimeoutBudget timeoutBudget) { long sessionId = getNextSessionId(); try { ensureSessionPathDoesNotExist(sessionId); ApplicationPackage app = createApplicationPackage(applicationFile, applicationId, sessionId, currentlyActiveSessionId, internalRedeploy); return createSessionFromApplication(app, sessionId, timeoutBudget, clock); } catch (Exception e) { throw new RuntimeException("Error creating session " + sessionId, e); } } /** * This method is used when creating a session based on a remote session and the distributed application package * It does not wait for session being created on other servers */ private LocalSession createLocalSession(File applicationFile, ApplicationId applicationId, long sessionId) { try { Optional<Long> 
currentlyActiveSessionId = getActiveSessionId(applicationId); ApplicationPackage applicationPackage = createApplicationPackage(applicationFile, applicationId, sessionId, currentlyActiveSessionId, false); SessionZooKeeperClient sessionZooKeeperClient = createSessionZooKeeperClient(sessionId); return new LocalSession(tenantName, sessionId, applicationPackage, sessionZooKeeperClient); } catch (Exception e) { throw new RuntimeException("Error creating session " + sessionId, e); } } private ApplicationPackage createApplicationPackage(File applicationFile, ApplicationId applicationId, long sessionId, Optional<Long> currentlyActiveSessionId, boolean internalRedeploy) throws IOException { File userApplicationDir = getSessionAppDir(sessionId); copyApp(applicationFile, userApplicationDir); ApplicationPackage applicationPackage = createApplication(applicationFile, userApplicationDir, applicationId, sessionId, currentlyActiveSessionId, internalRedeploy); applicationPackage.writeMetaData(); return applicationPackage; } private void copyApp(File sourceDir, File destinationDir) throws IOException { if (destinationDir.exists()) throw new RuntimeException("Destination dir " + destinationDir + " already exists"); if (! 
sourceDir.isDirectory()) throw new IllegalArgumentException(sourceDir.getAbsolutePath() + " is not a directory"); java.nio.file.Path tempDestinationDir = null; try { tempDestinationDir = Files.createTempDirectory(destinationDir.getParentFile().toPath(), "app-package"); log.log(Level.FINE, "Copying dir " + sourceDir.getAbsolutePath() + " to " + tempDestinationDir.toFile().getAbsolutePath()); IOUtils.copyDirectory(sourceDir, tempDestinationDir.toFile()); log.log(Level.FINE, "Moving " + tempDestinationDir + " to " + destinationDir.getAbsolutePath()); Files.move(tempDestinationDir, destinationDir.toPath(), StandardCopyOption.ATOMIC_MOVE); } finally { if (tempDestinationDir != null) IOUtils.recursiveDeleteDir(tempDestinationDir.toFile()); } } /** * Returns a new session instance for the given session id. */ LocalSession createSessionFromId(long sessionId) { File sessionDir = getAndValidateExistingSessionAppDir(sessionId); ApplicationPackage applicationPackage = FilesApplicationPackage.fromFile(sessionDir); SessionZooKeeperClient sessionZKClient = createSessionZooKeeperClient(sessionId); return new LocalSession(tenantName, sessionId, applicationPackage, sessionZKClient); } /** * Returns a new local session for the given session id if it does not already exist. 
* Will also add the session to the local session cache if necessary */ public Optional<LocalSession> createLocalSessionUsingDistributedApplicationPackage(long sessionId) { if (applicationRepo.hasLocalSession(sessionId)) { log.log(Level.FINE, "Local session for session id " + sessionId + " already exists"); return Optional.of(createSessionFromId(sessionId)); } SessionZooKeeperClient sessionZKClient = createSessionZooKeeperClient(sessionId); FileReference fileReference = sessionZKClient.readApplicationPackageReference(); log.log(Level.FINE, "File reference for session id " + sessionId + ": " + fileReference); if (fileReference != null) { File rootDir = new File(Defaults.getDefaults().underVespaHome(componentRegistry.getConfigserverConfig().fileReferencesDir())); File sessionDir; FileDirectory fileDirectory = new FileDirectory(rootDir); try { sessionDir = fileDirectory.getFile(fileReference); } catch (IllegalArgumentException e) { log.log(Level.INFO, "File reference for session id " + sessionId + ": " + fileReference + " not found in " + fileDirectory); return Optional.empty(); } ApplicationId applicationId = sessionZKClient.readApplicationId() .orElseThrow(() -> new RuntimeException("Could not find application id for session " + sessionId)); log.log(Level.INFO, "Creating local session for tenant '" + tenantName + "' with session id " + sessionId); LocalSession localSession = createLocalSession(sessionDir, applicationId, sessionId); addLocalSession(localSession); return Optional.of(localSession); } return Optional.empty(); } private Optional<Long> getActiveSessionId(ApplicationId applicationId) { List<ApplicationId> applicationIds = applicationRepo.activeApplications(); return applicationIds.contains(applicationId) ? 
Optional.of(applicationRepo.requireActiveSessionOf(applicationId)) : Optional.empty(); } private long getNextSessionId() { return new SessionCounter(componentRegistry.getConfigCurator(), tenantName).nextSessionId(); } public Path getSessionPath(long sessionId) { return sessionsPath.append(String.valueOf(sessionId)); } Path getSessionStatePath(long sessionId) { return getSessionPath(sessionId).append(ConfigCurator.SESSIONSTATE_ZK_SUBPATH); } private SessionZooKeeperClient createSessionZooKeeperClient(long sessionId) { String serverId = componentRegistry.getConfigserverConfig().serverId(); return new SessionZooKeeperClient(curator, componentRegistry.getConfigCurator(), tenantName, sessionId, serverId); } private File getAndValidateExistingSessionAppDir(long sessionId) { File appDir = getSessionAppDir(sessionId); if (!appDir.exists() || !appDir.isDirectory()) { throw new IllegalArgumentException("Unable to find correct application directory for session " + sessionId); } return appDir; } private File getSessionAppDir(long sessionId) { return new TenantFileSystemDirs(componentRegistry.getConfigServerDB(), tenantName).getUserApplicationDir(sessionId); } private void addSessionStateWatcher(long sessionId, RemoteSession remoteSession, Optional<LocalSession> localSession) { if (sessionStateWatchers.containsKey(sessionId)) { localSession.ifPresent(session -> sessionStateWatchers.get(sessionId).addLocalSession(session)); } else { Curator.FileCache fileCache = curator.createFileCache(getSessionStatePath(sessionId).getAbsolute(), false); fileCache.addListener(this::nodeChanged); sessionStateWatchers.put(sessionId, new SessionStateWatcher(fileCache, remoteSession, localSession, metrics, zkWatcherExecutor, this)); } } @Override public String toString() { return getLocalSessions().toString(); } /** Returns the lock for session operations for the given session id. 
*/ public Lock lock(long sessionId) { return curator.lock(lockPath(sessionId), Duration.ofMinutes(1)); } public Clock clock() { return clock; } private Path lockPath(long sessionId) { return locksPath.append(String.valueOf(sessionId)); } public Transaction createActivateTransaction(Session session) { Transaction transaction = createSetStatusTransaction(session, Session.Status.ACTIVATE); transaction.add(applicationRepo.createPutTransaction(session.getApplicationId(), session.getSessionId()).operations()); return transaction; } private Transaction createSetStatusTransaction(Session session, Session.Status status) { return session.sessionZooKeeperClient.createWriteStatusTransaction(status); } void setPrepared(Session session) { session.setStatus(Session.Status.PREPARE); } private static class FileTransaction extends AbstractTransaction { public static FileTransaction from(FileOperation operation) { FileTransaction transaction = new FileTransaction(); transaction.add(operation); return transaction; } @Override public void prepare() { } @Override public void commit() { for (Operation operation : operations()) ((FileOperation)operation).commit(); } } /** Factory for file operations */ private static class FileOperations { /** Creates an operation which recursively deletes the given path */ public static DeleteOperation delete(String pathToDelete) { return new DeleteOperation(pathToDelete); } } private interface FileOperation extends Transaction.Operation { void commit(); } /** * Recursively deletes this path and everything below. * Succeeds with no action if the path does not exist. */ private static class DeleteOperation implements FileOperation { private final String pathToDelete; DeleteOperation(String pathToDelete) { this.pathToDelete = pathToDelete; } @Override public void commit() { IOUtils.recursiveDeleteDir(new File(pathToDelete)); } } }
class SessionRepository { private static final Logger log = Logger.getLogger(SessionRepository.class.getName()); private static final FilenameFilter sessionApplicationsFilter = (dir, name) -> name.matches("\\d+"); private static final long nonExistingActiveSessionId = 0; private final SessionCache<LocalSession> localSessionCache = new SessionCache<>(); private final SessionCache<RemoteSession> remoteSessionCache = new SessionCache<>(); private final Map<Long, SessionStateWatcher> sessionStateWatchers = new HashMap<>(); private final Duration sessionLifetime; private final Clock clock; private final Curator curator; private final Executor zkWatcherExecutor; private final TenantFileSystemDirs tenantFileSystemDirs; private final BooleanFlag distributeApplicationPackage; private final MetricUpdater metrics; private final Curator.DirectoryCache directoryCache; private final TenantApplications applicationRepo; private final SessionPreparer sessionPreparer; private final Path sessionsPath; private final TenantName tenantName; private final GlobalComponentRegistry componentRegistry; private final Path locksPath; public SessionRepository(TenantName tenantName, GlobalComponentRegistry componentRegistry, TenantApplications applicationRepo, FlagSource flagSource, SessionPreparer sessionPreparer) { this.tenantName = tenantName; this.componentRegistry = componentRegistry; this.sessionsPath = TenantRepository.getSessionsPath(tenantName); this.clock = componentRegistry.getClock(); this.curator = componentRegistry.getCurator(); this.sessionLifetime = Duration.ofSeconds(componentRegistry.getConfigserverConfig().sessionLifetime()); this.zkWatcherExecutor = command -> componentRegistry.getZkWatcherExecutor().execute(tenantName, command); this.tenantFileSystemDirs = new TenantFileSystemDirs(componentRegistry.getConfigServerDB(), tenantName); this.applicationRepo = applicationRepo; this.sessionPreparer = sessionPreparer; this.distributeApplicationPackage = 
Flags.CONFIGSERVER_DISTRIBUTE_APPLICATION_PACKAGE.bindTo(flagSource); this.metrics = componentRegistry.getMetrics().getOrCreateMetricUpdater(Metrics.createDimensions(tenantName)); this.locksPath = TenantRepository.getLocksPath(tenantName); loadSessions(); this.directoryCache = curator.createDirectoryCache(sessionsPath.getAbsolute(), false, false, componentRegistry.getZkCacheExecutor()); this.directoryCache.addListener(this::childEvent); this.directoryCache.start(); } private void loadSessions() { loadLocalSessions(); initializeRemoteSessions(); } public synchronized void addLocalSession(LocalSession session) { localSessionCache.putSession(session); long sessionId = session.getSessionId(); RemoteSession remoteSession = createRemoteSession(sessionId); addSessionStateWatcher(sessionId, remoteSession, Optional.of(session)); } public LocalSession getLocalSession(long sessionId) { return localSessionCache.getSession(sessionId); } public List<LocalSession> getLocalSessions() { return localSessionCache.getSessions(); } private void loadLocalSessions() { File[] sessions = tenantFileSystemDirs.sessionsPath().listFiles(sessionApplicationsFilter); if (sessions == null) return; for (File session : sessions) { try { addLocalSession(createSessionFromId(Long.parseLong(session.getName()))); } catch (IllegalArgumentException e) { log.log(Level.WARNING, "Could not load session '" + session.getAbsolutePath() + "':" + e.getMessage() + ", skipping it."); } } } public ConfigChangeActions prepareLocalSession(LocalSession session, DeployLogger logger, PrepareParams params, Optional<ApplicationSet> currentActiveApplicationSet, Path tenantPath, Instant now) { applicationRepo.createApplication(params.getApplicationId()); logger.log(Level.FINE, "Created application " + params.getApplicationId()); long sessionId = session.getSessionId(); SessionZooKeeperClient sessionZooKeeperClient = createSessionZooKeeperClient(sessionId); Curator.CompletionWaiter waiter = 
sessionZooKeeperClient.createPrepareWaiter(); ConfigChangeActions actions = sessionPreparer.prepare(applicationRepo.getHostValidator(), logger, params, currentActiveApplicationSet, tenantPath, now, getSessionAppDir(sessionId), session.getApplicationPackage(), sessionZooKeeperClient) .getConfigChangeActions(); setPrepared(session); waiter.awaitCompletion(params.getTimeoutBudget().timeLeft()); return actions; } public void deleteExpiredSessions(Map<ApplicationId, Long> activeSessions) { log.log(Level.FINE, "Purging old sessions for tenant '" + tenantName + "'"); try { for (LocalSession candidate : localSessionCache.getSessions()) { Instant createTime = candidate.getCreateTime(); log.log(Level.FINE, "Candidate session for deletion: " + candidate.getSessionId() + ", created: " + createTime); if (hasExpired(candidate) && !isActiveSession(candidate)) { deleteLocalSession(candidate); } else if (createTime.plus(Duration.ofDays(1)).isBefore(clock.instant())) { Optional<ApplicationId> applicationId = candidate.getOptionalApplicationId(); if (applicationId.isEmpty()) continue; Long activeSession = activeSessions.get(applicationId.get()); if (activeSession == null || activeSession != candidate.getSessionId()) { deleteLocalSession(candidate); log.log(Level.INFO, "Deleted inactive session " + candidate.getSessionId() + " created " + createTime + " for '" + applicationId + "'"); } } } } catch (Throwable e) { log.log(Level.WARNING, "Error when purging old sessions ", e); } log.log(Level.FINE, "Done purging old sessions"); } private boolean hasExpired(LocalSession candidate) { return (candidate.getCreateTime().plus(sessionLifetime).isBefore(clock.instant())); } private boolean isActiveSession(LocalSession candidate) { return candidate.getStatus() == Session.Status.ACTIVATE; } public void deleteLocalSession(LocalSession session) { long sessionId = session.getSessionId(); try (Lock lock = lock(sessionId)) { log.log(Level.FINE, "Deleting local session " + sessionId); 
SessionStateWatcher watcher = sessionStateWatchers.remove(sessionId); if (watcher != null) watcher.close(); localSessionCache.removeSession(sessionId); deletePersistentData(sessionId); } } private void deletePersistentData(long sessionId) { NestedTransaction transaction = new NestedTransaction(); SessionZooKeeperClient sessionZooKeeperClient = createSessionZooKeeperClient(sessionId); transaction.add(sessionZooKeeperClient.deleteTransaction(), FileTransaction.class); transaction.add(FileTransaction.from(FileOperations.delete(getSessionAppDir(sessionId).getAbsolutePath()))); transaction.commit(); } public void close() { deleteAllSessions(); tenantFileSystemDirs.delete(); try { if (directoryCache != null) { directoryCache.close(); } } catch (Exception e) { log.log(Level.WARNING, "Exception when closing path cache", e); } finally { checkForRemovedSessions(new ArrayList<>()); } } private void deleteAllSessions() { List<LocalSession> sessions = new ArrayList<>(localSessionCache.getSessions()); for (LocalSession session : sessions) { deleteLocalSession(session); } } public RemoteSession getRemoteSession(long sessionId) { return remoteSessionCache.getSession(sessionId); } public List<Long> getRemoteSessions() { return getSessionList(curator.getChildren(sessionsPath)); } public void addRemoteSession(RemoteSession session) { remoteSessionCache.putSession(session); metrics.incAddedSessions(); } public int deleteExpiredRemoteSessions(Clock clock, Duration expiryTime) { int deleted = 0; for (long sessionId : getRemoteSessions()) { RemoteSession session = remoteSessionCache.getSession(sessionId); if (session == null) continue; if (session.getStatus() == Session.Status.ACTIVATE) continue; if (sessionHasExpired(session.getCreateTime(), expiryTime, clock)) { log.log(Level.FINE, "Remote session " + sessionId + " for " + tenantName + " has expired, deleting it"); deleteSession(session); deleted++; } } return deleted; } public void deleteSession(RemoteSession session) { 
SessionZooKeeperClient sessionZooKeeperClient = createSessionZooKeeperClient(session.getSessionId()); Transaction transaction = sessionZooKeeperClient.deleteTransaction(); transaction.commit(); transaction.close(); } public int deleteExpiredLocks(Clock clock, Duration expiryTime) { int deleted = 0; for (var lock : curator.getChildren(locksPath)) { Path path = locksPath.append(lock); if (zooKeeperNodeCreated(path).orElse(clock.instant()).isBefore(clock.instant().minus(expiryTime))) { log.log(Level.FINE, "Lock " + path + " has expired, deleting it"); curator.delete(path); deleted++; } } return deleted; } private Optional<Instant> zooKeeperNodeCreated(Path path) { return curator.getStat(path).map(s -> Instant.ofEpochMilli(s.getCtime())); } private boolean sessionHasExpired(Instant created, Duration expiryTime, Clock clock) { return (created.plus(expiryTime).isBefore(clock.instant())); } private List<Long> getSessionListFromDirectoryCache(List<ChildData> children) { return getSessionList(children.stream() .map(child -> Path.fromString(child.getPath()).getName()) .collect(Collectors.toList())); } private List<Long> getSessionList(List<String> children) { return children.stream().map(Long::parseLong).collect(Collectors.toList()); } private void initializeRemoteSessions() throws NumberFormatException { getRemoteSessions().forEach(this::sessionAdded); } private synchronized void sessionsChanged() throws NumberFormatException { List<Long> sessions = getSessionListFromDirectoryCache(directoryCache.getCurrentData()); checkForRemovedSessions(sessions); checkForAddedSessions(sessions); } private void checkForRemovedSessions(List<Long> sessions) { for (RemoteSession session : remoteSessionCache.getSessions()) if ( ! 
sessions.contains(session.getSessionId())) sessionRemoved(session.getSessionId()); } private void checkForAddedSessions(List<Long> sessions) { for (Long sessionId : sessions) if (remoteSessionCache.getSession(sessionId) == null) sessionAdded(sessionId); } /** * A session for which we don't have a watcher, i.e. hitherto unknown to us. * * @param sessionId session id for the new session */ public void sessionAdded(long sessionId) { SessionZooKeeperClient sessionZKClient = createSessionZooKeeperClient(sessionId); if (sessionZKClient.readStatus().equals(Session.Status.DELETE)) return; log.log(Level.FINE, () -> "Adding remote session to SessionRepository: " + sessionId); RemoteSession remoteSession = createRemoteSession(sessionId); loadSessionIfActive(remoteSession); addRemoteSession(remoteSession); Optional<LocalSession> localSession = Optional.empty(); if (distributeApplicationPackage()) localSession = createLocalSessionUsingDistributedApplicationPackage(sessionId); addSessionStateWatcher(sessionId, remoteSession, localSession); } void activate(RemoteSession session) { long sessionId = session.getSessionId(); Curator.CompletionWaiter waiter = createSessionZooKeeperClient(sessionId).getActiveWaiter(); log.log(Level.FINE, () -> session.logPre() + "Getting session from repo: " + sessionId); ApplicationSet app = ensureApplicationLoaded(session); log.log(Level.FINE, () -> session.logPre() + "Reloading config for " + sessionId); applicationRepo.reloadConfig(app); log.log(Level.FINE, () -> session.logPre() + "Notifying " + waiter); notifyCompletion(waiter, session); log.log(Level.INFO, session.logPre() + "Session activated: " + sessionId); } public void deleteSession(RemoteSession remoteSession, Optional<LocalSession> localSession) { localSession.ifPresent(this::deleteLocalSession); deactivate(remoteSession); } boolean distributeApplicationPackage() { return distributeApplicationPackage.value(); } private void sessionRemoved(long sessionId) { SessionStateWatcher watcher = 
sessionStateWatchers.remove(sessionId); if (watcher != null) watcher.close(); remoteSessionCache.removeSession(sessionId); metrics.incRemovedSessions(); } private void loadSessionIfActive(RemoteSession session) { for (ApplicationId applicationId : applicationRepo.activeApplications()) { if (applicationRepo.requireActiveSessionOf(applicationId) == session.getSessionId()) { log.log(Level.FINE, () -> "Found active application for session " + session.getSessionId() + " , loading it"); applicationRepo.reloadConfig(ensureApplicationLoaded(session)); log.log(Level.INFO, session.logPre() + "Application activated successfully: " + applicationId + " (generation " + session.getSessionId() + ")"); return; } } } void prepareRemoteSession(RemoteSession session) { SessionZooKeeperClient sessionZooKeeperClient = createSessionZooKeeperClient(session.getSessionId()); Curator.CompletionWaiter waiter = sessionZooKeeperClient.getPrepareWaiter(); ensureApplicationLoaded(session); notifyCompletion(waiter, session); } public synchronized ApplicationSet ensureApplicationLoaded(RemoteSession session) { Optional<ApplicationSet> applicationSet = session.applicationSet(); if (applicationSet.isPresent()) { return applicationSet.get(); } ApplicationSet newApplicationSet = loadApplication(session); RemoteSession newSession = new RemoteSession(session.getTenantName(), session.getSessionId(), session.getSessionZooKeeperClient(), Optional.of(newApplicationSet)); remoteSessionCache.putSession(newSession); return newApplicationSet; } void confirmUpload(RemoteSession session) { Curator.CompletionWaiter waiter = session.getSessionZooKeeperClient().getUploadWaiter(); long sessionId = session.getSessionId(); log.log(Level.FINE, "Notifying upload waiter for session " + sessionId); notifyCompletion(waiter, session); log.log(Level.FINE, "Done notifying upload for session " + sessionId); } void notifyCompletion(Curator.CompletionWaiter completionWaiter, RemoteSession session) { try { 
completionWaiter.notifyCompletion(); } catch (RuntimeException e) { Set<Class<? extends KeeperException>> acceptedExceptions = Set.of(KeeperException.NoNodeException.class, KeeperException.NodeExistsException.class); Class<? extends Throwable> exceptionClass = e.getCause().getClass(); if (acceptedExceptions.contains(exceptionClass)) log.log(Level.FINE, "Not able to notify completion for session " + session.getSessionId() + " (" + completionWaiter + ")," + " node " + (exceptionClass.equals(KeeperException.NoNodeException.class) ? "has been deleted" : "already exists")); else throw e; } } private ApplicationSet loadApplication(RemoteSession session) { SessionZooKeeperClient sessionZooKeeperClient = createSessionZooKeeperClient(session.getSessionId()); ApplicationPackage applicationPackage = sessionZooKeeperClient.loadApplicationPackage(); ActivatedModelsBuilder builder = new ActivatedModelsBuilder(session.getTenantName(), session.getSessionId(), sessionZooKeeperClient, componentRegistry); Optional<AllocatedHosts> allocatedHosts = applicationPackage.getAllocatedHosts(); return ApplicationSet.fromList(builder.buildModels(session.getApplicationId(), sessionZooKeeperClient.readDockerImageRepository(), sessionZooKeeperClient.readVespaVersion(), applicationPackage, new SettableOptional<>(allocatedHosts), clock.instant())); } private void nodeChanged() { zkWatcherExecutor.execute(() -> { Multiset<Session.Status> sessionMetrics = HashMultiset.create(); for (RemoteSession session : remoteSessionCache.getSessions()) { sessionMetrics.add(session.getStatus()); } metrics.setNewSessions(sessionMetrics.count(Session.Status.NEW)); metrics.setPreparedSessions(sessionMetrics.count(Session.Status.PREPARE)); metrics.setActivatedSessions(sessionMetrics.count(Session.Status.ACTIVATE)); metrics.setDeactivatedSessions(sessionMetrics.count(Session.Status.DEACTIVATE)); }); } @SuppressWarnings("unused") private void childEvent(CuratorFramework ignored, PathChildrenCacheEvent event) { 
zkWatcherExecutor.execute(() -> { log.log(Level.FINE, () -> "Got child event: " + event); switch (event.getType()) { case CHILD_ADDED: sessionsChanged(); synchronizeOnNew(getSessionListFromDirectoryCache(Collections.singletonList(event.getData()))); break; case CHILD_REMOVED: case CONNECTION_RECONNECTED: sessionsChanged(); break; } }); } private void synchronizeOnNew(List<Long> sessionList) { for (long sessionId : sessionList) { RemoteSession session = remoteSessionCache.getSession(sessionId); if (session == null) continue; log.log(Level.FINE, () -> session.logPre() + "Confirming upload for session " + sessionId); confirmUpload(session); } } /** * Creates a new deployment session from an application package. * * @param applicationDirectory a File pointing to an application. * @param applicationId application id for this new session. * @param timeoutBudget Timeout for creating session and waiting for other servers. * @return a new session */ public LocalSession createSession(File applicationDirectory, ApplicationId applicationId, TimeoutBudget timeoutBudget, Optional<Long> activeSessionId) { return create(applicationDirectory, applicationId, activeSessionId, false, timeoutBudget); } public RemoteSession createRemoteSession(long sessionId) { SessionZooKeeperClient sessionZKClient = createSessionZooKeeperClient(sessionId); return new RemoteSession(tenantName, sessionId, sessionZKClient); } private void ensureSessionPathDoesNotExist(long sessionId) { Path sessionPath = getSessionPath(sessionId); if (componentRegistry.getConfigCurator().exists(sessionPath.getAbsolute())) { throw new IllegalArgumentException("Path " + sessionPath.getAbsolute() + " already exists in ZooKeeper"); } } private ApplicationPackage createApplication(File userDir, File configApplicationDir, ApplicationId applicationId, long sessionId, Optional<Long> currentlyActiveSessionId, boolean internalRedeploy) { long deployTimestamp = System.currentTimeMillis(); String user = System.getenv("USER"); if 
(user == null) { user = "unknown"; } DeployData deployData = new DeployData(user, userDir.getAbsolutePath(), applicationId, deployTimestamp, internalRedeploy, sessionId, currentlyActiveSessionId.orElse(nonExistingActiveSessionId)); return FilesApplicationPackage.fromFileWithDeployData(configApplicationDir, deployData); } private LocalSession createSessionFromApplication(ApplicationPackage applicationPackage, long sessionId, TimeoutBudget timeoutBudget, Clock clock) { log.log(Level.FINE, TenantRepository.logPre(tenantName) + "Creating session " + sessionId + " in ZooKeeper"); SessionZooKeeperClient sessionZKClient = createSessionZooKeeperClient(sessionId); sessionZKClient.createNewSession(clock.instant()); Curator.CompletionWaiter waiter = sessionZKClient.getUploadWaiter(); LocalSession session = new LocalSession(tenantName, sessionId, applicationPackage, sessionZKClient); waiter.awaitCompletion(timeoutBudget.timeLeft()); return session; } /** * Creates a new deployment session from an already existing session. * * @param existingSession the session to use as base * @param logger a deploy logger where the deploy log will be written. * @param internalRedeploy whether this session is for a system internal redeploy — not an application package change * @param timeoutBudget timeout for creating session and waiting for other servers. 
* @return a new session */ public LocalSession createSessionFromExisting(Session existingSession, DeployLogger logger, boolean internalRedeploy, TimeoutBudget timeoutBudget) { File existingApp = getSessionAppDir(existingSession.getSessionId()); ApplicationId existingApplicationId = existingSession.getApplicationId(); Optional<Long> activeSessionId = getActiveSessionId(existingApplicationId); logger.log(Level.FINE, "Create new session for application id '" + existingApplicationId + "' from existing active session " + activeSessionId); LocalSession session = create(existingApp, existingApplicationId, activeSessionId, internalRedeploy, timeoutBudget); session.setApplicationId(existingApplicationId); if (distributeApplicationPackage() && existingSession.getApplicationPackageReference() != null) { session.setApplicationPackageReference(existingSession.getApplicationPackageReference()); } session.setVespaVersion(existingSession.getVespaVersion()); session.setDockerImageRepository(existingSession.getDockerImageRepository()); session.setAthenzDomain(existingSession.getAthenzDomain()); return session; } private LocalSession create(File applicationFile, ApplicationId applicationId, Optional<Long> currentlyActiveSessionId, boolean internalRedeploy, TimeoutBudget timeoutBudget) { long sessionId = getNextSessionId(); try { ensureSessionPathDoesNotExist(sessionId); ApplicationPackage app = createApplicationPackage(applicationFile, applicationId, sessionId, currentlyActiveSessionId, internalRedeploy); return createSessionFromApplication(app, sessionId, timeoutBudget, clock); } catch (Exception e) { throw new RuntimeException("Error creating session " + sessionId, e); } } /** * This method is used when creating a session based on a remote session and the distributed application package * It does not wait for session being created on other servers */ private LocalSession createLocalSession(File applicationFile, ApplicationId applicationId, long sessionId) { try { Optional<Long> 
currentlyActiveSessionId = getActiveSessionId(applicationId); ApplicationPackage applicationPackage = createApplicationPackage(applicationFile, applicationId, sessionId, currentlyActiveSessionId, false); SessionZooKeeperClient sessionZooKeeperClient = createSessionZooKeeperClient(sessionId); return new LocalSession(tenantName, sessionId, applicationPackage, sessionZooKeeperClient); } catch (Exception e) { throw new RuntimeException("Error creating session " + sessionId, e); } } private ApplicationPackage createApplicationPackage(File applicationFile, ApplicationId applicationId, long sessionId, Optional<Long> currentlyActiveSessionId, boolean internalRedeploy) throws IOException { File userApplicationDir = getSessionAppDir(sessionId); copyApp(applicationFile, userApplicationDir); ApplicationPackage applicationPackage = createApplication(applicationFile, userApplicationDir, applicationId, sessionId, currentlyActiveSessionId, internalRedeploy); applicationPackage.writeMetaData(); return applicationPackage; } private void copyApp(File sourceDir, File destinationDir) throws IOException { if (destinationDir.exists()) throw new RuntimeException("Destination dir " + destinationDir + " already exists"); if (! 
sourceDir.isDirectory()) throw new IllegalArgumentException(sourceDir.getAbsolutePath() + " is not a directory"); java.nio.file.Path tempDestinationDir = null; try { tempDestinationDir = Files.createTempDirectory(destinationDir.getParentFile().toPath(), "app-package"); log.log(Level.FINE, "Copying dir " + sourceDir.getAbsolutePath() + " to " + tempDestinationDir.toFile().getAbsolutePath()); IOUtils.copyDirectory(sourceDir, tempDestinationDir.toFile()); log.log(Level.FINE, "Moving " + tempDestinationDir + " to " + destinationDir.getAbsolutePath()); Files.move(tempDestinationDir, destinationDir.toPath(), StandardCopyOption.ATOMIC_MOVE); } finally { if (tempDestinationDir != null) IOUtils.recursiveDeleteDir(tempDestinationDir.toFile()); } } /** * Returns a new session instance for the given session id. */ LocalSession createSessionFromId(long sessionId) { File sessionDir = getAndValidateExistingSessionAppDir(sessionId); ApplicationPackage applicationPackage = FilesApplicationPackage.fromFile(sessionDir); SessionZooKeeperClient sessionZKClient = createSessionZooKeeperClient(sessionId); return new LocalSession(tenantName, sessionId, applicationPackage, sessionZKClient); } /** * Returns a new local session for the given session id if it does not already exist. 
* Will also add the session to the local session cache if necessary */ public Optional<LocalSession> createLocalSessionUsingDistributedApplicationPackage(long sessionId) { if (applicationRepo.hasLocalSession(sessionId)) { log.log(Level.FINE, "Local session for session id " + sessionId + " already exists"); return Optional.of(createSessionFromId(sessionId)); } SessionZooKeeperClient sessionZKClient = createSessionZooKeeperClient(sessionId); FileReference fileReference = sessionZKClient.readApplicationPackageReference(); log.log(Level.FINE, "File reference for session id " + sessionId + ": " + fileReference); if (fileReference != null) { File rootDir = new File(Defaults.getDefaults().underVespaHome(componentRegistry.getConfigserverConfig().fileReferencesDir())); File sessionDir; FileDirectory fileDirectory = new FileDirectory(rootDir); try { sessionDir = fileDirectory.getFile(fileReference); } catch (IllegalArgumentException e) { log.log(Level.INFO, "File reference for session id " + sessionId + ": " + fileReference + " not found in " + fileDirectory); return Optional.empty(); } ApplicationId applicationId = sessionZKClient.readApplicationId() .orElseThrow(() -> new RuntimeException("Could not find application id for session " + sessionId)); log.log(Level.INFO, "Creating local session for tenant '" + tenantName + "' with session id " + sessionId); LocalSession localSession = createLocalSession(sessionDir, applicationId, sessionId); addLocalSession(localSession); return Optional.of(localSession); } return Optional.empty(); } private Optional<Long> getActiveSessionId(ApplicationId applicationId) { List<ApplicationId> applicationIds = applicationRepo.activeApplications(); return applicationIds.contains(applicationId) ? 
Optional.of(applicationRepo.requireActiveSessionOf(applicationId)) : Optional.empty(); } private long getNextSessionId() { return new SessionCounter(componentRegistry.getConfigCurator(), tenantName).nextSessionId(); } public Path getSessionPath(long sessionId) { return sessionsPath.append(String.valueOf(sessionId)); } Path getSessionStatePath(long sessionId) { return getSessionPath(sessionId).append(ConfigCurator.SESSIONSTATE_ZK_SUBPATH); } private SessionZooKeeperClient createSessionZooKeeperClient(long sessionId) { String serverId = componentRegistry.getConfigserverConfig().serverId(); return new SessionZooKeeperClient(curator, componentRegistry.getConfigCurator(), tenantName, sessionId, serverId); } private File getAndValidateExistingSessionAppDir(long sessionId) { File appDir = getSessionAppDir(sessionId); if (!appDir.exists() || !appDir.isDirectory()) { throw new IllegalArgumentException("Unable to find correct application directory for session " + sessionId); } return appDir; } private File getSessionAppDir(long sessionId) { return new TenantFileSystemDirs(componentRegistry.getConfigServerDB(), tenantName).getUserApplicationDir(sessionId); } private void addSessionStateWatcher(long sessionId, RemoteSession remoteSession, Optional<LocalSession> localSession) { if (sessionStateWatchers.containsKey(sessionId)) { localSession.ifPresent(session -> sessionStateWatchers.get(sessionId).addLocalSession(session)); } else { Curator.FileCache fileCache = curator.createFileCache(getSessionStatePath(sessionId).getAbsolute(), false); fileCache.addListener(this::nodeChanged); sessionStateWatchers.put(sessionId, new SessionStateWatcher(fileCache, remoteSession, localSession, metrics, zkWatcherExecutor, this)); } } @Override public String toString() { return getLocalSessions().toString(); } /** Returns the lock for session operations for the given session id. 
*/ public Lock lock(long sessionId) { return curator.lock(lockPath(sessionId), Duration.ofMinutes(1)); } public Clock clock() { return clock; } private Path lockPath(long sessionId) { return locksPath.append(String.valueOf(sessionId)); } public Transaction createActivateTransaction(Session session) { Transaction transaction = createSetStatusTransaction(session, Session.Status.ACTIVATE); transaction.add(applicationRepo.createPutTransaction(session.getApplicationId(), session.getSessionId()).operations()); return transaction; } private Transaction createSetStatusTransaction(Session session, Session.Status status) { return session.sessionZooKeeperClient.createWriteStatusTransaction(status); } void setPrepared(Session session) { session.setStatus(Session.Status.PREPARE); } private static class FileTransaction extends AbstractTransaction { public static FileTransaction from(FileOperation operation) { FileTransaction transaction = new FileTransaction(); transaction.add(operation); return transaction; } @Override public void prepare() { } @Override public void commit() { for (Operation operation : operations()) ((FileOperation)operation).commit(); } } /** Factory for file operations */ private static class FileOperations { /** Creates an operation which recursively deletes the given path */ public static DeleteOperation delete(String pathToDelete) { return new DeleteOperation(pathToDelete); } } private interface FileOperation extends Transaction.Operation { void commit(); } /** * Recursively deletes this path and everything below. * Succeeds with no action if the path does not exist. */ private static class DeleteOperation implements FileOperation { private final String pathToDelete; DeleteOperation(String pathToDelete) { this.pathToDelete = pathToDelete; } @Override public void commit() { IOUtils.recursiveDeleteDir(new File(pathToDelete)); } } }
Yes, thanks
public void deleteSession(RemoteSession remoteSession, Optional<LocalSession> localSession) { localSession.ifPresent(this::deleteLocalSession); remoteSession.deactivate(); }
remoteSession.deactivate();
public void deleteSession(RemoteSession remoteSession, Optional<LocalSession> localSession) { localSession.ifPresent(this::deleteLocalSession); deactivate(remoteSession); }
class SessionRepository { private static final Logger log = Logger.getLogger(SessionRepository.class.getName()); private static final FilenameFilter sessionApplicationsFilter = (dir, name) -> name.matches("\\d+"); private static final long nonExistingActiveSessionId = 0; private final SessionCache<LocalSession> localSessionCache = new SessionCache<>(); private final SessionCache<RemoteSession> remoteSessionCache = new SessionCache<>(); private final Map<Long, SessionStateWatcher> sessionStateWatchers = new HashMap<>(); private final Duration sessionLifetime; private final Clock clock; private final Curator curator; private final Executor zkWatcherExecutor; private final TenantFileSystemDirs tenantFileSystemDirs; private final BooleanFlag distributeApplicationPackage; private final MetricUpdater metrics; private final Curator.DirectoryCache directoryCache; private final TenantApplications applicationRepo; private final SessionPreparer sessionPreparer; private final Path sessionsPath; private final TenantName tenantName; private final GlobalComponentRegistry componentRegistry; private final Path locksPath; public SessionRepository(TenantName tenantName, GlobalComponentRegistry componentRegistry, TenantApplications applicationRepo, FlagSource flagSource, SessionPreparer sessionPreparer) { this.tenantName = tenantName; this.componentRegistry = componentRegistry; this.sessionsPath = TenantRepository.getSessionsPath(tenantName); this.clock = componentRegistry.getClock(); this.curator = componentRegistry.getCurator(); this.sessionLifetime = Duration.ofSeconds(componentRegistry.getConfigserverConfig().sessionLifetime()); this.zkWatcherExecutor = command -> componentRegistry.getZkWatcherExecutor().execute(tenantName, command); this.tenantFileSystemDirs = new TenantFileSystemDirs(componentRegistry.getConfigServerDB(), tenantName); this.applicationRepo = applicationRepo; this.sessionPreparer = sessionPreparer; this.distributeApplicationPackage = 
Flags.CONFIGSERVER_DISTRIBUTE_APPLICATION_PACKAGE.bindTo(flagSource); this.metrics = componentRegistry.getMetrics().getOrCreateMetricUpdater(Metrics.createDimensions(tenantName)); this.locksPath = TenantRepository.getLocksPath(tenantName); loadSessions(); this.directoryCache = curator.createDirectoryCache(sessionsPath.getAbsolute(), false, false, componentRegistry.getZkCacheExecutor()); this.directoryCache.addListener(this::childEvent); this.directoryCache.start(); } private void loadSessions() { loadLocalSessions(); initializeRemoteSessions(); } public synchronized void addLocalSession(LocalSession session) { localSessionCache.addSession(session); long sessionId = session.getSessionId(); RemoteSession remoteSession = createRemoteSession(sessionId); addSessionStateWatcher(sessionId, remoteSession, Optional.of(session)); } public LocalSession getLocalSession(long sessionId) { return localSessionCache.getSession(sessionId); } public List<LocalSession> getLocalSessions() { return localSessionCache.getSessions(); } private void loadLocalSessions() { File[] sessions = tenantFileSystemDirs.sessionsPath().listFiles(sessionApplicationsFilter); if (sessions == null) return; for (File session : sessions) { try { addLocalSession(createSessionFromId(Long.parseLong(session.getName()))); } catch (IllegalArgumentException e) { log.log(Level.WARNING, "Could not load session '" + session.getAbsolutePath() + "':" + e.getMessage() + ", skipping it."); } } } public ConfigChangeActions prepareLocalSession(LocalSession session, DeployLogger logger, PrepareParams params, Optional<ApplicationSet> currentActiveApplicationSet, Path tenantPath, Instant now) { applicationRepo.createApplication(params.getApplicationId()); logger.log(Level.FINE, "Created application " + params.getApplicationId()); long sessionId = session.getSessionId(); SessionZooKeeperClient sessionZooKeeperClient = createSessionZooKeeperClient(sessionId); Curator.CompletionWaiter waiter = 
sessionZooKeeperClient.createPrepareWaiter(); ConfigChangeActions actions = sessionPreparer.prepare(applicationRepo.getHostValidator(), logger, params, currentActiveApplicationSet, tenantPath, now, getSessionAppDir(sessionId), session.getApplicationPackage(), sessionZooKeeperClient) .getConfigChangeActions(); setPrepared(session); waiter.awaitCompletion(params.getTimeoutBudget().timeLeft()); return actions; } public void deleteExpiredSessions(Map<ApplicationId, Long> activeSessions) { log.log(Level.FINE, "Purging old sessions for tenant '" + tenantName + "'"); try { for (LocalSession candidate : localSessionCache.getSessions()) { Instant createTime = candidate.getCreateTime(); log.log(Level.FINE, "Candidate session for deletion: " + candidate.getSessionId() + ", created: " + createTime); if (hasExpired(candidate) && !isActiveSession(candidate)) { deleteLocalSession(candidate); } else if (createTime.plus(Duration.ofDays(1)).isBefore(clock.instant())) { Optional<ApplicationId> applicationId = candidate.getOptionalApplicationId(); if (applicationId.isEmpty()) continue; Long activeSession = activeSessions.get(applicationId.get()); if (activeSession == null || activeSession != candidate.getSessionId()) { deleteLocalSession(candidate); log.log(Level.INFO, "Deleted inactive session " + candidate.getSessionId() + " created " + createTime + " for '" + applicationId + "'"); } } } } catch (Throwable e) { log.log(Level.WARNING, "Error when purging old sessions ", e); } log.log(Level.FINE, "Done purging old sessions"); } private boolean hasExpired(LocalSession candidate) { return (candidate.getCreateTime().plus(sessionLifetime).isBefore(clock.instant())); } private boolean isActiveSession(LocalSession candidate) { return candidate.getStatus() == Session.Status.ACTIVATE; } public void deleteLocalSession(LocalSession session) { long sessionId = session.getSessionId(); try (Lock lock = lock(sessionId)) { log.log(Level.FINE, "Deleting local session " + sessionId); 
SessionStateWatcher watcher = sessionStateWatchers.remove(sessionId); if (watcher != null) watcher.close(); localSessionCache.removeSession(sessionId); deletePersistentData(sessionId); } } private void deletePersistentData(long sessionId) { NestedTransaction transaction = new NestedTransaction(); SessionZooKeeperClient sessionZooKeeperClient = createSessionZooKeeperClient(sessionId); transaction.add(sessionZooKeeperClient.deleteTransaction(), FileTransaction.class); transaction.add(FileTransaction.from(FileOperations.delete(getSessionAppDir(sessionId).getAbsolutePath()))); transaction.commit(); } public void close() { deleteAllSessions(); tenantFileSystemDirs.delete(); try { if (directoryCache != null) { directoryCache.close(); } } catch (Exception e) { log.log(Level.WARNING, "Exception when closing path cache", e); } finally { checkForRemovedSessions(new ArrayList<>()); } } private void deleteAllSessions() { List<LocalSession> sessions = new ArrayList<>(localSessionCache.getSessions()); for (LocalSession session : sessions) { deleteLocalSession(session); } } public RemoteSession getRemoteSession(long sessionId) { return remoteSessionCache.getSession(sessionId); } public List<Long> getRemoteSessions() { return getSessionList(curator.getChildren(sessionsPath)); } public void addRemoteSession(RemoteSession session) { remoteSessionCache.addSession(session); metrics.incAddedSessions(); } public int deleteExpiredRemoteSessions(Clock clock, Duration expiryTime) { int deleted = 0; for (long sessionId : getRemoteSessions()) { RemoteSession session = remoteSessionCache.getSession(sessionId); if (session == null) continue; if (session.getStatus() == Session.Status.ACTIVATE) continue; if (sessionHasExpired(session.getCreateTime(), expiryTime, clock)) { log.log(Level.FINE, "Remote session " + sessionId + " for " + tenantName + " has expired, deleting it"); deleteSession(session); deleted++; } } return deleted; } public void deactivate(RemoteSession remoteSession) { 
remoteSessionCache.addSession(remoteSession.deactivate()); } public void deleteSession(RemoteSession session) { SessionZooKeeperClient sessionZooKeeperClient = createSessionZooKeeperClient(session.getSessionId()); Transaction transaction = sessionZooKeeperClient.deleteTransaction(); transaction.commit(); transaction.close(); } public int deleteExpiredLocks(Clock clock, Duration expiryTime) { int deleted = 0; for (var lock : curator.getChildren(locksPath)) { Path path = locksPath.append(lock); if (zooKeeperNodeCreated(path).orElse(clock.instant()).isBefore(clock.instant().minus(expiryTime))) { log.log(Level.FINE, "Lock " + path + " has expired, deleting it"); curator.delete(path); deleted++; } } return deleted; } private Optional<Instant> zooKeeperNodeCreated(Path path) { return curator.getStat(path).map(s -> Instant.ofEpochMilli(s.getCtime())); } private boolean sessionHasExpired(Instant created, Duration expiryTime, Clock clock) { return (created.plus(expiryTime).isBefore(clock.instant())); } private List<Long> getSessionListFromDirectoryCache(List<ChildData> children) { return getSessionList(children.stream() .map(child -> Path.fromString(child.getPath()).getName()) .collect(Collectors.toList())); } private List<Long> getSessionList(List<String> children) { return children.stream().map(Long::parseLong).collect(Collectors.toList()); } private void initializeRemoteSessions() throws NumberFormatException { getRemoteSessions().forEach(this::sessionAdded); } private synchronized void sessionsChanged() throws NumberFormatException { List<Long> sessions = getSessionListFromDirectoryCache(directoryCache.getCurrentData()); checkForRemovedSessions(sessions); checkForAddedSessions(sessions); } private void checkForRemovedSessions(List<Long> sessions) { for (RemoteSession session : remoteSessionCache.getSessions()) if ( ! 
sessions.contains(session.getSessionId())) sessionRemoved(session.getSessionId()); } private void checkForAddedSessions(List<Long> sessions) { for (Long sessionId : sessions) if (remoteSessionCache.getSession(sessionId) == null) sessionAdded(sessionId); } /** * A session for which we don't have a watcher, i.e. hitherto unknown to us. * * @param sessionId session id for the new session */ public void sessionAdded(long sessionId) { SessionZooKeeperClient sessionZKClient = createSessionZooKeeperClient(sessionId); if (sessionZKClient.readStatus().equals(Session.Status.DELETE)) return; log.log(Level.FINE, () -> "Adding remote session to SessionRepository: " + sessionId); RemoteSession remoteSession = createRemoteSession(sessionId); loadSessionIfActive(remoteSession); addRemoteSession(remoteSession); Optional<LocalSession> localSession = Optional.empty(); if (distributeApplicationPackage()) localSession = createLocalSessionUsingDistributedApplicationPackage(sessionId); addSessionStateWatcher(sessionId, remoteSession, localSession); } void activate(RemoteSession session) { long sessionId = session.getSessionId(); Curator.CompletionWaiter waiter = createSessionZooKeeperClient(sessionId).getActiveWaiter(); log.log(Level.FINE, () -> session.logPre() + "Getting session from repo: " + sessionId); ApplicationSet app = ensureApplicationLoaded(session); log.log(Level.FINE, () -> session.logPre() + "Reloading config for " + sessionId); applicationRepo.reloadConfig(app); log.log(Level.FINE, () -> session.logPre() + "Notifying " + waiter); notifyCompletion(waiter, session); log.log(Level.INFO, session.logPre() + "Session activated: " + sessionId); } boolean distributeApplicationPackage() { return distributeApplicationPackage.value(); } private void sessionRemoved(long sessionId) { SessionStateWatcher watcher = sessionStateWatchers.remove(sessionId); if (watcher != null) watcher.close(); remoteSessionCache.removeSession(sessionId); metrics.incRemovedSessions(); } private void 
loadSessionIfActive(RemoteSession session) { for (ApplicationId applicationId : applicationRepo.activeApplications()) { if (applicationRepo.requireActiveSessionOf(applicationId) == session.getSessionId()) { log.log(Level.FINE, () -> "Found active application for session " + session.getSessionId() + " , loading it"); applicationRepo.reloadConfig(ensureApplicationLoaded(session)); log.log(Level.INFO, session.logPre() + "Application activated successfully: " + applicationId + " (generation " + session.getSessionId() + ")"); return; } } } void prepareRemoteSession(RemoteSession session) { SessionZooKeeperClient sessionZooKeeperClient = createSessionZooKeeperClient(session.getSessionId()); Curator.CompletionWaiter waiter = sessionZooKeeperClient.getPrepareWaiter(); ensureApplicationLoaded(session); notifyCompletion(waiter, session); } public synchronized ApplicationSet ensureApplicationLoaded(RemoteSession session) { Optional<ApplicationSet> applicationSet = session.applicationSet(); if (applicationSet.isPresent()) { return applicationSet.get(); } ApplicationSet newApplicationSet = loadApplication(session); RemoteSession newSession = new RemoteSession(session.getTenantName(), session.getSessionId(), session.getSessionZooKeeperClient(), Optional.of(newApplicationSet)); remoteSessionCache.addSession(newSession); return newApplicationSet; } void confirmUpload(RemoteSession session) { Curator.CompletionWaiter waiter = session.getSessionZooKeeperClient().getUploadWaiter(); long sessionId = session.getSessionId(); log.log(Level.FINE, "Notifying upload waiter for session " + sessionId); notifyCompletion(waiter, session); log.log(Level.FINE, "Done notifying upload for session " + sessionId); } void notifyCompletion(Curator.CompletionWaiter completionWaiter, RemoteSession session) { try { completionWaiter.notifyCompletion(); } catch (RuntimeException e) { Set<Class<? 
extends KeeperException>> acceptedExceptions = Set.of(KeeperException.NoNodeException.class, KeeperException.NodeExistsException.class); Class<? extends Throwable> exceptionClass = e.getCause().getClass(); if (acceptedExceptions.contains(exceptionClass)) log.log(Level.FINE, "Not able to notify completion for session " + session.getSessionId() + " (" + completionWaiter + ")," + " node " + (exceptionClass.equals(KeeperException.NoNodeException.class) ? "has been deleted" : "already exists")); else throw e; } } private ApplicationSet loadApplication(RemoteSession session) { SessionZooKeeperClient sessionZooKeeperClient = createSessionZooKeeperClient(session.getSessionId()); ApplicationPackage applicationPackage = sessionZooKeeperClient.loadApplicationPackage(); ActivatedModelsBuilder builder = new ActivatedModelsBuilder(session.getTenantName(), session.getSessionId(), sessionZooKeeperClient, componentRegistry); Optional<AllocatedHosts> allocatedHosts = applicationPackage.getAllocatedHosts(); return ApplicationSet.fromList(builder.buildModels(session.getApplicationId(), sessionZooKeeperClient.readDockerImageRepository(), sessionZooKeeperClient.readVespaVersion(), applicationPackage, new SettableOptional<>(allocatedHosts), clock.instant())); } private void nodeChanged() { zkWatcherExecutor.execute(() -> { Multiset<Session.Status> sessionMetrics = HashMultiset.create(); for (RemoteSession session : remoteSessionCache.getSessions()) { sessionMetrics.add(session.getStatus()); } metrics.setNewSessions(sessionMetrics.count(Session.Status.NEW)); metrics.setPreparedSessions(sessionMetrics.count(Session.Status.PREPARE)); metrics.setActivatedSessions(sessionMetrics.count(Session.Status.ACTIVATE)); metrics.setDeactivatedSessions(sessionMetrics.count(Session.Status.DEACTIVATE)); }); } @SuppressWarnings("unused") private void childEvent(CuratorFramework ignored, PathChildrenCacheEvent event) { zkWatcherExecutor.execute(() -> { log.log(Level.FINE, () -> "Got child event: " + event); 
switch (event.getType()) { case CHILD_ADDED: sessionsChanged(); synchronizeOnNew(getSessionListFromDirectoryCache(Collections.singletonList(event.getData()))); break; case CHILD_REMOVED: case CONNECTION_RECONNECTED: sessionsChanged(); break; } }); } private void synchronizeOnNew(List<Long> sessionList) { for (long sessionId : sessionList) { RemoteSession session = remoteSessionCache.getSession(sessionId); if (session == null) continue; log.log(Level.FINE, () -> session.logPre() + "Confirming upload for session " + sessionId); confirmUpload(session); } } /** * Creates a new deployment session from an application package. * * @param applicationDirectory a File pointing to an application. * @param applicationId application id for this new session. * @param timeoutBudget Timeout for creating session and waiting for other servers. * @return a new session */ public LocalSession createSession(File applicationDirectory, ApplicationId applicationId, TimeoutBudget timeoutBudget, Optional<Long> activeSessionId) { return create(applicationDirectory, applicationId, activeSessionId, false, timeoutBudget); } public RemoteSession createRemoteSession(long sessionId) { SessionZooKeeperClient sessionZKClient = createSessionZooKeeperClient(sessionId); return new RemoteSession(tenantName, sessionId, sessionZKClient); } private void ensureSessionPathDoesNotExist(long sessionId) { Path sessionPath = getSessionPath(sessionId); if (componentRegistry.getConfigCurator().exists(sessionPath.getAbsolute())) { throw new IllegalArgumentException("Path " + sessionPath.getAbsolute() + " already exists in ZooKeeper"); } } private ApplicationPackage createApplication(File userDir, File configApplicationDir, ApplicationId applicationId, long sessionId, Optional<Long> currentlyActiveSessionId, boolean internalRedeploy) { long deployTimestamp = System.currentTimeMillis(); String user = System.getenv("USER"); if (user == null) { user = "unknown"; } DeployData deployData = new DeployData(user, 
userDir.getAbsolutePath(), applicationId, deployTimestamp, internalRedeploy, sessionId, currentlyActiveSessionId.orElse(nonExistingActiveSessionId)); return FilesApplicationPackage.fromFileWithDeployData(configApplicationDir, deployData); } private LocalSession createSessionFromApplication(ApplicationPackage applicationPackage, long sessionId, TimeoutBudget timeoutBudget, Clock clock) { log.log(Level.FINE, TenantRepository.logPre(tenantName) + "Creating session " + sessionId + " in ZooKeeper"); SessionZooKeeperClient sessionZKClient = createSessionZooKeeperClient(sessionId); sessionZKClient.createNewSession(clock.instant()); Curator.CompletionWaiter waiter = sessionZKClient.getUploadWaiter(); LocalSession session = new LocalSession(tenantName, sessionId, applicationPackage, sessionZKClient); waiter.awaitCompletion(timeoutBudget.timeLeft()); return session; } /** * Creates a new deployment session from an already existing session. * * @param existingSession the session to use as base * @param logger a deploy logger where the deploy log will be written. * @param internalRedeploy whether this session is for a system internal redeploy — not an application package change * @param timeoutBudget timeout for creating session and waiting for other servers. 
* @return a new session */ public LocalSession createSessionFromExisting(Session existingSession, DeployLogger logger, boolean internalRedeploy, TimeoutBudget timeoutBudget) { File existingApp = getSessionAppDir(existingSession.getSessionId()); ApplicationId existingApplicationId = existingSession.getApplicationId(); Optional<Long> activeSessionId = getActiveSessionId(existingApplicationId); logger.log(Level.FINE, "Create new session for application id '" + existingApplicationId + "' from existing active session " + activeSessionId); LocalSession session = create(existingApp, existingApplicationId, activeSessionId, internalRedeploy, timeoutBudget); session.setApplicationId(existingApplicationId); if (distributeApplicationPackage() && existingSession.getApplicationPackageReference() != null) { session.setApplicationPackageReference(existingSession.getApplicationPackageReference()); } session.setVespaVersion(existingSession.getVespaVersion()); session.setDockerImageRepository(existingSession.getDockerImageRepository()); session.setAthenzDomain(existingSession.getAthenzDomain()); return session; } private LocalSession create(File applicationFile, ApplicationId applicationId, Optional<Long> currentlyActiveSessionId, boolean internalRedeploy, TimeoutBudget timeoutBudget) { long sessionId = getNextSessionId(); try { ensureSessionPathDoesNotExist(sessionId); ApplicationPackage app = createApplicationPackage(applicationFile, applicationId, sessionId, currentlyActiveSessionId, internalRedeploy); return createSessionFromApplication(app, sessionId, timeoutBudget, clock); } catch (Exception e) { throw new RuntimeException("Error creating session " + sessionId, e); } } /** * This method is used when creating a session based on a remote session and the distributed application package * It does not wait for session being created on other servers */ private LocalSession createLocalSession(File applicationFile, ApplicationId applicationId, long sessionId) { try { Optional<Long> 
currentlyActiveSessionId = getActiveSessionId(applicationId); ApplicationPackage applicationPackage = createApplicationPackage(applicationFile, applicationId, sessionId, currentlyActiveSessionId, false); SessionZooKeeperClient sessionZooKeeperClient = createSessionZooKeeperClient(sessionId); return new LocalSession(tenantName, sessionId, applicationPackage, sessionZooKeeperClient); } catch (Exception e) { throw new RuntimeException("Error creating session " + sessionId, e); } } private ApplicationPackage createApplicationPackage(File applicationFile, ApplicationId applicationId, long sessionId, Optional<Long> currentlyActiveSessionId, boolean internalRedeploy) throws IOException { File userApplicationDir = getSessionAppDir(sessionId); copyApp(applicationFile, userApplicationDir); ApplicationPackage applicationPackage = createApplication(applicationFile, userApplicationDir, applicationId, sessionId, currentlyActiveSessionId, internalRedeploy); applicationPackage.writeMetaData(); return applicationPackage; } private void copyApp(File sourceDir, File destinationDir) throws IOException { if (destinationDir.exists()) throw new RuntimeException("Destination dir " + destinationDir + " already exists"); if (! 
sourceDir.isDirectory()) throw new IllegalArgumentException(sourceDir.getAbsolutePath() + " is not a directory"); java.nio.file.Path tempDestinationDir = null; try { tempDestinationDir = Files.createTempDirectory(destinationDir.getParentFile().toPath(), "app-package"); log.log(Level.FINE, "Copying dir " + sourceDir.getAbsolutePath() + " to " + tempDestinationDir.toFile().getAbsolutePath()); IOUtils.copyDirectory(sourceDir, tempDestinationDir.toFile()); log.log(Level.FINE, "Moving " + tempDestinationDir + " to " + destinationDir.getAbsolutePath()); Files.move(tempDestinationDir, destinationDir.toPath(), StandardCopyOption.ATOMIC_MOVE); } finally { if (tempDestinationDir != null) IOUtils.recursiveDeleteDir(tempDestinationDir.toFile()); } } /** * Returns a new session instance for the given session id. */ LocalSession createSessionFromId(long sessionId) { File sessionDir = getAndValidateExistingSessionAppDir(sessionId); ApplicationPackage applicationPackage = FilesApplicationPackage.fromFile(sessionDir); SessionZooKeeperClient sessionZKClient = createSessionZooKeeperClient(sessionId); return new LocalSession(tenantName, sessionId, applicationPackage, sessionZKClient); } /** * Returns a new local session for the given session id if it does not already exist. 
* Will also add the session to the local session cache if necessary */ public Optional<LocalSession> createLocalSessionUsingDistributedApplicationPackage(long sessionId) { if (applicationRepo.hasLocalSession(sessionId)) { log.log(Level.FINE, "Local session for session id " + sessionId + " already exists"); return Optional.of(createSessionFromId(sessionId)); } SessionZooKeeperClient sessionZKClient = createSessionZooKeeperClient(sessionId); FileReference fileReference = sessionZKClient.readApplicationPackageReference(); log.log(Level.FINE, "File reference for session id " + sessionId + ": " + fileReference); if (fileReference != null) { File rootDir = new File(Defaults.getDefaults().underVespaHome(componentRegistry.getConfigserverConfig().fileReferencesDir())); File sessionDir; FileDirectory fileDirectory = new FileDirectory(rootDir); try { sessionDir = fileDirectory.getFile(fileReference); } catch (IllegalArgumentException e) { log.log(Level.INFO, "File reference for session id " + sessionId + ": " + fileReference + " not found in " + fileDirectory); return Optional.empty(); } ApplicationId applicationId = sessionZKClient.readApplicationId() .orElseThrow(() -> new RuntimeException("Could not find application id for session " + sessionId)); log.log(Level.INFO, "Creating local session for tenant '" + tenantName + "' with session id " + sessionId); LocalSession localSession = createLocalSession(sessionDir, applicationId, sessionId); addLocalSession(localSession); return Optional.of(localSession); } return Optional.empty(); } private Optional<Long> getActiveSessionId(ApplicationId applicationId) { List<ApplicationId> applicationIds = applicationRepo.activeApplications(); return applicationIds.contains(applicationId) ? 
Optional.of(applicationRepo.requireActiveSessionOf(applicationId)) : Optional.empty(); } private long getNextSessionId() { return new SessionCounter(componentRegistry.getConfigCurator(), tenantName).nextSessionId(); } public Path getSessionPath(long sessionId) { return sessionsPath.append(String.valueOf(sessionId)); } Path getSessionStatePath(long sessionId) { return getSessionPath(sessionId).append(ConfigCurator.SESSIONSTATE_ZK_SUBPATH); } private SessionZooKeeperClient createSessionZooKeeperClient(long sessionId) { String serverId = componentRegistry.getConfigserverConfig().serverId(); return new SessionZooKeeperClient(curator, componentRegistry.getConfigCurator(), tenantName, sessionId, serverId); } private File getAndValidateExistingSessionAppDir(long sessionId) { File appDir = getSessionAppDir(sessionId); if (!appDir.exists() || !appDir.isDirectory()) { throw new IllegalArgumentException("Unable to find correct application directory for session " + sessionId); } return appDir; } private File getSessionAppDir(long sessionId) { return new TenantFileSystemDirs(componentRegistry.getConfigServerDB(), tenantName).getUserApplicationDir(sessionId); } private void addSessionStateWatcher(long sessionId, RemoteSession remoteSession, Optional<LocalSession> localSession) { if (sessionStateWatchers.containsKey(sessionId)) { localSession.ifPresent(session -> sessionStateWatchers.get(sessionId).addLocalSession(session)); } else { Curator.FileCache fileCache = curator.createFileCache(getSessionStatePath(sessionId).getAbsolute(), false); fileCache.addListener(this::nodeChanged); sessionStateWatchers.put(sessionId, new SessionStateWatcher(fileCache, remoteSession, localSession, metrics, zkWatcherExecutor, this)); } } @Override public String toString() { return getLocalSessions().toString(); } /** Returns the lock for session operations for the given session id. 
*/ public Lock lock(long sessionId) { return curator.lock(lockPath(sessionId), Duration.ofMinutes(1)); } public Clock clock() { return clock; } private Path lockPath(long sessionId) { return locksPath.append(String.valueOf(sessionId)); } public Transaction createActivateTransaction(Session session) { Transaction transaction = createSetStatusTransaction(session, Session.Status.ACTIVATE); transaction.add(applicationRepo.createPutTransaction(session.getApplicationId(), session.getSessionId()).operations()); return transaction; } private Transaction createSetStatusTransaction(Session session, Session.Status status) { return session.sessionZooKeeperClient.createWriteStatusTransaction(status); } void setPrepared(Session session) { session.setStatus(Session.Status.PREPARE); } private static class FileTransaction extends AbstractTransaction { public static FileTransaction from(FileOperation operation) { FileTransaction transaction = new FileTransaction(); transaction.add(operation); return transaction; } @Override public void prepare() { } @Override public void commit() { for (Operation operation : operations()) ((FileOperation)operation).commit(); } } /** Factory for file operations */ private static class FileOperations { /** Creates an operation which recursively deletes the given path */ public static DeleteOperation delete(String pathToDelete) { return new DeleteOperation(pathToDelete); } } private interface FileOperation extends Transaction.Operation { void commit(); } /** * Recursively deletes this path and everything below. * Succeeds with no action if the path does not exist. */ private static class DeleteOperation implements FileOperation { private final String pathToDelete; DeleteOperation(String pathToDelete) { this.pathToDelete = pathToDelete; } @Override public void commit() { IOUtils.recursiveDeleteDir(new File(pathToDelete)); } } }
class SessionRepository { private static final Logger log = Logger.getLogger(SessionRepository.class.getName()); private static final FilenameFilter sessionApplicationsFilter = (dir, name) -> name.matches("\\d+"); private static final long nonExistingActiveSessionId = 0; private final SessionCache<LocalSession> localSessionCache = new SessionCache<>(); private final SessionCache<RemoteSession> remoteSessionCache = new SessionCache<>(); private final Map<Long, SessionStateWatcher> sessionStateWatchers = new HashMap<>(); private final Duration sessionLifetime; private final Clock clock; private final Curator curator; private final Executor zkWatcherExecutor; private final TenantFileSystemDirs tenantFileSystemDirs; private final BooleanFlag distributeApplicationPackage; private final MetricUpdater metrics; private final Curator.DirectoryCache directoryCache; private final TenantApplications applicationRepo; private final SessionPreparer sessionPreparer; private final Path sessionsPath; private final TenantName tenantName; private final GlobalComponentRegistry componentRegistry; private final Path locksPath; public SessionRepository(TenantName tenantName, GlobalComponentRegistry componentRegistry, TenantApplications applicationRepo, FlagSource flagSource, SessionPreparer sessionPreparer) { this.tenantName = tenantName; this.componentRegistry = componentRegistry; this.sessionsPath = TenantRepository.getSessionsPath(tenantName); this.clock = componentRegistry.getClock(); this.curator = componentRegistry.getCurator(); this.sessionLifetime = Duration.ofSeconds(componentRegistry.getConfigserverConfig().sessionLifetime()); this.zkWatcherExecutor = command -> componentRegistry.getZkWatcherExecutor().execute(tenantName, command); this.tenantFileSystemDirs = new TenantFileSystemDirs(componentRegistry.getConfigServerDB(), tenantName); this.applicationRepo = applicationRepo; this.sessionPreparer = sessionPreparer; this.distributeApplicationPackage = 
Flags.CONFIGSERVER_DISTRIBUTE_APPLICATION_PACKAGE.bindTo(flagSource); this.metrics = componentRegistry.getMetrics().getOrCreateMetricUpdater(Metrics.createDimensions(tenantName)); this.locksPath = TenantRepository.getLocksPath(tenantName); loadSessions(); this.directoryCache = curator.createDirectoryCache(sessionsPath.getAbsolute(), false, false, componentRegistry.getZkCacheExecutor()); this.directoryCache.addListener(this::childEvent); this.directoryCache.start(); } private void loadSessions() { loadLocalSessions(); initializeRemoteSessions(); } public synchronized void addLocalSession(LocalSession session) { localSessionCache.putSession(session); long sessionId = session.getSessionId(); RemoteSession remoteSession = createRemoteSession(sessionId); addSessionStateWatcher(sessionId, remoteSession, Optional.of(session)); } public LocalSession getLocalSession(long sessionId) { return localSessionCache.getSession(sessionId); } public List<LocalSession> getLocalSessions() { return localSessionCache.getSessions(); } private void loadLocalSessions() { File[] sessions = tenantFileSystemDirs.sessionsPath().listFiles(sessionApplicationsFilter); if (sessions == null) return; for (File session : sessions) { try { addLocalSession(createSessionFromId(Long.parseLong(session.getName()))); } catch (IllegalArgumentException e) { log.log(Level.WARNING, "Could not load session '" + session.getAbsolutePath() + "':" + e.getMessage() + ", skipping it."); } } } public ConfigChangeActions prepareLocalSession(LocalSession session, DeployLogger logger, PrepareParams params, Optional<ApplicationSet> currentActiveApplicationSet, Path tenantPath, Instant now) { applicationRepo.createApplication(params.getApplicationId()); logger.log(Level.FINE, "Created application " + params.getApplicationId()); long sessionId = session.getSessionId(); SessionZooKeeperClient sessionZooKeeperClient = createSessionZooKeeperClient(sessionId); Curator.CompletionWaiter waiter = 
sessionZooKeeperClient.createPrepareWaiter(); ConfigChangeActions actions = sessionPreparer.prepare(applicationRepo.getHostValidator(), logger, params, currentActiveApplicationSet, tenantPath, now, getSessionAppDir(sessionId), session.getApplicationPackage(), sessionZooKeeperClient) .getConfigChangeActions(); setPrepared(session); waiter.awaitCompletion(params.getTimeoutBudget().timeLeft()); return actions; } public void deleteExpiredSessions(Map<ApplicationId, Long> activeSessions) { log.log(Level.FINE, "Purging old sessions for tenant '" + tenantName + "'"); try { for (LocalSession candidate : localSessionCache.getSessions()) { Instant createTime = candidate.getCreateTime(); log.log(Level.FINE, "Candidate session for deletion: " + candidate.getSessionId() + ", created: " + createTime); if (hasExpired(candidate) && !isActiveSession(candidate)) { deleteLocalSession(candidate); } else if (createTime.plus(Duration.ofDays(1)).isBefore(clock.instant())) { Optional<ApplicationId> applicationId = candidate.getOptionalApplicationId(); if (applicationId.isEmpty()) continue; Long activeSession = activeSessions.get(applicationId.get()); if (activeSession == null || activeSession != candidate.getSessionId()) { deleteLocalSession(candidate); log.log(Level.INFO, "Deleted inactive session " + candidate.getSessionId() + " created " + createTime + " for '" + applicationId + "'"); } } } } catch (Throwable e) { log.log(Level.WARNING, "Error when purging old sessions ", e); } log.log(Level.FINE, "Done purging old sessions"); } private boolean hasExpired(LocalSession candidate) { return (candidate.getCreateTime().plus(sessionLifetime).isBefore(clock.instant())); } private boolean isActiveSession(LocalSession candidate) { return candidate.getStatus() == Session.Status.ACTIVATE; } public void deleteLocalSession(LocalSession session) { long sessionId = session.getSessionId(); try (Lock lock = lock(sessionId)) { log.log(Level.FINE, "Deleting local session " + sessionId); 
SessionStateWatcher watcher = sessionStateWatchers.remove(sessionId); if (watcher != null) watcher.close(); localSessionCache.removeSession(sessionId); deletePersistentData(sessionId); } } private void deletePersistentData(long sessionId) { NestedTransaction transaction = new NestedTransaction(); SessionZooKeeperClient sessionZooKeeperClient = createSessionZooKeeperClient(sessionId); transaction.add(sessionZooKeeperClient.deleteTransaction(), FileTransaction.class); transaction.add(FileTransaction.from(FileOperations.delete(getSessionAppDir(sessionId).getAbsolutePath()))); transaction.commit(); } public void close() { deleteAllSessions(); tenantFileSystemDirs.delete(); try { if (directoryCache != null) { directoryCache.close(); } } catch (Exception e) { log.log(Level.WARNING, "Exception when closing path cache", e); } finally { checkForRemovedSessions(new ArrayList<>()); } } private void deleteAllSessions() { List<LocalSession> sessions = new ArrayList<>(localSessionCache.getSessions()); for (LocalSession session : sessions) { deleteLocalSession(session); } } public RemoteSession getRemoteSession(long sessionId) { return remoteSessionCache.getSession(sessionId); } public List<Long> getRemoteSessions() { return getSessionList(curator.getChildren(sessionsPath)); } public void addRemoteSession(RemoteSession session) { remoteSessionCache.putSession(session); metrics.incAddedSessions(); } public int deleteExpiredRemoteSessions(Clock clock, Duration expiryTime) { int deleted = 0; for (long sessionId : getRemoteSessions()) { RemoteSession session = remoteSessionCache.getSession(sessionId); if (session == null) continue; if (session.getStatus() == Session.Status.ACTIVATE) continue; if (sessionHasExpired(session.getCreateTime(), expiryTime, clock)) { log.log(Level.FINE, "Remote session " + sessionId + " for " + tenantName + " has expired, deleting it"); deleteSession(session); deleted++; } } return deleted; } public void deactivate(RemoteSession remoteSession) { 
remoteSessionCache.putSession(remoteSession.deactivated()); } public void deleteSession(RemoteSession session) { SessionZooKeeperClient sessionZooKeeperClient = createSessionZooKeeperClient(session.getSessionId()); Transaction transaction = sessionZooKeeperClient.deleteTransaction(); transaction.commit(); transaction.close(); } public int deleteExpiredLocks(Clock clock, Duration expiryTime) { int deleted = 0; for (var lock : curator.getChildren(locksPath)) { Path path = locksPath.append(lock); if (zooKeeperNodeCreated(path).orElse(clock.instant()).isBefore(clock.instant().minus(expiryTime))) { log.log(Level.FINE, "Lock " + path + " has expired, deleting it"); curator.delete(path); deleted++; } } return deleted; } private Optional<Instant> zooKeeperNodeCreated(Path path) { return curator.getStat(path).map(s -> Instant.ofEpochMilli(s.getCtime())); } private boolean sessionHasExpired(Instant created, Duration expiryTime, Clock clock) { return (created.plus(expiryTime).isBefore(clock.instant())); } private List<Long> getSessionListFromDirectoryCache(List<ChildData> children) { return getSessionList(children.stream() .map(child -> Path.fromString(child.getPath()).getName()) .collect(Collectors.toList())); } private List<Long> getSessionList(List<String> children) { return children.stream().map(Long::parseLong).collect(Collectors.toList()); } private void initializeRemoteSessions() throws NumberFormatException { getRemoteSessions().forEach(this::sessionAdded); } private synchronized void sessionsChanged() throws NumberFormatException { List<Long> sessions = getSessionListFromDirectoryCache(directoryCache.getCurrentData()); checkForRemovedSessions(sessions); checkForAddedSessions(sessions); } private void checkForRemovedSessions(List<Long> sessions) { for (RemoteSession session : remoteSessionCache.getSessions()) if ( ! 
sessions.contains(session.getSessionId())) sessionRemoved(session.getSessionId()); } private void checkForAddedSessions(List<Long> sessions) { for (Long sessionId : sessions) if (remoteSessionCache.getSession(sessionId) == null) sessionAdded(sessionId); } /** * A session for which we don't have a watcher, i.e. hitherto unknown to us. * * @param sessionId session id for the new session */ public void sessionAdded(long sessionId) { SessionZooKeeperClient sessionZKClient = createSessionZooKeeperClient(sessionId); if (sessionZKClient.readStatus().equals(Session.Status.DELETE)) return; log.log(Level.FINE, () -> "Adding remote session to SessionRepository: " + sessionId); RemoteSession remoteSession = createRemoteSession(sessionId); loadSessionIfActive(remoteSession); addRemoteSession(remoteSession); Optional<LocalSession> localSession = Optional.empty(); if (distributeApplicationPackage()) localSession = createLocalSessionUsingDistributedApplicationPackage(sessionId); addSessionStateWatcher(sessionId, remoteSession, localSession); } void activate(RemoteSession session) { long sessionId = session.getSessionId(); Curator.CompletionWaiter waiter = createSessionZooKeeperClient(sessionId).getActiveWaiter(); log.log(Level.FINE, () -> session.logPre() + "Getting session from repo: " + sessionId); ApplicationSet app = ensureApplicationLoaded(session); log.log(Level.FINE, () -> session.logPre() + "Reloading config for " + sessionId); applicationRepo.reloadConfig(app); log.log(Level.FINE, () -> session.logPre() + "Notifying " + waiter); notifyCompletion(waiter, session); log.log(Level.INFO, session.logPre() + "Session activated: " + sessionId); } boolean distributeApplicationPackage() { return distributeApplicationPackage.value(); } private void sessionRemoved(long sessionId) { SessionStateWatcher watcher = sessionStateWatchers.remove(sessionId); if (watcher != null) watcher.close(); remoteSessionCache.removeSession(sessionId); metrics.incRemovedSessions(); } private void 
loadSessionIfActive(RemoteSession session) { for (ApplicationId applicationId : applicationRepo.activeApplications()) { if (applicationRepo.requireActiveSessionOf(applicationId) == session.getSessionId()) { log.log(Level.FINE, () -> "Found active application for session " + session.getSessionId() + " , loading it"); applicationRepo.reloadConfig(ensureApplicationLoaded(session)); log.log(Level.INFO, session.logPre() + "Application activated successfully: " + applicationId + " (generation " + session.getSessionId() + ")"); return; } } } void prepareRemoteSession(RemoteSession session) { SessionZooKeeperClient sessionZooKeeperClient = createSessionZooKeeperClient(session.getSessionId()); Curator.CompletionWaiter waiter = sessionZooKeeperClient.getPrepareWaiter(); ensureApplicationLoaded(session); notifyCompletion(waiter, session); } public synchronized ApplicationSet ensureApplicationLoaded(RemoteSession session) { Optional<ApplicationSet> applicationSet = session.applicationSet(); if (applicationSet.isPresent()) { return applicationSet.get(); } ApplicationSet newApplicationSet = loadApplication(session); RemoteSession newSession = new RemoteSession(session.getTenantName(), session.getSessionId(), session.getSessionZooKeeperClient(), Optional.of(newApplicationSet)); remoteSessionCache.putSession(newSession); return newApplicationSet; } void confirmUpload(RemoteSession session) { Curator.CompletionWaiter waiter = session.getSessionZooKeeperClient().getUploadWaiter(); long sessionId = session.getSessionId(); log.log(Level.FINE, "Notifying upload waiter for session " + sessionId); notifyCompletion(waiter, session); log.log(Level.FINE, "Done notifying upload for session " + sessionId); } void notifyCompletion(Curator.CompletionWaiter completionWaiter, RemoteSession session) { try { completionWaiter.notifyCompletion(); } catch (RuntimeException e) { Set<Class<? 
extends KeeperException>> acceptedExceptions = Set.of(KeeperException.NoNodeException.class, KeeperException.NodeExistsException.class); Class<? extends Throwable> exceptionClass = e.getCause().getClass(); if (acceptedExceptions.contains(exceptionClass)) log.log(Level.FINE, "Not able to notify completion for session " + session.getSessionId() + " (" + completionWaiter + ")," + " node " + (exceptionClass.equals(KeeperException.NoNodeException.class) ? "has been deleted" : "already exists")); else throw e; } } private ApplicationSet loadApplication(RemoteSession session) { SessionZooKeeperClient sessionZooKeeperClient = createSessionZooKeeperClient(session.getSessionId()); ApplicationPackage applicationPackage = sessionZooKeeperClient.loadApplicationPackage(); ActivatedModelsBuilder builder = new ActivatedModelsBuilder(session.getTenantName(), session.getSessionId(), sessionZooKeeperClient, componentRegistry); Optional<AllocatedHosts> allocatedHosts = applicationPackage.getAllocatedHosts(); return ApplicationSet.fromList(builder.buildModels(session.getApplicationId(), sessionZooKeeperClient.readDockerImageRepository(), sessionZooKeeperClient.readVespaVersion(), applicationPackage, new SettableOptional<>(allocatedHosts), clock.instant())); } private void nodeChanged() { zkWatcherExecutor.execute(() -> { Multiset<Session.Status> sessionMetrics = HashMultiset.create(); for (RemoteSession session : remoteSessionCache.getSessions()) { sessionMetrics.add(session.getStatus()); } metrics.setNewSessions(sessionMetrics.count(Session.Status.NEW)); metrics.setPreparedSessions(sessionMetrics.count(Session.Status.PREPARE)); metrics.setActivatedSessions(sessionMetrics.count(Session.Status.ACTIVATE)); metrics.setDeactivatedSessions(sessionMetrics.count(Session.Status.DEACTIVATE)); }); } @SuppressWarnings("unused") private void childEvent(CuratorFramework ignored, PathChildrenCacheEvent event) { zkWatcherExecutor.execute(() -> { log.log(Level.FINE, () -> "Got child event: " + event); 
switch (event.getType()) { case CHILD_ADDED: sessionsChanged(); synchronizeOnNew(getSessionListFromDirectoryCache(Collections.singletonList(event.getData()))); break; case CHILD_REMOVED: case CONNECTION_RECONNECTED: sessionsChanged(); break; } }); } private void synchronizeOnNew(List<Long> sessionList) { for (long sessionId : sessionList) { RemoteSession session = remoteSessionCache.getSession(sessionId); if (session == null) continue; log.log(Level.FINE, () -> session.logPre() + "Confirming upload for session " + sessionId); confirmUpload(session); } } /** * Creates a new deployment session from an application package. * * @param applicationDirectory a File pointing to an application. * @param applicationId application id for this new session. * @param timeoutBudget Timeout for creating session and waiting for other servers. * @return a new session */ public LocalSession createSession(File applicationDirectory, ApplicationId applicationId, TimeoutBudget timeoutBudget, Optional<Long> activeSessionId) { return create(applicationDirectory, applicationId, activeSessionId, false, timeoutBudget); } public RemoteSession createRemoteSession(long sessionId) { SessionZooKeeperClient sessionZKClient = createSessionZooKeeperClient(sessionId); return new RemoteSession(tenantName, sessionId, sessionZKClient); } private void ensureSessionPathDoesNotExist(long sessionId) { Path sessionPath = getSessionPath(sessionId); if (componentRegistry.getConfigCurator().exists(sessionPath.getAbsolute())) { throw new IllegalArgumentException("Path " + sessionPath.getAbsolute() + " already exists in ZooKeeper"); } } private ApplicationPackage createApplication(File userDir, File configApplicationDir, ApplicationId applicationId, long sessionId, Optional<Long> currentlyActiveSessionId, boolean internalRedeploy) { long deployTimestamp = System.currentTimeMillis(); String user = System.getenv("USER"); if (user == null) { user = "unknown"; } DeployData deployData = new DeployData(user, 
userDir.getAbsolutePath(), applicationId, deployTimestamp, internalRedeploy, sessionId, currentlyActiveSessionId.orElse(nonExistingActiveSessionId)); return FilesApplicationPackage.fromFileWithDeployData(configApplicationDir, deployData); } private LocalSession createSessionFromApplication(ApplicationPackage applicationPackage, long sessionId, TimeoutBudget timeoutBudget, Clock clock) { log.log(Level.FINE, TenantRepository.logPre(tenantName) + "Creating session " + sessionId + " in ZooKeeper"); SessionZooKeeperClient sessionZKClient = createSessionZooKeeperClient(sessionId); sessionZKClient.createNewSession(clock.instant()); Curator.CompletionWaiter waiter = sessionZKClient.getUploadWaiter(); LocalSession session = new LocalSession(tenantName, sessionId, applicationPackage, sessionZKClient); waiter.awaitCompletion(timeoutBudget.timeLeft()); return session; } /** * Creates a new deployment session from an already existing session. * * @param existingSession the session to use as base * @param logger a deploy logger where the deploy log will be written. * @param internalRedeploy whether this session is for a system internal redeploy — not an application package change * @param timeoutBudget timeout for creating session and waiting for other servers. 
* @return a new session */ public LocalSession createSessionFromExisting(Session existingSession, DeployLogger logger, boolean internalRedeploy, TimeoutBudget timeoutBudget) { File existingApp = getSessionAppDir(existingSession.getSessionId()); ApplicationId existingApplicationId = existingSession.getApplicationId(); Optional<Long> activeSessionId = getActiveSessionId(existingApplicationId); logger.log(Level.FINE, "Create new session for application id '" + existingApplicationId + "' from existing active session " + activeSessionId); LocalSession session = create(existingApp, existingApplicationId, activeSessionId, internalRedeploy, timeoutBudget); session.setApplicationId(existingApplicationId); if (distributeApplicationPackage() && existingSession.getApplicationPackageReference() != null) { session.setApplicationPackageReference(existingSession.getApplicationPackageReference()); } session.setVespaVersion(existingSession.getVespaVersion()); session.setDockerImageRepository(existingSession.getDockerImageRepository()); session.setAthenzDomain(existingSession.getAthenzDomain()); return session; } private LocalSession create(File applicationFile, ApplicationId applicationId, Optional<Long> currentlyActiveSessionId, boolean internalRedeploy, TimeoutBudget timeoutBudget) { long sessionId = getNextSessionId(); try { ensureSessionPathDoesNotExist(sessionId); ApplicationPackage app = createApplicationPackage(applicationFile, applicationId, sessionId, currentlyActiveSessionId, internalRedeploy); return createSessionFromApplication(app, sessionId, timeoutBudget, clock); } catch (Exception e) { throw new RuntimeException("Error creating session " + sessionId, e); } } /** * This method is used when creating a session based on a remote session and the distributed application package * It does not wait for session being created on other servers */ private LocalSession createLocalSession(File applicationFile, ApplicationId applicationId, long sessionId) { try { Optional<Long> 
currentlyActiveSessionId = getActiveSessionId(applicationId); ApplicationPackage applicationPackage = createApplicationPackage(applicationFile, applicationId, sessionId, currentlyActiveSessionId, false); SessionZooKeeperClient sessionZooKeeperClient = createSessionZooKeeperClient(sessionId); return new LocalSession(tenantName, sessionId, applicationPackage, sessionZooKeeperClient); } catch (Exception e) { throw new RuntimeException("Error creating session " + sessionId, e); } } private ApplicationPackage createApplicationPackage(File applicationFile, ApplicationId applicationId, long sessionId, Optional<Long> currentlyActiveSessionId, boolean internalRedeploy) throws IOException { File userApplicationDir = getSessionAppDir(sessionId); copyApp(applicationFile, userApplicationDir); ApplicationPackage applicationPackage = createApplication(applicationFile, userApplicationDir, applicationId, sessionId, currentlyActiveSessionId, internalRedeploy); applicationPackage.writeMetaData(); return applicationPackage; } private void copyApp(File sourceDir, File destinationDir) throws IOException { if (destinationDir.exists()) throw new RuntimeException("Destination dir " + destinationDir + " already exists"); if (! 
sourceDir.isDirectory()) throw new IllegalArgumentException(sourceDir.getAbsolutePath() + " is not a directory"); java.nio.file.Path tempDestinationDir = null; try { tempDestinationDir = Files.createTempDirectory(destinationDir.getParentFile().toPath(), "app-package"); log.log(Level.FINE, "Copying dir " + sourceDir.getAbsolutePath() + " to " + tempDestinationDir.toFile().getAbsolutePath()); IOUtils.copyDirectory(sourceDir, tempDestinationDir.toFile()); log.log(Level.FINE, "Moving " + tempDestinationDir + " to " + destinationDir.getAbsolutePath()); Files.move(tempDestinationDir, destinationDir.toPath(), StandardCopyOption.ATOMIC_MOVE); } finally { if (tempDestinationDir != null) IOUtils.recursiveDeleteDir(tempDestinationDir.toFile()); } } /** * Returns a new session instance for the given session id. */ LocalSession createSessionFromId(long sessionId) { File sessionDir = getAndValidateExistingSessionAppDir(sessionId); ApplicationPackage applicationPackage = FilesApplicationPackage.fromFile(sessionDir); SessionZooKeeperClient sessionZKClient = createSessionZooKeeperClient(sessionId); return new LocalSession(tenantName, sessionId, applicationPackage, sessionZKClient); } /** * Returns a new local session for the given session id if it does not already exist. 
* Will also add the session to the local session cache if necessary */ public Optional<LocalSession> createLocalSessionUsingDistributedApplicationPackage(long sessionId) { if (applicationRepo.hasLocalSession(sessionId)) { log.log(Level.FINE, "Local session for session id " + sessionId + " already exists"); return Optional.of(createSessionFromId(sessionId)); } SessionZooKeeperClient sessionZKClient = createSessionZooKeeperClient(sessionId); FileReference fileReference = sessionZKClient.readApplicationPackageReference(); log.log(Level.FINE, "File reference for session id " + sessionId + ": " + fileReference); if (fileReference != null) { File rootDir = new File(Defaults.getDefaults().underVespaHome(componentRegistry.getConfigserverConfig().fileReferencesDir())); File sessionDir; FileDirectory fileDirectory = new FileDirectory(rootDir); try { sessionDir = fileDirectory.getFile(fileReference); } catch (IllegalArgumentException e) { log.log(Level.INFO, "File reference for session id " + sessionId + ": " + fileReference + " not found in " + fileDirectory); return Optional.empty(); } ApplicationId applicationId = sessionZKClient.readApplicationId() .orElseThrow(() -> new RuntimeException("Could not find application id for session " + sessionId)); log.log(Level.INFO, "Creating local session for tenant '" + tenantName + "' with session id " + sessionId); LocalSession localSession = createLocalSession(sessionDir, applicationId, sessionId); addLocalSession(localSession); return Optional.of(localSession); } return Optional.empty(); } private Optional<Long> getActiveSessionId(ApplicationId applicationId) { List<ApplicationId> applicationIds = applicationRepo.activeApplications(); return applicationIds.contains(applicationId) ? 
Optional.of(applicationRepo.requireActiveSessionOf(applicationId)) : Optional.empty(); } private long getNextSessionId() { return new SessionCounter(componentRegistry.getConfigCurator(), tenantName).nextSessionId(); } public Path getSessionPath(long sessionId) { return sessionsPath.append(String.valueOf(sessionId)); } Path getSessionStatePath(long sessionId) { return getSessionPath(sessionId).append(ConfigCurator.SESSIONSTATE_ZK_SUBPATH); } private SessionZooKeeperClient createSessionZooKeeperClient(long sessionId) { String serverId = componentRegistry.getConfigserverConfig().serverId(); return new SessionZooKeeperClient(curator, componentRegistry.getConfigCurator(), tenantName, sessionId, serverId); } private File getAndValidateExistingSessionAppDir(long sessionId) { File appDir = getSessionAppDir(sessionId); if (!appDir.exists() || !appDir.isDirectory()) { throw new IllegalArgumentException("Unable to find correct application directory for session " + sessionId); } return appDir; } private File getSessionAppDir(long sessionId) { return new TenantFileSystemDirs(componentRegistry.getConfigServerDB(), tenantName).getUserApplicationDir(sessionId); } private void addSessionStateWatcher(long sessionId, RemoteSession remoteSession, Optional<LocalSession> localSession) { if (sessionStateWatchers.containsKey(sessionId)) { localSession.ifPresent(session -> sessionStateWatchers.get(sessionId).addLocalSession(session)); } else { Curator.FileCache fileCache = curator.createFileCache(getSessionStatePath(sessionId).getAbsolute(), false); fileCache.addListener(this::nodeChanged); sessionStateWatchers.put(sessionId, new SessionStateWatcher(fileCache, remoteSession, localSession, metrics, zkWatcherExecutor, this)); } } @Override public String toString() { return getLocalSessions().toString(); } /** Returns the lock for session operations for the given session id. 
*/ public Lock lock(long sessionId) { return curator.lock(lockPath(sessionId), Duration.ofMinutes(1)); } public Clock clock() { return clock; } private Path lockPath(long sessionId) { return locksPath.append(String.valueOf(sessionId)); } public Transaction createActivateTransaction(Session session) { Transaction transaction = createSetStatusTransaction(session, Session.Status.ACTIVATE); transaction.add(applicationRepo.createPutTransaction(session.getApplicationId(), session.getSessionId()).operations()); return transaction; } private Transaction createSetStatusTransaction(Session session, Session.Status status) { return session.sessionZooKeeperClient.createWriteStatusTransaction(status); } void setPrepared(Session session) { session.setStatus(Session.Status.PREPARE); } private static class FileTransaction extends AbstractTransaction { public static FileTransaction from(FileOperation operation) { FileTransaction transaction = new FileTransaction(); transaction.add(operation); return transaction; } @Override public void prepare() { } @Override public void commit() { for (Operation operation : operations()) ((FileOperation)operation).commit(); } } /** Factory for file operations */ private static class FileOperations { /** Creates an operation which recursively deletes the given path */ public static DeleteOperation delete(String pathToDelete) { return new DeleteOperation(pathToDelete); } } private interface FileOperation extends Transaction.Operation { void commit(); } /** * Recursively deletes this path and everything below. * Succeeds with no action if the path does not exist. */ private static class DeleteOperation implements FileOperation { private final String pathToDelete; DeleteOperation(String pathToDelete) { this.pathToDelete = pathToDelete; } @Override public void commit() { IOUtils.recursiveDeleteDir(new File(pathToDelete)); } } }
```suggestion .stream() ```
public void updateTestReport(RunId id) { locked(id, run -> { if(run.stepStatus(endTests) .stream().peek(status -> System.out.println("endTests status: " + status.name())) .noneMatch(status -> status != Step.Status.unfinished)) { return run; } Optional<String> report = cloud.getTestReport(new DeploymentId(id.tester().id(), id.type().zone(controller.system()))); if (report.isEmpty()) { return run; } logs.writeTestReport(id, report.get()); return run; }); }
.stream().peek(status -> System.out.println("endTests status: " + status.name()))
public void updateTestReport(RunId id) { locked(id, run -> { Optional<TestReport> report = cloud.getTestReport(new DeploymentId(id.tester().id(), id.type().zone(controller.system()))); if (report.isEmpty()) { return run; } logs.writeTestReport(id, report.get()); return run; }); }
class JobController { public static final int historyLength = 64; public static final Duration maxHistoryAge = Duration.ofDays(60); private final Controller controller; private final CuratorDb curator; private final BufferedLogStore logs; private final TesterCloud cloud; private final Badges badges; private final JobMetrics metric; private final AtomicReference<Consumer<Run>> runner = new AtomicReference<>(__ -> { }); public JobController(Controller controller) { this.controller = controller; this.curator = controller.curator(); this.logs = new BufferedLogStore(curator, controller.serviceRegistry().runDataStore()); this.cloud = controller.serviceRegistry().testerCloud(); this.badges = new Badges(controller.zoneRegistry().badgeUrl()); this.metric = new JobMetrics(controller.metric(), controller.system()); } public TesterCloud cloud() { return cloud; } public int historyLength() { return historyLength; } public void setRunner(Consumer<Run> runner) { this.runner.set(runner); } /** Rewrite all job data with the newest format. */ public void updateStorage() { for (ApplicationId id : instances()) for (JobType type : jobs(id)) { locked(id, type, runs -> { curator.readLastRun(id, type).ifPresent(curator::writeLastRun); }); } } /** Returns all entries currently logged for the given run. */ public Optional<RunLog> details(RunId id) { return details(id, -1); } /** Returns the logged entries for the given run, which are after the given id threshold. */ public Optional<RunLog> details(RunId id, long after) { try (Lock __ = curator.lock(id.application(), id.type())) { Run run = runs(id.application(), id.type()).get(id); if (run == null) return Optional.empty(); return active(id).isPresent() ? Optional.of(logs.readActive(id.application(), id.type(), after)) : logs.readFinished(id, after); } } /** Stores the given log entries for the given run and step. 
*/ public void log(RunId id, Step step, List<LogEntry> entries) { locked(id, __ -> { logs.append(id.application(), id.type(), step, entries); return __; }); } /** Stores the given log messages for the given run and step. */ public void log(RunId id, Step step, Level level, List<String> messages) { log(id, step, messages.stream() .map(message -> new LogEntry(0, controller.clock().instant(), LogEntry.typeOf(level), message)) .collect(toList())); } /** Stores the given log message for the given run and step. */ public void log(RunId id, Step step, Level level, String message) { log(id, step, level, Collections.singletonList(message)); } /** Fetches any new Vespa log entries, and records the timestamp of the last of these, for continuation. */ public void updateVespaLog(RunId id) { locked(id, run -> { if ( ! run.hasStep(copyVespaLogs)) return run; ZoneId zone = id.type().zone(controller.system()); Optional<Deployment> deployment = Optional.ofNullable(controller.applications().requireInstance(id.application()) .deployments().get(zone)); if (deployment.isEmpty() || deployment.get().at().isBefore(run.start())) return run; Instant from = run.lastVespaLogTimestamp().isAfter(deployment.get().at()) ? run.lastVespaLogTimestamp() : deployment.get().at(); List<LogEntry> log = LogEntry.parseVespaLog(controller.serviceRegistry().configServer() .getLogs(new DeploymentId(id.application(), zone), Map.of("from", Long.toString(from.toEpochMilli()))), from); if (log.isEmpty()) return run; logs.append(id.application(), id.type(), Step.copyVespaLogs, log); return run.with(log.get(log.size() - 1).at()); }); } /** Fetches any new test log entries, and records the id of the last of these, for continuation. 
*/ public void updateTestLog(RunId id) { locked(id, run -> { Optional<Step> step = Stream.of(endStagingSetup, endTests) .filter(run.readySteps()::contains) .findAny(); if (step.isEmpty()) return run; List<LogEntry> entries = cloud.getLog(new DeploymentId(id.tester().id(), id.type().zone(controller.system())), run.lastTestLogEntry()); if (entries.isEmpty()) return run; logs.append(id.application(), id.type(), step.get(), entries); return run.with(entries.stream().mapToLong(LogEntry::id).max().getAsLong()); }); } public Optional<String> getTestReport(RunId id) { return logs.readTestReport(id); } /** Stores the given certificate as the tester certificate for this run, or throws if it's already set. */ public void storeTesterCertificate(RunId id, X509Certificate testerCertificate) { locked(id, run -> run.with(testerCertificate)); } /** Returns a list of all instances of applications which have registered. */ public List<ApplicationId> instances() { return copyOf(controller.applications().readable().stream() .flatMap(application -> application.instances().values().stream()) .map(Instance::id) .iterator()); } /** Returns all job types which have been run for the given application. */ public List<JobType> jobs(ApplicationId id) { return copyOf(Stream.of(JobType.values()) .filter(type -> last(id, type).isPresent()) .iterator()); } /** Returns an immutable map of all known runs for the given application and job type. */ public NavigableMap<RunId, Run> runs(JobId id) { return runs(id.application(), id.type()); } /** Returns an immutable map of all known runs for the given application and job type. */ public NavigableMap<RunId, Run> runs(ApplicationId id, JobType type) { ImmutableSortedMap.Builder<RunId, Run> runs = ImmutableSortedMap.orderedBy(Comparator.comparing(RunId::number)); Optional<Run> last = last(id, type); curator.readHistoricRuns(id, type).forEach((runId, run) -> { if (last.isEmpty() || ! 
runId.equals(last.get().id())) runs.put(runId, run); }); last.ifPresent(run -> runs.put(run.id(), run)); return runs.build(); } /** Returns the run with the given id, if it exists. */ public Optional<Run> run(RunId id) { return runs(id.application(), id.type()).values().stream() .filter(run -> run.id().equals(id)) .findAny(); } /** Returns the last run of the given type, for the given application, if one has been run. */ public Optional<Run> last(JobId job) { return curator.readLastRun(job.application(), job.type()); } /** Returns the last run of the given type, for the given application, if one has been run. */ public Optional<Run> last(ApplicationId id, JobType type) { return curator.readLastRun(id, type); } /** Returns the last completed of the given job. */ public Optional<Run> lastCompleted(JobId id) { return JobStatus.lastCompleted(runs(id)); } /** Returns the first failing of the given job. */ public Optional<Run> firstFailing(JobId id) { return JobStatus.firstFailing(runs(id)); } /** Returns the last success of the given job. */ public Optional<Run> lastSuccess(JobId id) { return JobStatus.lastSuccess(runs(id)); } /** Returns the run with the given id, provided it is still active. */ public Optional<Run> active(RunId id) { return last(id.application(), id.type()) .filter(run -> ! run.hasEnded()) .filter(run -> run.id().equals(id)); } /** Returns a list of all active runs. */ public List<Run> active() { return controller.applications().idList().stream() .flatMap(id -> active(id).stream()) .collect(toUnmodifiableList()); } /** Returns a list of all active runs for the given application. */ public List<Run> active(TenantAndApplicationId id) { return copyOf(controller.applications().requireApplication(id).instances().keySet().stream() .flatMap(name -> Stream.of(JobType.values()) .map(type -> last(id.instance(name), type)) .flatMap(Optional::stream) .filter(run -> ! run.hasEnded())) .iterator()); } /** Returns a list of all active runs for the given instance. 
*/ public List<Run> active(ApplicationId id) { return copyOf(Stream.of(JobType.values()) .map(type -> last(id, type)) .flatMap(Optional::stream) .filter(run -> ! run.hasEnded()) .iterator()); } /** Returns the job status of the given job, possibly empty. */ public JobStatus jobStatus(JobId id) { return new JobStatus(id, runs(id)); } /** Returns the deployment status of the given application. */ public DeploymentStatus deploymentStatus(Application application) { return deploymentStatus(application, controller.systemVersion()); } private DeploymentStatus deploymentStatus(Application application, Version systemVersion) { return new DeploymentStatus(application, DeploymentStatus.jobsFor(application, controller.system()).stream() .collect(toMap(job -> job, job -> jobStatus(job), (j1, j2) -> { throw new IllegalArgumentException("Duplicate key " + j1.id()); }, LinkedHashMap::new)), controller.system(), systemVersion, controller.clock().instant()); } /** Adds deployment status to each of the given applications. */ public DeploymentStatusList deploymentStatuses(ApplicationList applications, Version systemVersion) { return DeploymentStatusList.from(applications.asList().stream() .map(application -> deploymentStatus(application, systemVersion)) .collect(toUnmodifiableList())); } /** Adds deployment status to each of the given applications. Calling this will do an implicit read of the controller's version status */ public DeploymentStatusList deploymentStatuses(ApplicationList applications) { return deploymentStatuses(applications, controller.systemVersion()); } /** Changes the status of the given step, for the given run, provided it is still active. 
*/ public void update(RunId id, RunStatus status, LockedStep step) { locked(id, run -> run.with(status, step)); } /** Invoked when starting the step */ public void setStartTimestamp(RunId id, Instant timestamp, LockedStep step) { locked(id, run -> run.with(timestamp, step)); } /** Changes the status of the given run to inactive, and stores it as a historic run. */ public void finish(RunId id) { locked(id, run -> { Run finishedRun = run.finished(controller.clock().instant()); locked(id.application(), id.type(), runs -> { runs.put(run.id(), finishedRun); long last = id.number(); long successes = runs.values().stream().filter(old -> old.status() == RunStatus.success).count(); var oldEntries = runs.entrySet().iterator(); for (var old = oldEntries.next(); old.getKey().number() <= last - historyLength || old.getValue().start().isBefore(controller.clock().instant().minus(maxHistoryAge)); old = oldEntries.next()) { if ( successes == 1 && old.getValue().status() == RunStatus.success && ! old.getValue().start().isBefore(controller.clock().instant().minus(maxHistoryAge))) { oldEntries.next(); continue; } logs.delete(old.getKey()); oldEntries.remove(); } }); logs.flush(id); metric.jobFinished(run.id().job(), finishedRun.status()); return finishedRun; }); } /** Marks the given run as aborted; no further normal steps will run, but run-always steps will try to succeed. */ public void abort(RunId id) { locked(id, run -> run.aborted()); } /** * Accepts and stores a new application package and test jar pair under a generated application version key. 
*/ public ApplicationVersion submit(TenantAndApplicationId id, Optional<SourceRevision> revision, Optional<String> authorEmail, Optional<String> sourceUrl, long projectId, ApplicationPackage applicationPackage, byte[] testPackageBytes) { AtomicReference<ApplicationVersion> version = new AtomicReference<>(); controller.applications().lockApplicationOrThrow(id, application -> { long run = 1 + application.get().latestVersion() .map(latestVersion -> latestVersion.buildNumber().getAsLong()) .orElse(0L); version.set(ApplicationVersion.from(revision, run, authorEmail, applicationPackage.compileVersion(), applicationPackage.buildTime(), sourceUrl, revision.map(SourceRevision::commit))); controller.applications().applicationStore().put(id.tenant(), id.application(), version.get(), applicationPackage.zippedContent()); controller.applications().applicationStore().putTester(id.tenant(), id.application(), version.get(), testPackageBytes); prunePackages(id); controller.applications().storeWithUpdatedConfig(application, applicationPackage); controller.applications().deploymentTrigger().notifyOfSubmission(id, version.get(), projectId); }); return version.get(); } /** Orders a run of the given type, or throws an IllegalStateException if that job type is already running. */ public void start(ApplicationId id, JobType type, Versions versions) { start(id, type, versions, JobProfile.of(type)); } /** Orders a run of the given type, or throws an IllegalStateException if that job type is already running. 
*/ public void start(ApplicationId id, JobType type, Versions versions, JobProfile profile) { locked(id, type, __ -> { Optional<Run> last = last(id, type); if (last.flatMap(run -> active(run.id())).isPresent()) throw new IllegalStateException("Can not start " + type + " for " + id + "; it is already running!"); RunId newId = new RunId(id, type, last.map(run -> run.id().number()).orElse(0L) + 1); curator.writeLastRun(Run.initial(newId, versions, controller.clock().instant(), profile)); metric.jobStarted(newId.job()); }); } /** Stores the given package and starts a deployment of it, after aborting any such ongoing deployment. */ public void deploy(ApplicationId id, JobType type, Optional<Version> platform, ApplicationPackage applicationPackage) { controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(id), application -> { if ( ! application.get().instances().containsKey(id.instance())) application = controller.applications().withNewInstance(application, id); controller.applications().store(application); }); last(id, type).filter(run -> ! run.hasEnded()).ifPresent(run -> abortAndWait(run.id())); controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(id), application -> { controller.applications().applicationStore().putDev(id, type.zone(controller.system()), applicationPackage.zippedContent()); start(id, type, new Versions(platform.orElse(applicationPackage.deploymentSpec().majorVersion() .flatMap(controller.applications()::lastCompatibleVersion) .orElseGet(controller::systemVersion)), ApplicationVersion.unknown, Optional.empty(), Optional.empty()), JobProfile.development); }); locked(id, type, __ -> { runner.get().accept(last(id, type).get()); }); } /** Aborts a run and waits for it complete. */ private void abortAndWait(RunId id) { abort(id); runner.get().accept(last(id.application(), id.type()).get()); while ( ! 
last(id.application(), id.type()).get().hasEnded()) { try { Thread.sleep(100); } catch (InterruptedException e) { Thread.currentThread().interrupt(); throw new RuntimeException(e); } } } /** Deletes run data and tester deployments for applications which are unknown, or no longer built internally. */ public void collectGarbage() { Set<ApplicationId> applicationsToBuild = new HashSet<>(instances()); curator.applicationsWithJobs().stream() .filter(id -> ! applicationsToBuild.contains(id)) .forEach(id -> { try { TesterId tester = TesterId.of(id); for (JobType type : jobs(id)) locked(id, type, deactivateTester, __ -> { try (Lock ___ = curator.lock(id, type)) { deactivateTester(tester, type); curator.deleteRunData(id, type); logs.delete(id); } }); } catch (TimeoutException e) { return; } curator.deleteRunData(id); }); } public void deactivateTester(TesterId id, JobType type) { var zone = type.zone(controller.system()); try { controller.serviceRegistry().configServer().deactivate(new DeploymentId(id.id(), zone)); } catch (NotFoundException ignored) { } finally { controller.routing().policies().refresh(id.id(), DeploymentSpec.empty, zone); } } /** Returns a URI which points at a badge showing historic status of given length for the given job type for the given application. */ public URI historicBadge(ApplicationId id, JobType type, int historyLength) { List<Run> runs = new ArrayList<>(runs(id, type).values()); Run lastCompleted = null; if (runs.size() > 0) lastCompleted = runs.get(runs.size() - 1); if (runs.size() > 1 && ! lastCompleted.hasEnded()) lastCompleted = runs.get(runs.size() - 2); return badges.historic(id, Optional.ofNullable(lastCompleted), runs.subList(Math.max(0, runs.size() - historyLength), runs.size())); } /** Returns a URI which points at a badge showing current status for all jobs for the given application. 
*/ public URI overviewBadge(ApplicationId id) { DeploymentSteps steps = new DeploymentSteps(controller.applications().requireApplication(TenantAndApplicationId.from(id)) .deploymentSpec().requireInstance(id.instance()), controller::system); return badges.overview(id, steps.jobs().stream() .map(type -> last(id, type)) .flatMap(Optional::stream) .collect(toList())); } private void prunePackages(TenantAndApplicationId id) { controller.applications().lockApplicationIfPresent(id, application -> { application.get().productionDeployments().values().stream() .flatMap(List::stream) .map(Deployment::applicationVersion) .filter(version -> ! version.isUnknown()) .min(Comparator.comparingLong(applicationVersion -> applicationVersion.buildNumber().getAsLong())) .ifPresent(oldestDeployed -> { controller.applications().applicationStore().prune(id.tenant(), id.application(), oldestDeployed); controller.applications().applicationStore().pruneTesters(id.tenant(), id.application(), oldestDeployed); }); }); } /** Locks all runs and modifies the list of historic runs for the given application and job type. */ private void locked(ApplicationId id, JobType type, Consumer<SortedMap<RunId, Run>> modifications) { try (Lock __ = curator.lock(id, type)) { SortedMap<RunId, Run> runs = curator.readHistoricRuns(id, type); modifications.accept(runs); curator.writeHistoricRuns(id, type, runs.values()); } } /** Locks and modifies the run with the given id, provided it is still active. */ public void locked(RunId id, UnaryOperator<Run> modifications) { try (Lock __ = curator.lock(id.application(), id.type())) { active(id).ifPresent(run -> { run = modifications.apply(run); curator.writeLastRun(run); }); } } /** Locks the given step and checks none of its prerequisites are running, then performs the given actions. 
*/ public void locked(ApplicationId id, JobType type, Step step, Consumer<LockedStep> action) throws TimeoutException { try (Lock lock = curator.lock(id, type, step)) { for (Step prerequisite : step.prerequisites()) try (Lock __ = curator.lock(id, type, prerequisite)) { ; } action.accept(new LockedStep(lock, step)); } } }
class JobController { public static final int historyLength = 64; public static final Duration maxHistoryAge = Duration.ofDays(60); private final Controller controller; private final CuratorDb curator; private final BufferedLogStore logs; private final TesterCloud cloud; private final Badges badges; private final JobMetrics metric; private final AtomicReference<Consumer<Run>> runner = new AtomicReference<>(__ -> { }); public JobController(Controller controller) { this.controller = controller; this.curator = controller.curator(); this.logs = new BufferedLogStore(curator, controller.serviceRegistry().runDataStore()); this.cloud = controller.serviceRegistry().testerCloud(); this.badges = new Badges(controller.zoneRegistry().badgeUrl()); this.metric = new JobMetrics(controller.metric(), controller.system()); } public TesterCloud cloud() { return cloud; } public int historyLength() { return historyLength; } public void setRunner(Consumer<Run> runner) { this.runner.set(runner); } /** Rewrite all job data with the newest format. */ public void updateStorage() { for (ApplicationId id : instances()) for (JobType type : jobs(id)) { locked(id, type, runs -> { curator.readLastRun(id, type).ifPresent(curator::writeLastRun); }); } } /** Returns all entries currently logged for the given run. */ public Optional<RunLog> details(RunId id) { return details(id, -1); } /** Returns the logged entries for the given run, which are after the given id threshold. */ public Optional<RunLog> details(RunId id, long after) { try (Lock __ = curator.lock(id.application(), id.type())) { Run run = runs(id.application(), id.type()).get(id); if (run == null) return Optional.empty(); return active(id).isPresent() ? Optional.of(logs.readActive(id.application(), id.type(), after)) : logs.readFinished(id, after); } } /** Stores the given log entries for the given run and step. 
*/ public void log(RunId id, Step step, List<LogEntry> entries) { locked(id, __ -> { logs.append(id.application(), id.type(), step, entries); return __; }); } /** Stores the given log messages for the given run and step. */ public void log(RunId id, Step step, Level level, List<String> messages) { log(id, step, messages.stream() .map(message -> new LogEntry(0, controller.clock().instant(), LogEntry.typeOf(level), message)) .collect(toList())); } /** Stores the given log message for the given run and step. */ public void log(RunId id, Step step, Level level, String message) { log(id, step, level, Collections.singletonList(message)); } /** Fetches any new Vespa log entries, and records the timestamp of the last of these, for continuation. */ public void updateVespaLog(RunId id) { locked(id, run -> { if ( ! run.hasStep(copyVespaLogs)) return run; ZoneId zone = id.type().zone(controller.system()); Optional<Deployment> deployment = Optional.ofNullable(controller.applications().requireInstance(id.application()) .deployments().get(zone)); if (deployment.isEmpty() || deployment.get().at().isBefore(run.start())) return run; Instant from = run.lastVespaLogTimestamp().isAfter(deployment.get().at()) ? run.lastVespaLogTimestamp() : deployment.get().at(); List<LogEntry> log = LogEntry.parseVespaLog(controller.serviceRegistry().configServer() .getLogs(new DeploymentId(id.application(), zone), Map.of("from", Long.toString(from.toEpochMilli()))), from); if (log.isEmpty()) return run; logs.append(id.application(), id.type(), Step.copyVespaLogs, log); return run.with(log.get(log.size() - 1).at()); }); } /** Fetches any new test log entries, and records the id of the last of these, for continuation. 
*/ public void updateTestLog(RunId id) { locked(id, run -> { Optional<Step> step = Stream.of(endStagingSetup, endTests) .filter(run.readySteps()::contains) .findAny(); if (step.isEmpty()) return run; List<LogEntry> entries = cloud.getLog(new DeploymentId(id.tester().id(), id.type().zone(controller.system())), run.lastTestLogEntry()); if (entries.isEmpty()) return run; logs.append(id.application(), id.type(), step.get(), entries); return run.with(entries.stream().mapToLong(LogEntry::id).max().getAsLong()); }); } public Optional<String> getTestReport(RunId id) { return logs.readTestReport(id); } /** Stores the given certificate as the tester certificate for this run, or throws if it's already set. */ public void storeTesterCertificate(RunId id, X509Certificate testerCertificate) { locked(id, run -> run.with(testerCertificate)); } /** Returns a list of all instances of applications which have registered. */ public List<ApplicationId> instances() { return copyOf(controller.applications().readable().stream() .flatMap(application -> application.instances().values().stream()) .map(Instance::id) .iterator()); } /** Returns all job types which have been run for the given application. */ public List<JobType> jobs(ApplicationId id) { return copyOf(Stream.of(JobType.values()) .filter(type -> last(id, type).isPresent()) .iterator()); } /** Returns an immutable map of all known runs for the given application and job type. */ public NavigableMap<RunId, Run> runs(JobId id) { return runs(id.application(), id.type()); } /** Returns an immutable map of all known runs for the given application and job type. */ public NavigableMap<RunId, Run> runs(ApplicationId id, JobType type) { ImmutableSortedMap.Builder<RunId, Run> runs = ImmutableSortedMap.orderedBy(Comparator.comparing(RunId::number)); Optional<Run> last = last(id, type); curator.readHistoricRuns(id, type).forEach((runId, run) -> { if (last.isEmpty() || ! 
runId.equals(last.get().id())) runs.put(runId, run); }); last.ifPresent(run -> runs.put(run.id(), run)); return runs.build(); } /** Returns the run with the given id, if it exists. */ public Optional<Run> run(RunId id) { return runs(id.application(), id.type()).values().stream() .filter(run -> run.id().equals(id)) .findAny(); } /** Returns the last run of the given type, for the given application, if one has been run. */ public Optional<Run> last(JobId job) { return curator.readLastRun(job.application(), job.type()); } /** Returns the last run of the given type, for the given application, if one has been run. */ public Optional<Run> last(ApplicationId id, JobType type) { return curator.readLastRun(id, type); } /** Returns the last completed of the given job. */ public Optional<Run> lastCompleted(JobId id) { return JobStatus.lastCompleted(runs(id)); } /** Returns the first failing of the given job. */ public Optional<Run> firstFailing(JobId id) { return JobStatus.firstFailing(runs(id)); } /** Returns the last success of the given job. */ public Optional<Run> lastSuccess(JobId id) { return JobStatus.lastSuccess(runs(id)); } /** Returns the run with the given id, provided it is still active. */ public Optional<Run> active(RunId id) { return last(id.application(), id.type()) .filter(run -> ! run.hasEnded()) .filter(run -> run.id().equals(id)); } /** Returns a list of all active runs. */ public List<Run> active() { return controller.applications().idList().stream() .flatMap(id -> active(id).stream()) .collect(toUnmodifiableList()); } /** Returns a list of all active runs for the given application. */ public List<Run> active(TenantAndApplicationId id) { return copyOf(controller.applications().requireApplication(id).instances().keySet().stream() .flatMap(name -> Stream.of(JobType.values()) .map(type -> last(id.instance(name), type)) .flatMap(Optional::stream) .filter(run -> ! run.hasEnded())) .iterator()); } /** Returns a list of all active runs for the given instance. 
*/ public List<Run> active(ApplicationId id) { return copyOf(Stream.of(JobType.values()) .map(type -> last(id, type)) .flatMap(Optional::stream) .filter(run -> ! run.hasEnded()) .iterator()); } /** Returns the job status of the given job, possibly empty. */ public JobStatus jobStatus(JobId id) { return new JobStatus(id, runs(id)); } /** Returns the deployment status of the given application. */ public DeploymentStatus deploymentStatus(Application application) { return deploymentStatus(application, controller.systemVersion()); } private DeploymentStatus deploymentStatus(Application application, Version systemVersion) { return new DeploymentStatus(application, DeploymentStatus.jobsFor(application, controller.system()).stream() .collect(toMap(job -> job, job -> jobStatus(job), (j1, j2) -> { throw new IllegalArgumentException("Duplicate key " + j1.id()); }, LinkedHashMap::new)), controller.system(), systemVersion, controller.clock().instant()); } /** Adds deployment status to each of the given applications. */ public DeploymentStatusList deploymentStatuses(ApplicationList applications, Version systemVersion) { return DeploymentStatusList.from(applications.asList().stream() .map(application -> deploymentStatus(application, systemVersion)) .collect(toUnmodifiableList())); } /** Adds deployment status to each of the given applications. Calling this will do an implicit read of the controller's version status */ public DeploymentStatusList deploymentStatuses(ApplicationList applications) { return deploymentStatuses(applications, controller.systemVersion()); } /** Changes the status of the given step, for the given run, provided it is still active. 
*/ public void update(RunId id, RunStatus status, LockedStep step) { locked(id, run -> run.with(status, step)); } /** Invoked when starting the step */ public void setStartTimestamp(RunId id, Instant timestamp, LockedStep step) { locked(id, run -> run.with(timestamp, step)); } /** Changes the status of the given run to inactive, and stores it as a historic run. */ public void finish(RunId id) { locked(id, run -> { Run finishedRun = run.finished(controller.clock().instant()); locked(id.application(), id.type(), runs -> { runs.put(run.id(), finishedRun); long last = id.number(); long successes = runs.values().stream().filter(old -> old.status() == RunStatus.success).count(); var oldEntries = runs.entrySet().iterator(); for (var old = oldEntries.next(); old.getKey().number() <= last - historyLength || old.getValue().start().isBefore(controller.clock().instant().minus(maxHistoryAge)); old = oldEntries.next()) { if ( successes == 1 && old.getValue().status() == RunStatus.success && ! old.getValue().start().isBefore(controller.clock().instant().minus(maxHistoryAge))) { oldEntries.next(); continue; } logs.delete(old.getKey()); oldEntries.remove(); } }); logs.flush(id); metric.jobFinished(run.id().job(), finishedRun.status()); return finishedRun; }); } /** Marks the given run as aborted; no further normal steps will run, but run-always steps will try to succeed. */ public void abort(RunId id) { locked(id, run -> run.aborted()); } /** * Accepts and stores a new application package and test jar pair under a generated application version key. 
*/ public ApplicationVersion submit(TenantAndApplicationId id, Optional<SourceRevision> revision, Optional<String> authorEmail, Optional<String> sourceUrl, long projectId, ApplicationPackage applicationPackage, byte[] testPackageBytes) { AtomicReference<ApplicationVersion> version = new AtomicReference<>(); controller.applications().lockApplicationOrThrow(id, application -> { long run = 1 + application.get().latestVersion() .map(latestVersion -> latestVersion.buildNumber().getAsLong()) .orElse(0L); version.set(ApplicationVersion.from(revision, run, authorEmail, applicationPackage.compileVersion(), applicationPackage.buildTime(), sourceUrl, revision.map(SourceRevision::commit))); controller.applications().applicationStore().put(id.tenant(), id.application(), version.get(), applicationPackage.zippedContent()); controller.applications().applicationStore().putTester(id.tenant(), id.application(), version.get(), testPackageBytes); prunePackages(id); controller.applications().storeWithUpdatedConfig(application, applicationPackage); controller.applications().deploymentTrigger().notifyOfSubmission(id, version.get(), projectId); }); return version.get(); } /** Orders a run of the given type, or throws an IllegalStateException if that job type is already running. */ public void start(ApplicationId id, JobType type, Versions versions) { start(id, type, versions, JobProfile.of(type)); } /** Orders a run of the given type, or throws an IllegalStateException if that job type is already running. 
*/ public void start(ApplicationId id, JobType type, Versions versions, JobProfile profile) { locked(id, type, __ -> { Optional<Run> last = last(id, type); if (last.flatMap(run -> active(run.id())).isPresent()) throw new IllegalStateException("Can not start " + type + " for " + id + "; it is already running!"); RunId newId = new RunId(id, type, last.map(run -> run.id().number()).orElse(0L) + 1); curator.writeLastRun(Run.initial(newId, versions, controller.clock().instant(), profile)); metric.jobStarted(newId.job()); }); } /** Stores the given package and starts a deployment of it, after aborting any such ongoing deployment. */ public void deploy(ApplicationId id, JobType type, Optional<Version> platform, ApplicationPackage applicationPackage) { controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(id), application -> { if ( ! application.get().instances().containsKey(id.instance())) application = controller.applications().withNewInstance(application, id); controller.applications().store(application); }); last(id, type).filter(run -> ! run.hasEnded()).ifPresent(run -> abortAndWait(run.id())); controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(id), application -> { controller.applications().applicationStore().putDev(id, type.zone(controller.system()), applicationPackage.zippedContent()); start(id, type, new Versions(platform.orElse(applicationPackage.deploymentSpec().majorVersion() .flatMap(controller.applications()::lastCompatibleVersion) .orElseGet(controller::systemVersion)), ApplicationVersion.unknown, Optional.empty(), Optional.empty()), JobProfile.development); }); locked(id, type, __ -> { runner.get().accept(last(id, type).get()); }); } /** Aborts a run and waits for it complete. */ private void abortAndWait(RunId id) { abort(id); runner.get().accept(last(id.application(), id.type()).get()); while ( ! 
last(id.application(), id.type()).get().hasEnded()) { try { Thread.sleep(100); } catch (InterruptedException e) { Thread.currentThread().interrupt(); throw new RuntimeException(e); } } } /** Deletes run data and tester deployments for applications which are unknown, or no longer built internally. */ public void collectGarbage() { Set<ApplicationId> applicationsToBuild = new HashSet<>(instances()); curator.applicationsWithJobs().stream() .filter(id -> ! applicationsToBuild.contains(id)) .forEach(id -> { try { TesterId tester = TesterId.of(id); for (JobType type : jobs(id)) locked(id, type, deactivateTester, __ -> { try (Lock ___ = curator.lock(id, type)) { deactivateTester(tester, type); curator.deleteRunData(id, type); logs.delete(id); } }); } catch (TimeoutException e) { return; } curator.deleteRunData(id); }); } public void deactivateTester(TesterId id, JobType type) { var zone = type.zone(controller.system()); try { controller.serviceRegistry().configServer().deactivate(new DeploymentId(id.id(), zone)); } catch (NotFoundException ignored) { } finally { controller.routing().policies().refresh(id.id(), DeploymentSpec.empty, zone); } } /** Returns a URI which points at a badge showing historic status of given length for the given job type for the given application. */ public URI historicBadge(ApplicationId id, JobType type, int historyLength) { List<Run> runs = new ArrayList<>(runs(id, type).values()); Run lastCompleted = null; if (runs.size() > 0) lastCompleted = runs.get(runs.size() - 1); if (runs.size() > 1 && ! lastCompleted.hasEnded()) lastCompleted = runs.get(runs.size() - 2); return badges.historic(id, Optional.ofNullable(lastCompleted), runs.subList(Math.max(0, runs.size() - historyLength), runs.size())); } /** Returns a URI which points at a badge showing current status for all jobs for the given application. 
*/ public URI overviewBadge(ApplicationId id) { DeploymentSteps steps = new DeploymentSteps(controller.applications().requireApplication(TenantAndApplicationId.from(id)) .deploymentSpec().requireInstance(id.instance()), controller::system); return badges.overview(id, steps.jobs().stream() .map(type -> last(id, type)) .flatMap(Optional::stream) .collect(toList())); } private void prunePackages(TenantAndApplicationId id) { controller.applications().lockApplicationIfPresent(id, application -> { application.get().productionDeployments().values().stream() .flatMap(List::stream) .map(Deployment::applicationVersion) .filter(version -> ! version.isUnknown()) .min(Comparator.comparingLong(applicationVersion -> applicationVersion.buildNumber().getAsLong())) .ifPresent(oldestDeployed -> { controller.applications().applicationStore().prune(id.tenant(), id.application(), oldestDeployed); controller.applications().applicationStore().pruneTesters(id.tenant(), id.application(), oldestDeployed); }); }); } /** Locks all runs and modifies the list of historic runs for the given application and job type. */ private void locked(ApplicationId id, JobType type, Consumer<SortedMap<RunId, Run>> modifications) { try (Lock __ = curator.lock(id, type)) { SortedMap<RunId, Run> runs = curator.readHistoricRuns(id, type); modifications.accept(runs); curator.writeHistoricRuns(id, type, runs.values()); } } /** Locks and modifies the run with the given id, provided it is still active. */ public void locked(RunId id, UnaryOperator<Run> modifications) { try (Lock __ = curator.lock(id.application(), id.type())) { active(id).ifPresent(run -> { run = modifications.apply(run); curator.writeLastRun(run); }); } } /** Locks the given step and checks none of its prerequisites are running, then performs the given actions. 
*/ public void locked(ApplicationId id, JobType type, Step step, Consumer<LockedStep> action) throws TimeoutException { try (Lock lock = curator.lock(id, type, step)) { for (Step prerequisite : step.prerequisites()) try (Lock __ = curator.lock(id, type, prerequisite)) { ; } action.accept(new LockedStep(lock, step)); } } }
removed
/**
 * Fetches the test report for the given run from the tester and stores it, once all tests have finished.
 *
 * @param id the run whose test report to fetch and store
 */
public void updateTestReport(RunId id) {
    locked(id, run -> {
        // Wait until the endTests step has a final status: the report is only complete
        // once testing is done. (noneMatch is also true when no status is present yet.)
        if (run.stepStatus(endTests)
               .stream()
               .noneMatch(status -> status != Step.Status.unfinished)) {
            return run;
        }
        Optional<String> report = cloud.getTestReport(new DeploymentId(id.tester().id(),
                                                                       id.type().zone(controller.system())));
        if (report.isEmpty()) {
            return run; // The tester produced no report — nothing to store.
        }
        logs.writeTestReport(id, report.get());
        return run;
    });
}
.stream().peek(status -> System.out.println("endTests status: " + status.name()))
/** Fetches the test report for the given run from the tester cloud, if one exists, and stores it. */
public void updateTestReport(RunId id) {
    locked(id, run -> {
        // NOTE(review): unlike the earlier revision of this method, there is no guard on the
        // endTests step having finished — presumably the tester only serves a report once
        // testing is done; verify against the tester cloud's behaviour.
        Optional<TestReport> report = cloud.getTestReport(new DeploymentId(id.tester().id(), id.type().zone(controller.system())));
        if (report.isEmpty()) {
            return run; // No report available — nothing to store.
        }
        logs.writeTestReport(id, report.get());
        return run;
    });
}
/**
 * Maintains deployment job runs for applications: starting and finishing runs,
 * persisting their history and logs, and serving details and status for them.
 */
class JobController {

    // Maximum number of historic runs to keep per job, and maximum age of any kept run.
    public static final int historyLength = 64;
    public static final Duration maxHistoryAge = Duration.ofDays(60);

    private final Controller controller;
    private final CuratorDb curator;
    private final BufferedLogStore logs;
    private final TesterCloud cloud;
    private final Badges badges;
    private final JobMetrics metric;

    // The consumer which executes runs; a no-op until one is installed with setRunner().
    private final AtomicReference<Consumer<Run>> runner = new AtomicReference<>(__ -> { });

    public JobController(Controller controller) {
        this.controller = controller;
        this.curator = controller.curator();
        this.logs = new BufferedLogStore(curator, controller.serviceRegistry().runDataStore());
        this.cloud = controller.serviceRegistry().testerCloud();
        this.badges = new Badges(controller.zoneRegistry().badgeUrl());
        this.metric = new JobMetrics(controller.metric(), controller.system());
    }

    public TesterCloud cloud() { return cloud; }

    public int historyLength() { return historyLength; }

    public void setRunner(Consumer<Run> runner) { this.runner.set(runner); }

    /** Rewrite all job data with the newest format. */
    public void updateStorage() {
        for (ApplicationId id : instances())
            for (JobType type : jobs(id)) {
                // Re-serialise the last run under the job lock so it is stored in the current format.
                locked(id, type, runs -> {
                    curator.readLastRun(id, type).ifPresent(curator::writeLastRun);
                });
            }
    }

    /** Returns all entries currently logged for the given run. */
    public Optional<RunLog> details(RunId id) { return details(id, -1); }

    /** Returns the logged entries for the given run, which are after the given id threshold. */
    public Optional<RunLog> details(RunId id, long after) {
        try (Lock __ = curator.lock(id.application(), id.type())) {
            Run run = runs(id.application(), id.type()).get(id);
            if (run == null)
                return Optional.empty();

            // Active runs read from the live log buffer; finished runs from the stored archive.
            return active(id).isPresent()
                    ? Optional.of(logs.readActive(id.application(), id.type(), after))
                    : logs.readFinished(id, after);
        }
    }

    /** Stores the given log entries for the given run and step.
*/ public void log(RunId id, Step step, List<LogEntry> entries) { locked(id, __ -> { logs.append(id.application(), id.type(), step, entries); return __; }); } /** Stores the given log messages for the given run and step. */ public void log(RunId id, Step step, Level level, List<String> messages) { log(id, step, messages.stream() .map(message -> new LogEntry(0, controller.clock().instant(), LogEntry.typeOf(level), message)) .collect(toList())); } /** Stores the given log message for the given run and step. */ public void log(RunId id, Step step, Level level, String message) { log(id, step, level, Collections.singletonList(message)); } /** Fetches any new Vespa log entries, and records the timestamp of the last of these, for continuation. */ public void updateVespaLog(RunId id) { locked(id, run -> { if ( ! run.hasStep(copyVespaLogs)) return run; ZoneId zone = id.type().zone(controller.system()); Optional<Deployment> deployment = Optional.ofNullable(controller.applications().requireInstance(id.application()) .deployments().get(zone)); if (deployment.isEmpty() || deployment.get().at().isBefore(run.start())) return run; Instant from = run.lastVespaLogTimestamp().isAfter(deployment.get().at()) ? run.lastVespaLogTimestamp() : deployment.get().at(); List<LogEntry> log = LogEntry.parseVespaLog(controller.serviceRegistry().configServer() .getLogs(new DeploymentId(id.application(), zone), Map.of("from", Long.toString(from.toEpochMilli()))), from); if (log.isEmpty()) return run; logs.append(id.application(), id.type(), Step.copyVespaLogs, log); return run.with(log.get(log.size() - 1).at()); }); } /** Fetches any new test log entries, and records the id of the last of these, for continuation. 
*/ public void updateTestLog(RunId id) { locked(id, run -> { Optional<Step> step = Stream.of(endStagingSetup, endTests) .filter(run.readySteps()::contains) .findAny(); if (step.isEmpty()) return run; List<LogEntry> entries = cloud.getLog(new DeploymentId(id.tester().id(), id.type().zone(controller.system())), run.lastTestLogEntry()); if (entries.isEmpty()) return run; logs.append(id.application(), id.type(), step.get(), entries); return run.with(entries.stream().mapToLong(LogEntry::id).max().getAsLong()); }); } public Optional<String> getTestReport(RunId id) { return logs.readTestReport(id); } /** Stores the given certificate as the tester certificate for this run, or throws if it's already set. */ public void storeTesterCertificate(RunId id, X509Certificate testerCertificate) { locked(id, run -> run.with(testerCertificate)); } /** Returns a list of all instances of applications which have registered. */ public List<ApplicationId> instances() { return copyOf(controller.applications().readable().stream() .flatMap(application -> application.instances().values().stream()) .map(Instance::id) .iterator()); } /** Returns all job types which have been run for the given application. */ public List<JobType> jobs(ApplicationId id) { return copyOf(Stream.of(JobType.values()) .filter(type -> last(id, type).isPresent()) .iterator()); } /** Returns an immutable map of all known runs for the given application and job type. */ public NavigableMap<RunId, Run> runs(JobId id) { return runs(id.application(), id.type()); } /** Returns an immutable map of all known runs for the given application and job type. */ public NavigableMap<RunId, Run> runs(ApplicationId id, JobType type) { ImmutableSortedMap.Builder<RunId, Run> runs = ImmutableSortedMap.orderedBy(Comparator.comparing(RunId::number)); Optional<Run> last = last(id, type); curator.readHistoricRuns(id, type).forEach((runId, run) -> { if (last.isEmpty() || ! 
runId.equals(last.get().id())) runs.put(runId, run); }); last.ifPresent(run -> runs.put(run.id(), run)); return runs.build(); } /** Returns the run with the given id, if it exists. */ public Optional<Run> run(RunId id) { return runs(id.application(), id.type()).values().stream() .filter(run -> run.id().equals(id)) .findAny(); } /** Returns the last run of the given type, for the given application, if one has been run. */ public Optional<Run> last(JobId job) { return curator.readLastRun(job.application(), job.type()); } /** Returns the last run of the given type, for the given application, if one has been run. */ public Optional<Run> last(ApplicationId id, JobType type) { return curator.readLastRun(id, type); } /** Returns the last completed of the given job. */ public Optional<Run> lastCompleted(JobId id) { return JobStatus.lastCompleted(runs(id)); } /** Returns the first failing of the given job. */ public Optional<Run> firstFailing(JobId id) { return JobStatus.firstFailing(runs(id)); } /** Returns the last success of the given job. */ public Optional<Run> lastSuccess(JobId id) { return JobStatus.lastSuccess(runs(id)); } /** Returns the run with the given id, provided it is still active. */ public Optional<Run> active(RunId id) { return last(id.application(), id.type()) .filter(run -> ! run.hasEnded()) .filter(run -> run.id().equals(id)); } /** Returns a list of all active runs. */ public List<Run> active() { return controller.applications().idList().stream() .flatMap(id -> active(id).stream()) .collect(toUnmodifiableList()); } /** Returns a list of all active runs for the given application. */ public List<Run> active(TenantAndApplicationId id) { return copyOf(controller.applications().requireApplication(id).instances().keySet().stream() .flatMap(name -> Stream.of(JobType.values()) .map(type -> last(id.instance(name), type)) .flatMap(Optional::stream) .filter(run -> ! run.hasEnded())) .iterator()); } /** Returns a list of all active runs for the given instance. 
*/ public List<Run> active(ApplicationId id) { return copyOf(Stream.of(JobType.values()) .map(type -> last(id, type)) .flatMap(Optional::stream) .filter(run -> ! run.hasEnded()) .iterator()); } /** Returns the job status of the given job, possibly empty. */ public JobStatus jobStatus(JobId id) { return new JobStatus(id, runs(id)); } /** Returns the deployment status of the given application. */ public DeploymentStatus deploymentStatus(Application application) { return deploymentStatus(application, controller.systemVersion()); } private DeploymentStatus deploymentStatus(Application application, Version systemVersion) { return new DeploymentStatus(application, DeploymentStatus.jobsFor(application, controller.system()).stream() .collect(toMap(job -> job, job -> jobStatus(job), (j1, j2) -> { throw new IllegalArgumentException("Duplicate key " + j1.id()); }, LinkedHashMap::new)), controller.system(), systemVersion, controller.clock().instant()); } /** Adds deployment status to each of the given applications. */ public DeploymentStatusList deploymentStatuses(ApplicationList applications, Version systemVersion) { return DeploymentStatusList.from(applications.asList().stream() .map(application -> deploymentStatus(application, systemVersion)) .collect(toUnmodifiableList())); } /** Adds deployment status to each of the given applications. Calling this will do an implicit read of the controller's version status */ public DeploymentStatusList deploymentStatuses(ApplicationList applications) { return deploymentStatuses(applications, controller.systemVersion()); } /** Changes the status of the given step, for the given run, provided it is still active. 
*/
public void update(RunId id, RunStatus status, LockedStep step) {
    locked(id, run -> run.with(status, step));
}

/** Invoked when starting the step */
public void setStartTimestamp(RunId id, Instant timestamp, LockedStep step) {
    locked(id, run -> run.with(timestamp, step));
}

/**
 * Changes the status of the given run to inactive, stores it as a historic run, and prunes old history.
 *
 * Runs beyond {@code historyLength}, or older than {@code maxHistoryAge}, are deleted — except that
 * the sole successful run, when exactly one exists and it has not expired, is always retained.
 */
public void finish(RunId id) {
    locked(id, run -> {
        Run finishedRun = run.finished(controller.clock().instant());
        locked(id.application(), id.type(), runs -> {
            runs.put(run.id(), finishedRun);
            long last = id.number();
            long successes = runs.values().stream().filter(old -> old.status() == RunStatus.success).count();
            Instant expiry = controller.clock().instant().minus(maxHistoryAge); // One consistent cutoff for the whole sweep.
            // Iterate with explicit hasNext() checks: the previous for-loop called next() in both
            // the loop header and the keep-success branch without guarding, which could throw
            // NoSuchElementException on exhaustion and silently skipped an entry unexamined.
            var oldEntries = runs.entrySet().iterator();
            while (oldEntries.hasNext()) {
                var old = oldEntries.next();
                // Entries are ordered by run number; stop at the first entry that is both
                // within the history length and not yet expired.
                if (   old.getKey().number() > last - historyLength
                    && ! old.getValue().start().isBefore(expiry))
                    break;
                // Keep the sole unexpired success, so the job's last known success stays visible.
                if (   successes == 1
                    && old.getValue().status() == RunStatus.success
                    && ! old.getValue().start().isBefore(expiry))
                    continue;
                logs.delete(old.getKey());
                oldEntries.remove();
            }
        });
        logs.flush(id);
        metric.jobFinished(run.id().job(), finishedRun.status());
        return finishedRun;
    });
}

/** Marks the given run as aborted; no further normal steps will run, but run-always steps will try to succeed. */
public void abort(RunId id) {
    locked(id, run -> run.aborted());
}

/**
 * Accepts and stores a new application package and test jar pair under a generated application version key.
*/ public ApplicationVersion submit(TenantAndApplicationId id, Optional<SourceRevision> revision, Optional<String> authorEmail, Optional<String> sourceUrl, long projectId, ApplicationPackage applicationPackage, byte[] testPackageBytes) { AtomicReference<ApplicationVersion> version = new AtomicReference<>(); controller.applications().lockApplicationOrThrow(id, application -> { long run = 1 + application.get().latestVersion() .map(latestVersion -> latestVersion.buildNumber().getAsLong()) .orElse(0L); version.set(ApplicationVersion.from(revision, run, authorEmail, applicationPackage.compileVersion(), applicationPackage.buildTime(), sourceUrl, revision.map(SourceRevision::commit))); controller.applications().applicationStore().put(id.tenant(), id.application(), version.get(), applicationPackage.zippedContent()); controller.applications().applicationStore().putTester(id.tenant(), id.application(), version.get(), testPackageBytes); prunePackages(id); controller.applications().storeWithUpdatedConfig(application, applicationPackage); controller.applications().deploymentTrigger().notifyOfSubmission(id, version.get(), projectId); }); return version.get(); } /** Orders a run of the given type, or throws an IllegalStateException if that job type is already running. */ public void start(ApplicationId id, JobType type, Versions versions) { start(id, type, versions, JobProfile.of(type)); } /** Orders a run of the given type, or throws an IllegalStateException if that job type is already running. 
*/ public void start(ApplicationId id, JobType type, Versions versions, JobProfile profile) { locked(id, type, __ -> { Optional<Run> last = last(id, type); if (last.flatMap(run -> active(run.id())).isPresent()) throw new IllegalStateException("Can not start " + type + " for " + id + "; it is already running!"); RunId newId = new RunId(id, type, last.map(run -> run.id().number()).orElse(0L) + 1); curator.writeLastRun(Run.initial(newId, versions, controller.clock().instant(), profile)); metric.jobStarted(newId.job()); }); } /** Stores the given package and starts a deployment of it, after aborting any such ongoing deployment. */ public void deploy(ApplicationId id, JobType type, Optional<Version> platform, ApplicationPackage applicationPackage) { controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(id), application -> { if ( ! application.get().instances().containsKey(id.instance())) application = controller.applications().withNewInstance(application, id); controller.applications().store(application); }); last(id, type).filter(run -> ! run.hasEnded()).ifPresent(run -> abortAndWait(run.id())); controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(id), application -> { controller.applications().applicationStore().putDev(id, type.zone(controller.system()), applicationPackage.zippedContent()); start(id, type, new Versions(platform.orElse(applicationPackage.deploymentSpec().majorVersion() .flatMap(controller.applications()::lastCompatibleVersion) .orElseGet(controller::systemVersion)), ApplicationVersion.unknown, Optional.empty(), Optional.empty()), JobProfile.development); }); locked(id, type, __ -> { runner.get().accept(last(id, type).get()); }); } /** Aborts a run and waits for it complete. */ private void abortAndWait(RunId id) { abort(id); runner.get().accept(last(id.application(), id.type()).get()); while ( ! 
last(id.application(), id.type()).get().hasEnded()) { try { Thread.sleep(100); } catch (InterruptedException e) { Thread.currentThread().interrupt(); throw new RuntimeException(e); } } } /** Deletes run data and tester deployments for applications which are unknown, or no longer built internally. */ public void collectGarbage() { Set<ApplicationId> applicationsToBuild = new HashSet<>(instances()); curator.applicationsWithJobs().stream() .filter(id -> ! applicationsToBuild.contains(id)) .forEach(id -> { try { TesterId tester = TesterId.of(id); for (JobType type : jobs(id)) locked(id, type, deactivateTester, __ -> { try (Lock ___ = curator.lock(id, type)) { deactivateTester(tester, type); curator.deleteRunData(id, type); logs.delete(id); } }); } catch (TimeoutException e) { return; } curator.deleteRunData(id); }); } public void deactivateTester(TesterId id, JobType type) { var zone = type.zone(controller.system()); try { controller.serviceRegistry().configServer().deactivate(new DeploymentId(id.id(), zone)); } catch (NotFoundException ignored) { } finally { controller.routing().policies().refresh(id.id(), DeploymentSpec.empty, zone); } } /** Returns a URI which points at a badge showing historic status of given length for the given job type for the given application. */ public URI historicBadge(ApplicationId id, JobType type, int historyLength) { List<Run> runs = new ArrayList<>(runs(id, type).values()); Run lastCompleted = null; if (runs.size() > 0) lastCompleted = runs.get(runs.size() - 1); if (runs.size() > 1 && ! lastCompleted.hasEnded()) lastCompleted = runs.get(runs.size() - 2); return badges.historic(id, Optional.ofNullable(lastCompleted), runs.subList(Math.max(0, runs.size() - historyLength), runs.size())); } /** Returns a URI which points at a badge showing current status for all jobs for the given application. 
*/
public URI overviewBadge(ApplicationId id) {
    DeploymentSteps steps = new DeploymentSteps(controller.applications().requireApplication(TenantAndApplicationId.from(id))
                                                          .deploymentSpec().requireInstance(id.instance()),
                                                controller::system);
    // One entry per declared job which has been run at least once.
    return badges.overview(id,
                           steps.jobs().stream()
                                .map(type -> last(id, type))
                                .flatMap(Optional::stream)
                                .collect(toList()));
}

/** Prunes stored application and tester packages against the oldest production-deployed version. */
private void prunePackages(TenantAndApplicationId id) {
    controller.applications().lockApplicationIfPresent(id, application -> {
        application.get().productionDeployments().values().stream()
                   .flatMap(List::stream)
                   .map(Deployment::applicationVersion)
                   .filter(version -> ! version.isUnknown())
                   .min(Comparator.comparingLong(applicationVersion -> applicationVersion.buildNumber().getAsLong()))
                   .ifPresent(oldestDeployed -> {
                       controller.applications().applicationStore().prune(id.tenant(), id.application(), oldestDeployed);
                       controller.applications().applicationStore().pruneTesters(id.tenant(), id.application(), oldestDeployed);
                   });
    });
}

/** Locks all runs and modifies the list of historic runs for the given application and job type. */
private void locked(ApplicationId id, JobType type, Consumer<SortedMap<RunId, Run>> modifications) {
    try (Lock __ = curator.lock(id, type)) {
        SortedMap<RunId, Run> runs = curator.readHistoricRuns(id, type);
        modifications.accept(runs);
        // Persist whatever state the modifications left in the map.
        curator.writeHistoricRuns(id, type, runs.values());
    }
}

/** Locks and modifies the run with the given id, provided it is still active. */
public void locked(RunId id, UnaryOperator<Run> modifications) {
    try (Lock __ = curator.lock(id.application(), id.type())) {
        // Silently a no-op if the run has already ended or was superseded.
        active(id).ifPresent(run -> {
            run = modifications.apply(run);
            curator.writeLastRun(run);
        });
    }
}

/** Locks the given step and checks none of its prerequisites are running, then performs the given actions.
*/
public void locked(ApplicationId id, JobType type, Step step, Consumer<LockedStep> action) throws TimeoutException {
    try (Lock lock = curator.lock(id, type, step)) {
        // Momentarily take and release each prerequisite's lock: being able to acquire it
        // proves that step is not currently running, since a running step holds its own lock.
        for (Step prerequisite : step.prerequisites())
            try (Lock __ = curator.lock(id, type, prerequisite)) {
                ;
            }
        action.accept(new LockedStep(lock, step));
    }
}
}
class JobController { public static final int historyLength = 64; public static final Duration maxHistoryAge = Duration.ofDays(60); private final Controller controller; private final CuratorDb curator; private final BufferedLogStore logs; private final TesterCloud cloud; private final Badges badges; private final JobMetrics metric; private final AtomicReference<Consumer<Run>> runner = new AtomicReference<>(__ -> { }); public JobController(Controller controller) { this.controller = controller; this.curator = controller.curator(); this.logs = new BufferedLogStore(curator, controller.serviceRegistry().runDataStore()); this.cloud = controller.serviceRegistry().testerCloud(); this.badges = new Badges(controller.zoneRegistry().badgeUrl()); this.metric = new JobMetrics(controller.metric(), controller.system()); } public TesterCloud cloud() { return cloud; } public int historyLength() { return historyLength; } public void setRunner(Consumer<Run> runner) { this.runner.set(runner); } /** Rewrite all job data with the newest format. */ public void updateStorage() { for (ApplicationId id : instances()) for (JobType type : jobs(id)) { locked(id, type, runs -> { curator.readLastRun(id, type).ifPresent(curator::writeLastRun); }); } } /** Returns all entries currently logged for the given run. */ public Optional<RunLog> details(RunId id) { return details(id, -1); } /** Returns the logged entries for the given run, which are after the given id threshold. */ public Optional<RunLog> details(RunId id, long after) { try (Lock __ = curator.lock(id.application(), id.type())) { Run run = runs(id.application(), id.type()).get(id); if (run == null) return Optional.empty(); return active(id).isPresent() ? Optional.of(logs.readActive(id.application(), id.type(), after)) : logs.readFinished(id, after); } } /** Stores the given log entries for the given run and step. 
*/ public void log(RunId id, Step step, List<LogEntry> entries) { locked(id, __ -> { logs.append(id.application(), id.type(), step, entries); return __; }); } /** Stores the given log messages for the given run and step. */ public void log(RunId id, Step step, Level level, List<String> messages) { log(id, step, messages.stream() .map(message -> new LogEntry(0, controller.clock().instant(), LogEntry.typeOf(level), message)) .collect(toList())); } /** Stores the given log message for the given run and step. */ public void log(RunId id, Step step, Level level, String message) { log(id, step, level, Collections.singletonList(message)); } /** Fetches any new Vespa log entries, and records the timestamp of the last of these, for continuation. */ public void updateVespaLog(RunId id) { locked(id, run -> { if ( ! run.hasStep(copyVespaLogs)) return run; ZoneId zone = id.type().zone(controller.system()); Optional<Deployment> deployment = Optional.ofNullable(controller.applications().requireInstance(id.application()) .deployments().get(zone)); if (deployment.isEmpty() || deployment.get().at().isBefore(run.start())) return run; Instant from = run.lastVespaLogTimestamp().isAfter(deployment.get().at()) ? run.lastVespaLogTimestamp() : deployment.get().at(); List<LogEntry> log = LogEntry.parseVespaLog(controller.serviceRegistry().configServer() .getLogs(new DeploymentId(id.application(), zone), Map.of("from", Long.toString(from.toEpochMilli()))), from); if (log.isEmpty()) return run; logs.append(id.application(), id.type(), Step.copyVespaLogs, log); return run.with(log.get(log.size() - 1).at()); }); } /** Fetches any new test log entries, and records the id of the last of these, for continuation. 
*/ public void updateTestLog(RunId id) { locked(id, run -> { Optional<Step> step = Stream.of(endStagingSetup, endTests) .filter(run.readySteps()::contains) .findAny(); if (step.isEmpty()) return run; List<LogEntry> entries = cloud.getLog(new DeploymentId(id.tester().id(), id.type().zone(controller.system())), run.lastTestLogEntry()); if (entries.isEmpty()) return run; logs.append(id.application(), id.type(), step.get(), entries); return run.with(entries.stream().mapToLong(LogEntry::id).max().getAsLong()); }); } public Optional<String> getTestReport(RunId id) { return logs.readTestReport(id); } /** Stores the given certificate as the tester certificate for this run, or throws if it's already set. */ public void storeTesterCertificate(RunId id, X509Certificate testerCertificate) { locked(id, run -> run.with(testerCertificate)); } /** Returns a list of all instances of applications which have registered. */ public List<ApplicationId> instances() { return copyOf(controller.applications().readable().stream() .flatMap(application -> application.instances().values().stream()) .map(Instance::id) .iterator()); } /** Returns all job types which have been run for the given application. */ public List<JobType> jobs(ApplicationId id) { return copyOf(Stream.of(JobType.values()) .filter(type -> last(id, type).isPresent()) .iterator()); } /** Returns an immutable map of all known runs for the given application and job type. */ public NavigableMap<RunId, Run> runs(JobId id) { return runs(id.application(), id.type()); } /** Returns an immutable map of all known runs for the given application and job type. */ public NavigableMap<RunId, Run> runs(ApplicationId id, JobType type) { ImmutableSortedMap.Builder<RunId, Run> runs = ImmutableSortedMap.orderedBy(Comparator.comparing(RunId::number)); Optional<Run> last = last(id, type); curator.readHistoricRuns(id, type).forEach((runId, run) -> { if (last.isEmpty() || ! 
runId.equals(last.get().id())) runs.put(runId, run); }); last.ifPresent(run -> runs.put(run.id(), run)); return runs.build(); } /** Returns the run with the given id, if it exists. */ public Optional<Run> run(RunId id) { return runs(id.application(), id.type()).values().stream() .filter(run -> run.id().equals(id)) .findAny(); } /** Returns the last run of the given type, for the given application, if one has been run. */ public Optional<Run> last(JobId job) { return curator.readLastRun(job.application(), job.type()); } /** Returns the last run of the given type, for the given application, if one has been run. */ public Optional<Run> last(ApplicationId id, JobType type) { return curator.readLastRun(id, type); } /** Returns the last completed of the given job. */ public Optional<Run> lastCompleted(JobId id) { return JobStatus.lastCompleted(runs(id)); } /** Returns the first failing of the given job. */ public Optional<Run> firstFailing(JobId id) { return JobStatus.firstFailing(runs(id)); } /** Returns the last success of the given job. */ public Optional<Run> lastSuccess(JobId id) { return JobStatus.lastSuccess(runs(id)); } /** Returns the run with the given id, provided it is still active. */ public Optional<Run> active(RunId id) { return last(id.application(), id.type()) .filter(run -> ! run.hasEnded()) .filter(run -> run.id().equals(id)); } /** Returns a list of all active runs. */ public List<Run> active() { return controller.applications().idList().stream() .flatMap(id -> active(id).stream()) .collect(toUnmodifiableList()); } /** Returns a list of all active runs for the given application. */ public List<Run> active(TenantAndApplicationId id) { return copyOf(controller.applications().requireApplication(id).instances().keySet().stream() .flatMap(name -> Stream.of(JobType.values()) .map(type -> last(id.instance(name), type)) .flatMap(Optional::stream) .filter(run -> ! run.hasEnded())) .iterator()); } /** Returns a list of all active runs for the given instance. 
*/ public List<Run> active(ApplicationId id) { return copyOf(Stream.of(JobType.values()) .map(type -> last(id, type)) .flatMap(Optional::stream) .filter(run -> ! run.hasEnded()) .iterator()); } /** Returns the job status of the given job, possibly empty. */ public JobStatus jobStatus(JobId id) { return new JobStatus(id, runs(id)); } /** Returns the deployment status of the given application. */ public DeploymentStatus deploymentStatus(Application application) { return deploymentStatus(application, controller.systemVersion()); } private DeploymentStatus deploymentStatus(Application application, Version systemVersion) { return new DeploymentStatus(application, DeploymentStatus.jobsFor(application, controller.system()).stream() .collect(toMap(job -> job, job -> jobStatus(job), (j1, j2) -> { throw new IllegalArgumentException("Duplicate key " + j1.id()); }, LinkedHashMap::new)), controller.system(), systemVersion, controller.clock().instant()); } /** Adds deployment status to each of the given applications. */ public DeploymentStatusList deploymentStatuses(ApplicationList applications, Version systemVersion) { return DeploymentStatusList.from(applications.asList().stream() .map(application -> deploymentStatus(application, systemVersion)) .collect(toUnmodifiableList())); } /** Adds deployment status to each of the given applications. Calling this will do an implicit read of the controller's version status */ public DeploymentStatusList deploymentStatuses(ApplicationList applications) { return deploymentStatuses(applications, controller.systemVersion()); } /** Changes the status of the given step, for the given run, provided it is still active. 
*/ public void update(RunId id, RunStatus status, LockedStep step) { locked(id, run -> run.with(status, step)); } /** Invoked when starting the step */ public void setStartTimestamp(RunId id, Instant timestamp, LockedStep step) { locked(id, run -> run.with(timestamp, step)); } /** Changes the status of the given run to inactive, and stores it as a historic run. */ public void finish(RunId id) { locked(id, run -> { Run finishedRun = run.finished(controller.clock().instant()); locked(id.application(), id.type(), runs -> { runs.put(run.id(), finishedRun); long last = id.number(); long successes = runs.values().stream().filter(old -> old.status() == RunStatus.success).count(); var oldEntries = runs.entrySet().iterator(); for (var old = oldEntries.next(); old.getKey().number() <= last - historyLength || old.getValue().start().isBefore(controller.clock().instant().minus(maxHistoryAge)); old = oldEntries.next()) { if ( successes == 1 && old.getValue().status() == RunStatus.success && ! old.getValue().start().isBefore(controller.clock().instant().minus(maxHistoryAge))) { oldEntries.next(); continue; } logs.delete(old.getKey()); oldEntries.remove(); } }); logs.flush(id); metric.jobFinished(run.id().job(), finishedRun.status()); return finishedRun; }); } /** Marks the given run as aborted; no further normal steps will run, but run-always steps will try to succeed. */ public void abort(RunId id) { locked(id, run -> run.aborted()); } /** * Accepts and stores a new application package and test jar pair under a generated application version key. 
*/ public ApplicationVersion submit(TenantAndApplicationId id, Optional<SourceRevision> revision, Optional<String> authorEmail, Optional<String> sourceUrl, long projectId, ApplicationPackage applicationPackage, byte[] testPackageBytes) { AtomicReference<ApplicationVersion> version = new AtomicReference<>(); controller.applications().lockApplicationOrThrow(id, application -> { long run = 1 + application.get().latestVersion() .map(latestVersion -> latestVersion.buildNumber().getAsLong()) .orElse(0L); version.set(ApplicationVersion.from(revision, run, authorEmail, applicationPackage.compileVersion(), applicationPackage.buildTime(), sourceUrl, revision.map(SourceRevision::commit))); controller.applications().applicationStore().put(id.tenant(), id.application(), version.get(), applicationPackage.zippedContent()); controller.applications().applicationStore().putTester(id.tenant(), id.application(), version.get(), testPackageBytes); prunePackages(id); controller.applications().storeWithUpdatedConfig(application, applicationPackage); controller.applications().deploymentTrigger().notifyOfSubmission(id, version.get(), projectId); }); return version.get(); } /** Orders a run of the given type, or throws an IllegalStateException if that job type is already running. */ public void start(ApplicationId id, JobType type, Versions versions) { start(id, type, versions, JobProfile.of(type)); } /** Orders a run of the given type, or throws an IllegalStateException if that job type is already running. 
*/ public void start(ApplicationId id, JobType type, Versions versions, JobProfile profile) { locked(id, type, __ -> { Optional<Run> last = last(id, type); if (last.flatMap(run -> active(run.id())).isPresent()) throw new IllegalStateException("Can not start " + type + " for " + id + "; it is already running!"); RunId newId = new RunId(id, type, last.map(run -> run.id().number()).orElse(0L) + 1); curator.writeLastRun(Run.initial(newId, versions, controller.clock().instant(), profile)); metric.jobStarted(newId.job()); }); } /** Stores the given package and starts a deployment of it, after aborting any such ongoing deployment. */ public void deploy(ApplicationId id, JobType type, Optional<Version> platform, ApplicationPackage applicationPackage) { controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(id), application -> { if ( ! application.get().instances().containsKey(id.instance())) application = controller.applications().withNewInstance(application, id); controller.applications().store(application); }); last(id, type).filter(run -> ! run.hasEnded()).ifPresent(run -> abortAndWait(run.id())); controller.applications().lockApplicationOrThrow(TenantAndApplicationId.from(id), application -> { controller.applications().applicationStore().putDev(id, type.zone(controller.system()), applicationPackage.zippedContent()); start(id, type, new Versions(platform.orElse(applicationPackage.deploymentSpec().majorVersion() .flatMap(controller.applications()::lastCompatibleVersion) .orElseGet(controller::systemVersion)), ApplicationVersion.unknown, Optional.empty(), Optional.empty()), JobProfile.development); }); locked(id, type, __ -> { runner.get().accept(last(id, type).get()); }); } /** Aborts a run and waits for it complete. */ private void abortAndWait(RunId id) { abort(id); runner.get().accept(last(id.application(), id.type()).get()); while ( ! 
last(id.application(), id.type()).get().hasEnded()) { try { Thread.sleep(100); } catch (InterruptedException e) { Thread.currentThread().interrupt(); throw new RuntimeException(e); } } } /** Deletes run data and tester deployments for applications which are unknown, or no longer built internally. */ public void collectGarbage() { Set<ApplicationId> applicationsToBuild = new HashSet<>(instances()); curator.applicationsWithJobs().stream() .filter(id -> ! applicationsToBuild.contains(id)) .forEach(id -> { try { TesterId tester = TesterId.of(id); for (JobType type : jobs(id)) locked(id, type, deactivateTester, __ -> { try (Lock ___ = curator.lock(id, type)) { deactivateTester(tester, type); curator.deleteRunData(id, type); logs.delete(id); } }); } catch (TimeoutException e) { return; } curator.deleteRunData(id); }); } public void deactivateTester(TesterId id, JobType type) { var zone = type.zone(controller.system()); try { controller.serviceRegistry().configServer().deactivate(new DeploymentId(id.id(), zone)); } catch (NotFoundException ignored) { } finally { controller.routing().policies().refresh(id.id(), DeploymentSpec.empty, zone); } } /** Returns a URI which points at a badge showing historic status of given length for the given job type for the given application. */ public URI historicBadge(ApplicationId id, JobType type, int historyLength) { List<Run> runs = new ArrayList<>(runs(id, type).values()); Run lastCompleted = null; if (runs.size() > 0) lastCompleted = runs.get(runs.size() - 1); if (runs.size() > 1 && ! lastCompleted.hasEnded()) lastCompleted = runs.get(runs.size() - 2); return badges.historic(id, Optional.ofNullable(lastCompleted), runs.subList(Math.max(0, runs.size() - historyLength), runs.size())); } /** Returns a URI which points at a badge showing current status for all jobs for the given application. 
*/ public URI overviewBadge(ApplicationId id) { DeploymentSteps steps = new DeploymentSteps(controller.applications().requireApplication(TenantAndApplicationId.from(id)) .deploymentSpec().requireInstance(id.instance()), controller::system); return badges.overview(id, steps.jobs().stream() .map(type -> last(id, type)) .flatMap(Optional::stream) .collect(toList())); } private void prunePackages(TenantAndApplicationId id) { controller.applications().lockApplicationIfPresent(id, application -> { application.get().productionDeployments().values().stream() .flatMap(List::stream) .map(Deployment::applicationVersion) .filter(version -> ! version.isUnknown()) .min(Comparator.comparingLong(applicationVersion -> applicationVersion.buildNumber().getAsLong())) .ifPresent(oldestDeployed -> { controller.applications().applicationStore().prune(id.tenant(), id.application(), oldestDeployed); controller.applications().applicationStore().pruneTesters(id.tenant(), id.application(), oldestDeployed); }); }); } /** Locks all runs and modifies the list of historic runs for the given application and job type. */ private void locked(ApplicationId id, JobType type, Consumer<SortedMap<RunId, Run>> modifications) { try (Lock __ = curator.lock(id, type)) { SortedMap<RunId, Run> runs = curator.readHistoricRuns(id, type); modifications.accept(runs); curator.writeHistoricRuns(id, type, runs.values()); } } /** Locks and modifies the run with the given id, provided it is still active. */ public void locked(RunId id, UnaryOperator<Run> modifications) { try (Lock __ = curator.lock(id.application(), id.type())) { active(id).ifPresent(run -> { run = modifications.apply(run); curator.writeLastRun(run); }); } } /** Locks the given step and checks none of its prerequisites are running, then performs the given actions. 
*/ public void locked(ApplicationId id, JobType type, Step step, Consumer<LockedStep> action) throws TimeoutException { try (Lock lock = curator.lock(id, type, step)) { for (Step prerequisite : step.prerequisites()) try (Lock __ = curator.lock(id, type, prerequisite)) { ; } action.accept(new LockedStep(lock, step)); } } }
How about comparing the types, instead of assuming this is a request type?
private static boolean effectivelyDuplicateOf(FilterBinding accessControlBinding, FilterBinding other) { if (accessControlBinding.chainId().equals(other.chainId())) return false; if (other.type() == FilterBinding.Type.RESPONSE) return false; return accessControlBinding.binding().equals(other.binding()) || (accessControlBinding.binding().path().equals(other.binding().path()) && other.binding().matchesAnyPort()); }
if (other.type() == FilterBinding.Type.RESPONSE) return false;
private static boolean effectivelyDuplicateOf(FilterBinding accessControlBinding, FilterBinding other) { if (accessControlBinding.chainId().equals(other.chainId())) return false; if (other.type() == FilterBinding.Type.RESPONSE) return false; return accessControlBinding.binding().equals(other.binding()) || (accessControlBinding.binding().path().equals(other.binding().path()) && other.binding().matchesAnyPort()); }
class Builder { private final String domain; private boolean readEnabled = false; private boolean writeEnabled = true; private final Set<BindingPattern> excludeBindings = new LinkedHashSet<>(); private Collection<Handler<?>> handlers = Collections.emptyList(); public Builder(String domain) { this.domain = domain; } public Builder readEnabled(boolean readEnabled) { this.readEnabled = readEnabled; return this; } public Builder writeEnabled(boolean writeEnabled) { this.writeEnabled = writeEnabled; return this; } public Builder excludeBinding(BindingPattern binding) { this.excludeBindings.add(binding); return this; } public Builder setHandlers(ApplicationContainerCluster cluster) { this.handlers = cluster.getHandlers(); return this; } public AccessControl build() { return new AccessControl(domain, writeEnabled, readEnabled, excludeBindings, handlers); } }
class Builder { private final String domain; private boolean readEnabled = false; private boolean writeEnabled = true; private final Set<BindingPattern> excludeBindings = new LinkedHashSet<>(); private Collection<Handler<?>> handlers = Collections.emptyList(); public Builder(String domain) { this.domain = domain; } public Builder readEnabled(boolean readEnabled) { this.readEnabled = readEnabled; return this; } public Builder writeEnabled(boolean writeEnabled) { this.writeEnabled = writeEnabled; return this; } public Builder excludeBinding(BindingPattern binding) { this.excludeBindings.add(binding); return this; } public Builder setHandlers(ApplicationContainerCluster cluster) { this.handlers = cluster.getHandlers(); return this; } public AccessControl build() { return new AccessControl(domain, writeEnabled, readEnabled, excludeBindings, handlers); } }
Would prefer explicit check for `"response-chain"` and throw otherwise, just to be absolutely future-proof.
private List<FilterBinding> readFilterBindings(Element filteringSpec) { List<FilterBinding> result = new ArrayList<>(); for (Element child: XML.getChildren(filteringSpec)) { String tagName = child.getTagName(); if ((tagName.equals("request-chain") || tagName.equals("response-chain"))) { ComponentSpecification chainId = XmlHelper.getIdRef(child); for (Element bindingSpec: XML.getChildren(child, "binding")) { String binding = XML.getValue(bindingSpec); FilterBinding.Type type = tagName.equals("request-chain") ? FilterBinding.Type.REQUEST : FilterBinding.Type.RESPONSE; result.add(FilterBinding.create(type, chainId, UserBindingPattern.fromPattern(binding))); } } } return result; }
FilterBinding.Type type = tagName.equals("request-chain") ? FilterBinding.Type.REQUEST : FilterBinding.Type.RESPONSE;
private List<FilterBinding> readFilterBindings(Element filteringSpec) { List<FilterBinding> result = new ArrayList<>(); for (Element child: XML.getChildren(filteringSpec)) { String tagName = child.getTagName(); if (VALID_FILTER_CHAIN_TAG_NAMES.contains(tagName)) { ComponentSpecification chainId = XmlHelper.getIdRef(child); for (Element bindingSpec: XML.getChildren(child, "binding")) { String binding = XML.getValue(bindingSpec); result.add(FilterBinding.create(toFilterBindingType(tagName), chainId, UserBindingPattern.fromPattern(binding))); } } } return result; }
class HttpBuilder extends VespaDomBuilder.DomConfigProducerBuilder<Http> { @Override protected Http doBuild(DeployState deployState, AbstractConfigProducer ancestor, Element spec) { FilterChains filterChains; List<FilterBinding> bindings = new ArrayList<>(); AccessControl accessControl = null; Element filteringElem = XML.getChild(spec, "filtering"); if (filteringElem != null) { filterChains = new FilterChainsBuilder().build(deployState, ancestor, filteringElem); bindings = readFilterBindings(filteringElem); Element accessControlElem = XML.getChild(filteringElem, "access-control"); if (accessControlElem != null) { accessControl = buildAccessControl(deployState, ancestor, accessControlElem); } } else { filterChains = new FilterChainsBuilder().newChainsInstance(ancestor); } Http http = new Http(filterChains); http.getBindings().addAll(bindings); http.setHttpServer(new JettyHttpServerBuilder().build(deployState, ancestor, spec)); if (accessControl != null) { accessControl.configureHttpFilterChains(http); } return http; } private AccessControl buildAccessControl(DeployState deployState, AbstractConfigProducer ancestor, Element accessControlElem) { AthenzDomain domain = getAccessControlDomain(deployState, accessControlElem); AccessControl.Builder builder = new AccessControl.Builder(domain.value()); getContainerCluster(ancestor).ifPresent(builder::setHandlers); XmlHelper.getOptionalAttribute(accessControlElem, "read").ifPresent( readAttr -> builder.readEnabled(Boolean.valueOf(readAttr))); XmlHelper.getOptionalAttribute(accessControlElem, "write").ifPresent( writeAttr -> builder.writeEnabled(Boolean.valueOf(writeAttr))); Element excludeElem = XML.getChild(accessControlElem, "exclude"); if (excludeElem != null) { XML.getChildren(excludeElem, "binding").stream() .map(xml -> UserBindingPattern.fromPattern(XML.getValue(xml))) .forEach(builder::excludeBinding); } return builder.build(); } private static AthenzDomain getAccessControlDomain(DeployState deployState, Element 
accessControlElem) { AthenzDomain tenantDomain = deployState.getProperties().athenzDomain().orElse(null); AthenzDomain explicitDomain = XmlHelper.getOptionalAttribute(accessControlElem, "domain") .map(AthenzDomain::from) .orElse(null); if (tenantDomain == null) { if (explicitDomain == null) { throw new IllegalStateException("No Athenz domain provided for 'access-control'"); } deployState.getDeployLogger().log(Level.WARNING, "Athenz tenant is not provided by deploy call. This will soon be handled as failure."); } if (explicitDomain != null) { if (tenantDomain != null && !explicitDomain.equals(tenantDomain)) { throw new IllegalArgumentException( String.format("Domain in access-control ('%s') does not match tenant domain ('%s')", explicitDomain.value(), tenantDomain.value())); } deployState.getDeployLogger().log(Level.WARNING, "Domain in 'access-control' is deprecated and will be removed soon"); } return tenantDomain != null ? tenantDomain : explicitDomain; } private static Optional<ApplicationContainerCluster> getContainerCluster(AbstractConfigProducer configProducer) { AbstractConfigProducer currentProducer = configProducer; while (! ApplicationContainerCluster.class.isAssignableFrom(currentProducer.getClass())) { currentProducer = currentProducer.getParent(); if (currentProducer == null) return Optional.empty(); } return Optional.of((ApplicationContainerCluster) currentProducer); } static int readPort(ModelElement spec, boolean isHosted, DeployLogger logger) { Integer port = spec.integerAttribute("port"); if (port == null) return Defaults.getDefaults().vespaWebServicePort(); if (port < 0) throw new IllegalArgumentException("Invalid port " + port); int legalPortInHostedVespa = Container.BASEPORT; if (isHosted && port != legalPortInHostedVespa && ! 
spec.booleanAttribute("required", false)) { throw new IllegalArgumentException("Illegal port " + port + " in http server '" + spec.stringAttribute("id") + "'" + ": Port must be set to " + legalPortInHostedVespa); } return port; } }
class HttpBuilder extends VespaDomBuilder.DomConfigProducerBuilder<Http> { static final String REQUEST_CHAIN_TAG_NAME = "request-chain"; static final String RESPONSE_CHAIN_TAG_NAME = "response-chain"; static final List<String> VALID_FILTER_CHAIN_TAG_NAMES = List.of(REQUEST_CHAIN_TAG_NAME, RESPONSE_CHAIN_TAG_NAME); @Override protected Http doBuild(DeployState deployState, AbstractConfigProducer ancestor, Element spec) { FilterChains filterChains; List<FilterBinding> bindings = new ArrayList<>(); AccessControl accessControl = null; Element filteringElem = XML.getChild(spec, "filtering"); if (filteringElem != null) { filterChains = new FilterChainsBuilder().build(deployState, ancestor, filteringElem); bindings = readFilterBindings(filteringElem); Element accessControlElem = XML.getChild(filteringElem, "access-control"); if (accessControlElem != null) { accessControl = buildAccessControl(deployState, ancestor, accessControlElem); } } else { filterChains = new FilterChainsBuilder().newChainsInstance(ancestor); } Http http = new Http(filterChains); http.getBindings().addAll(bindings); http.setHttpServer(new JettyHttpServerBuilder().build(deployState, ancestor, spec)); if (accessControl != null) { accessControl.configureHttpFilterChains(http); } return http; } private AccessControl buildAccessControl(DeployState deployState, AbstractConfigProducer ancestor, Element accessControlElem) { AthenzDomain domain = getAccessControlDomain(deployState, accessControlElem); AccessControl.Builder builder = new AccessControl.Builder(domain.value()); getContainerCluster(ancestor).ifPresent(builder::setHandlers); XmlHelper.getOptionalAttribute(accessControlElem, "read").ifPresent( readAttr -> builder.readEnabled(Boolean.valueOf(readAttr))); XmlHelper.getOptionalAttribute(accessControlElem, "write").ifPresent( writeAttr -> builder.writeEnabled(Boolean.valueOf(writeAttr))); Element excludeElem = XML.getChild(accessControlElem, "exclude"); if (excludeElem != null) { 
XML.getChildren(excludeElem, "binding").stream() .map(xml -> UserBindingPattern.fromPattern(XML.getValue(xml))) .forEach(builder::excludeBinding); } return builder.build(); } private static AthenzDomain getAccessControlDomain(DeployState deployState, Element accessControlElem) { AthenzDomain tenantDomain = deployState.getProperties().athenzDomain().orElse(null); AthenzDomain explicitDomain = XmlHelper.getOptionalAttribute(accessControlElem, "domain") .map(AthenzDomain::from) .orElse(null); if (tenantDomain == null) { if (explicitDomain == null) { throw new IllegalStateException("No Athenz domain provided for 'access-control'"); } deployState.getDeployLogger().log(Level.WARNING, "Athenz tenant is not provided by deploy call. This will soon be handled as failure."); } if (explicitDomain != null) { if (tenantDomain != null && !explicitDomain.equals(tenantDomain)) { throw new IllegalArgumentException( String.format("Domain in access-control ('%s') does not match tenant domain ('%s')", explicitDomain.value(), tenantDomain.value())); } deployState.getDeployLogger().log(Level.WARNING, "Domain in 'access-control' is deprecated and will be removed soon"); } return tenantDomain != null ? tenantDomain : explicitDomain; } private static Optional<ApplicationContainerCluster> getContainerCluster(AbstractConfigProducer configProducer) { AbstractConfigProducer currentProducer = configProducer; while (! 
ApplicationContainerCluster.class.isAssignableFrom(currentProducer.getClass())) { currentProducer = currentProducer.getParent(); if (currentProducer == null) return Optional.empty(); } return Optional.of((ApplicationContainerCluster) currentProducer); } private static FilterBinding.Type toFilterBindingType(String chainTag) { switch (chainTag) { case REQUEST_CHAIN_TAG_NAME: return FilterBinding.Type.REQUEST; case RESPONSE_CHAIN_TAG_NAME: return FilterBinding.Type.RESPONSE; default: throw new IllegalArgumentException("Unknown filter chain tag: " + chainTag); } } static int readPort(ModelElement spec, boolean isHosted, DeployLogger logger) { Integer port = spec.integerAttribute("port"); if (port == null) return Defaults.getDefaults().vespaWebServicePort(); if (port < 0) throw new IllegalArgumentException("Invalid port " + port); int legalPortInHostedVespa = Container.BASEPORT; if (isHosted && port != legalPortInHostedVespa && ! spec.booleanAttribute("required", false)) { throw new IllegalArgumentException("Illegal port " + port + " in http server '" + spec.stringAttribute("id") + "'" + ": Port must be set to " + legalPortInHostedVespa); } return port; } }
The first parameter is assumed to be an access control binding, meaning it cannot be a response filter binding.
private static boolean effectivelyDuplicateOf(FilterBinding accessControlBinding, FilterBinding other) { if (accessControlBinding.chainId().equals(other.chainId())) return false; if (other.type() == FilterBinding.Type.RESPONSE) return false; return accessControlBinding.binding().equals(other.binding()) || (accessControlBinding.binding().path().equals(other.binding().path()) && other.binding().matchesAnyPort()); }
if (other.type() == FilterBinding.Type.RESPONSE) return false;
private static boolean effectivelyDuplicateOf(FilterBinding accessControlBinding, FilterBinding other) { if (accessControlBinding.chainId().equals(other.chainId())) return false; if (other.type() == FilterBinding.Type.RESPONSE) return false; return accessControlBinding.binding().equals(other.binding()) || (accessControlBinding.binding().path().equals(other.binding().path()) && other.binding().matchesAnyPort()); }
class Builder { private final String domain; private boolean readEnabled = false; private boolean writeEnabled = true; private final Set<BindingPattern> excludeBindings = new LinkedHashSet<>(); private Collection<Handler<?>> handlers = Collections.emptyList(); public Builder(String domain) { this.domain = domain; } public Builder readEnabled(boolean readEnabled) { this.readEnabled = readEnabled; return this; } public Builder writeEnabled(boolean writeEnabled) { this.writeEnabled = writeEnabled; return this; } public Builder excludeBinding(BindingPattern binding) { this.excludeBindings.add(binding); return this; } public Builder setHandlers(ApplicationContainerCluster cluster) { this.handlers = cluster.getHandlers(); return this; } public AccessControl build() { return new AccessControl(domain, writeEnabled, readEnabled, excludeBindings, handlers); } }
class Builder { private final String domain; private boolean readEnabled = false; private boolean writeEnabled = true; private final Set<BindingPattern> excludeBindings = new LinkedHashSet<>(); private Collection<Handler<?>> handlers = Collections.emptyList(); public Builder(String domain) { this.domain = domain; } public Builder readEnabled(boolean readEnabled) { this.readEnabled = readEnabled; return this; } public Builder writeEnabled(boolean writeEnabled) { this.writeEnabled = writeEnabled; return this; } public Builder excludeBinding(BindingPattern binding) { this.excludeBindings.add(binding); return this; } public Builder setHandlers(ApplicationContainerCluster cluster) { this.handlers = cluster.getHandlers(); return this; } public AccessControl build() { return new AccessControl(domain, writeEnabled, readEnabled, excludeBindings, handlers); } }
I have improved the logic in a new commit.
private List<FilterBinding> readFilterBindings(Element filteringSpec) { List<FilterBinding> result = new ArrayList<>(); for (Element child: XML.getChildren(filteringSpec)) { String tagName = child.getTagName(); if ((tagName.equals("request-chain") || tagName.equals("response-chain"))) { ComponentSpecification chainId = XmlHelper.getIdRef(child); for (Element bindingSpec: XML.getChildren(child, "binding")) { String binding = XML.getValue(bindingSpec); FilterBinding.Type type = tagName.equals("request-chain") ? FilterBinding.Type.REQUEST : FilterBinding.Type.RESPONSE; result.add(FilterBinding.create(type, chainId, UserBindingPattern.fromPattern(binding))); } } } return result; }
FilterBinding.Type type = tagName.equals("request-chain") ? FilterBinding.Type.REQUEST : FilterBinding.Type.RESPONSE;
private List<FilterBinding> readFilterBindings(Element filteringSpec) { List<FilterBinding> result = new ArrayList<>(); for (Element child: XML.getChildren(filteringSpec)) { String tagName = child.getTagName(); if (VALID_FILTER_CHAIN_TAG_NAMES.contains(tagName)) { ComponentSpecification chainId = XmlHelper.getIdRef(child); for (Element bindingSpec: XML.getChildren(child, "binding")) { String binding = XML.getValue(bindingSpec); result.add(FilterBinding.create(toFilterBindingType(tagName), chainId, UserBindingPattern.fromPattern(binding))); } } } return result; }
class HttpBuilder extends VespaDomBuilder.DomConfigProducerBuilder<Http> { @Override protected Http doBuild(DeployState deployState, AbstractConfigProducer ancestor, Element spec) { FilterChains filterChains; List<FilterBinding> bindings = new ArrayList<>(); AccessControl accessControl = null; Element filteringElem = XML.getChild(spec, "filtering"); if (filteringElem != null) { filterChains = new FilterChainsBuilder().build(deployState, ancestor, filteringElem); bindings = readFilterBindings(filteringElem); Element accessControlElem = XML.getChild(filteringElem, "access-control"); if (accessControlElem != null) { accessControl = buildAccessControl(deployState, ancestor, accessControlElem); } } else { filterChains = new FilterChainsBuilder().newChainsInstance(ancestor); } Http http = new Http(filterChains); http.getBindings().addAll(bindings); http.setHttpServer(new JettyHttpServerBuilder().build(deployState, ancestor, spec)); if (accessControl != null) { accessControl.configureHttpFilterChains(http); } return http; } private AccessControl buildAccessControl(DeployState deployState, AbstractConfigProducer ancestor, Element accessControlElem) { AthenzDomain domain = getAccessControlDomain(deployState, accessControlElem); AccessControl.Builder builder = new AccessControl.Builder(domain.value()); getContainerCluster(ancestor).ifPresent(builder::setHandlers); XmlHelper.getOptionalAttribute(accessControlElem, "read").ifPresent( readAttr -> builder.readEnabled(Boolean.valueOf(readAttr))); XmlHelper.getOptionalAttribute(accessControlElem, "write").ifPresent( writeAttr -> builder.writeEnabled(Boolean.valueOf(writeAttr))); Element excludeElem = XML.getChild(accessControlElem, "exclude"); if (excludeElem != null) { XML.getChildren(excludeElem, "binding").stream() .map(xml -> UserBindingPattern.fromPattern(XML.getValue(xml))) .forEach(builder::excludeBinding); } return builder.build(); } private static AthenzDomain getAccessControlDomain(DeployState deployState, Element 
accessControlElem) { AthenzDomain tenantDomain = deployState.getProperties().athenzDomain().orElse(null); AthenzDomain explicitDomain = XmlHelper.getOptionalAttribute(accessControlElem, "domain") .map(AthenzDomain::from) .orElse(null); if (tenantDomain == null) { if (explicitDomain == null) { throw new IllegalStateException("No Athenz domain provided for 'access-control'"); } deployState.getDeployLogger().log(Level.WARNING, "Athenz tenant is not provided by deploy call. This will soon be handled as failure."); } if (explicitDomain != null) { if (tenantDomain != null && !explicitDomain.equals(tenantDomain)) { throw new IllegalArgumentException( String.format("Domain in access-control ('%s') does not match tenant domain ('%s')", explicitDomain.value(), tenantDomain.value())); } deployState.getDeployLogger().log(Level.WARNING, "Domain in 'access-control' is deprecated and will be removed soon"); } return tenantDomain != null ? tenantDomain : explicitDomain; } private static Optional<ApplicationContainerCluster> getContainerCluster(AbstractConfigProducer configProducer) { AbstractConfigProducer currentProducer = configProducer; while (! ApplicationContainerCluster.class.isAssignableFrom(currentProducer.getClass())) { currentProducer = currentProducer.getParent(); if (currentProducer == null) return Optional.empty(); } return Optional.of((ApplicationContainerCluster) currentProducer); } static int readPort(ModelElement spec, boolean isHosted, DeployLogger logger) { Integer port = spec.integerAttribute("port"); if (port == null) return Defaults.getDefaults().vespaWebServicePort(); if (port < 0) throw new IllegalArgumentException("Invalid port " + port); int legalPortInHostedVespa = Container.BASEPORT; if (isHosted && port != legalPortInHostedVespa && ! 
spec.booleanAttribute("required", false)) { throw new IllegalArgumentException("Illegal port " + port + " in http server '" + spec.stringAttribute("id") + "'" + ": Port must be set to " + legalPortInHostedVespa); } return port; } }
class HttpBuilder extends VespaDomBuilder.DomConfigProducerBuilder<Http> { static final String REQUEST_CHAIN_TAG_NAME = "request-chain"; static final String RESPONSE_CHAIN_TAG_NAME = "response-chain"; static final List<String> VALID_FILTER_CHAIN_TAG_NAMES = List.of(REQUEST_CHAIN_TAG_NAME, RESPONSE_CHAIN_TAG_NAME); @Override protected Http doBuild(DeployState deployState, AbstractConfigProducer ancestor, Element spec) { FilterChains filterChains; List<FilterBinding> bindings = new ArrayList<>(); AccessControl accessControl = null; Element filteringElem = XML.getChild(spec, "filtering"); if (filteringElem != null) { filterChains = new FilterChainsBuilder().build(deployState, ancestor, filteringElem); bindings = readFilterBindings(filteringElem); Element accessControlElem = XML.getChild(filteringElem, "access-control"); if (accessControlElem != null) { accessControl = buildAccessControl(deployState, ancestor, accessControlElem); } } else { filterChains = new FilterChainsBuilder().newChainsInstance(ancestor); } Http http = new Http(filterChains); http.getBindings().addAll(bindings); http.setHttpServer(new JettyHttpServerBuilder().build(deployState, ancestor, spec)); if (accessControl != null) { accessControl.configureHttpFilterChains(http); } return http; } private AccessControl buildAccessControl(DeployState deployState, AbstractConfigProducer ancestor, Element accessControlElem) { AthenzDomain domain = getAccessControlDomain(deployState, accessControlElem); AccessControl.Builder builder = new AccessControl.Builder(domain.value()); getContainerCluster(ancestor).ifPresent(builder::setHandlers); XmlHelper.getOptionalAttribute(accessControlElem, "read").ifPresent( readAttr -> builder.readEnabled(Boolean.valueOf(readAttr))); XmlHelper.getOptionalAttribute(accessControlElem, "write").ifPresent( writeAttr -> builder.writeEnabled(Boolean.valueOf(writeAttr))); Element excludeElem = XML.getChild(accessControlElem, "exclude"); if (excludeElem != null) { 
XML.getChildren(excludeElem, "binding").stream() .map(xml -> UserBindingPattern.fromPattern(XML.getValue(xml))) .forEach(builder::excludeBinding); } return builder.build(); } private static AthenzDomain getAccessControlDomain(DeployState deployState, Element accessControlElem) { AthenzDomain tenantDomain = deployState.getProperties().athenzDomain().orElse(null); AthenzDomain explicitDomain = XmlHelper.getOptionalAttribute(accessControlElem, "domain") .map(AthenzDomain::from) .orElse(null); if (tenantDomain == null) { if (explicitDomain == null) { throw new IllegalStateException("No Athenz domain provided for 'access-control'"); } deployState.getDeployLogger().log(Level.WARNING, "Athenz tenant is not provided by deploy call. This will soon be handled as failure."); } if (explicitDomain != null) { if (tenantDomain != null && !explicitDomain.equals(tenantDomain)) { throw new IllegalArgumentException( String.format("Domain in access-control ('%s') does not match tenant domain ('%s')", explicitDomain.value(), tenantDomain.value())); } deployState.getDeployLogger().log(Level.WARNING, "Domain in 'access-control' is deprecated and will be removed soon"); } return tenantDomain != null ? tenantDomain : explicitDomain; } private static Optional<ApplicationContainerCluster> getContainerCluster(AbstractConfigProducer configProducer) { AbstractConfigProducer currentProducer = configProducer; while (! 
ApplicationContainerCluster.class.isAssignableFrom(currentProducer.getClass())) { currentProducer = currentProducer.getParent(); if (currentProducer == null) return Optional.empty(); } return Optional.of((ApplicationContainerCluster) currentProducer); } private static FilterBinding.Type toFilterBindingType(String chainTag) { switch (chainTag) { case REQUEST_CHAIN_TAG_NAME: return FilterBinding.Type.REQUEST; case RESPONSE_CHAIN_TAG_NAME: return FilterBinding.Type.RESPONSE; default: throw new IllegalArgumentException("Unknown filter chain tag: " + chainTag); } } static int readPort(ModelElement spec, boolean isHosted, DeployLogger logger) { Integer port = spec.integerAttribute("port"); if (port == null) return Defaults.getDefaults().vespaWebServicePort(); if (port < 0) throw new IllegalArgumentException("Invalid port " + port); int legalPortInHostedVespa = Container.BASEPORT; if (isHosted && port != legalPortInHostedVespa && ! spec.booleanAttribute("required", false)) { throw new IllegalArgumentException("Illegal port " + port + " in http server '" + spec.stringAttribute("id") + "'" + ": Port must be set to " + legalPortInHostedVespa); } return port; } }
Well, it's a private in the `AccessControl` class, so I guess that's OK.
private static boolean effectivelyDuplicateOf(FilterBinding accessControlBinding, FilterBinding other) { if (accessControlBinding.chainId().equals(other.chainId())) return false; if (other.type() == FilterBinding.Type.RESPONSE) return false; return accessControlBinding.binding().equals(other.binding()) || (accessControlBinding.binding().path().equals(other.binding().path()) && other.binding().matchesAnyPort()); }
if (other.type() == FilterBinding.Type.RESPONSE) return false;
private static boolean effectivelyDuplicateOf(FilterBinding accessControlBinding, FilterBinding other) { if (accessControlBinding.chainId().equals(other.chainId())) return false; if (other.type() == FilterBinding.Type.RESPONSE) return false; return accessControlBinding.binding().equals(other.binding()) || (accessControlBinding.binding().path().equals(other.binding().path()) && other.binding().matchesAnyPort()); }
class Builder { private final String domain; private boolean readEnabled = false; private boolean writeEnabled = true; private final Set<BindingPattern> excludeBindings = new LinkedHashSet<>(); private Collection<Handler<?>> handlers = Collections.emptyList(); public Builder(String domain) { this.domain = domain; } public Builder readEnabled(boolean readEnabled) { this.readEnabled = readEnabled; return this; } public Builder writeEnabled(boolean writeEnabled) { this.writeEnabled = writeEnabled; return this; } public Builder excludeBinding(BindingPattern binding) { this.excludeBindings.add(binding); return this; } public Builder setHandlers(ApplicationContainerCluster cluster) { this.handlers = cluster.getHandlers(); return this; } public AccessControl build() { return new AccessControl(domain, writeEnabled, readEnabled, excludeBindings, handlers); } }
class Builder { private final String domain; private boolean readEnabled = false; private boolean writeEnabled = true; private final Set<BindingPattern> excludeBindings = new LinkedHashSet<>(); private Collection<Handler<?>> handlers = Collections.emptyList(); public Builder(String domain) { this.domain = domain; } public Builder readEnabled(boolean readEnabled) { this.readEnabled = readEnabled; return this; } public Builder writeEnabled(boolean writeEnabled) { this.writeEnabled = writeEnabled; return this; } public Builder excludeBinding(BindingPattern binding) { this.excludeBindings.add(binding); return this; } public Builder setHandlers(ApplicationContainerCluster cluster) { this.handlers = cluster.getHandlers(); return this; } public AccessControl build() { return new AccessControl(domain, writeEnabled, readEnabled, excludeBindings, handlers); } }
Shouldn't this be numMsgWithOriginalSequenceId >= 1 ?
public void processingDone(List<Processing> processings) { List<DocumentMessage> messages = new ArrayList<>(); if (messageFactory != null) { for (Processing processing : processings) { for (DocumentOperation documentOperation : processing.getDocumentOperations()) { messages.add(messageFactory.fromDocumentOperation(processing, documentOperation)); } } } log.log(Level.FINE, () ->"Forwarding " + messages.size() + " messages from " + processings.size() + " processings."); if (messages.isEmpty()) { dispatchResponse(Response.Status.OK); return; } long inputSequenceId = requestMsg.getSequenceId(); ResponseMerger responseHandler = new ResponseMerger(requestMsg, messages.size(), this); int numMsgWithOriginalSequenceId = 0; for (Message message : messages) { if (message.getSequenceId() == inputSequenceId) numMsgWithOriginalSequenceId++; } for (Message message : messages) { String path = internalNoThrottledSourcePath; if ((numMsgWithOriginalSequenceId == 1) && (message.getSequenceId() == inputSequenceId)) path = getUri().getPath(); dispatchRequest(message, path, responseHandler); } }
if ((numMsgWithOriginalSequenceId == 1) && (message.getSequenceId() == inputSequenceId))
public void processingDone(List<Processing> processings) { List<DocumentMessage> messages = new ArrayList<>(); if (messageFactory != null) { for (Processing processing : processings) { for (DocumentOperation documentOperation : processing.getDocumentOperations()) { messages.add(messageFactory.fromDocumentOperation(processing, documentOperation)); } } } log.log(Level.FINE, () ->"Forwarding " + messages.size() + " messages from " + processings.size() + " processings."); if (messages.isEmpty()) { dispatchResponse(Response.Status.OK); return; } long inputSequenceId = requestMsg.getSequenceId(); ResponseMerger responseHandler = new ResponseMerger(requestMsg, messages.size(), this); int numMsgWithOriginalSequenceId = 0; for (Message message : messages) { if (message.getSequenceId() == inputSequenceId) numMsgWithOriginalSequenceId++; } for (Message message : messages) { String path = internalNoThrottledSourcePath; if ((numMsgWithOriginalSequenceId == 1) && (message.getSequenceId() == inputSequenceId)) path = getUri().getPath(); dispatchRequest(message, path, responseHandler); } }
class MbusRequestContext implements RequestContext, ResponseHandler { private final static Logger log = Logger.getLogger(MbusRequestContext.class.getName()); private final static CopyOnWriteHashMap<String, URI> uriCache = new CopyOnWriteHashMap<>(); private final AtomicBoolean deserialized = new AtomicBoolean(false); private final AtomicBoolean responded = new AtomicBoolean(false); private final ProcessingFactory processingFactory; private final MessageFactory messageFactory; private final MbusRequest request; private final DocumentMessage requestMsg; private final ResponseHandler responseHandler; public final static String internalNoThrottledSource = "internalNoThrottledSource"; private final static String internalNoThrottledSourcePath = "/" + internalNoThrottledSource; public MbusRequestContext(MbusRequest request, ResponseHandler responseHandler, ComponentRegistry<DocprocService> docprocServiceComponentRegistry, ComponentRegistry<AbstractConcreteDocumentFactory> docFactoryRegistry, ContainerDocumentConfig containerDocConfig) { this.request = request; this.requestMsg = (DocumentMessage)request.getMessage(); this.responseHandler = responseHandler; this.processingFactory = new ProcessingFactory(docprocServiceComponentRegistry, docFactoryRegistry, containerDocConfig, getServiceName()); this.messageFactory = newMessageFactory(requestMsg); } @Override public List<Processing> getProcessings() { if (deserialized.getAndSet(true)) { return Collections.emptyList(); } return processingFactory.fromMessage(requestMsg); } @Override public void skip() { if (deserialized.get()) throw new IllegalStateException("Can not skip processing after deserialization"); dispatchRequest(requestMsg, request.getUri().getPath(), responseHandler); } @Override @Override public void processingFailed(Exception exception) { ErrorCode errorCode; if (exception instanceof TransientFailureException) { errorCode = ErrorCode.ERROR_ABORTED; } else { errorCode = ErrorCode.ERROR_PROCESSING_FAILURE; } 
StringBuilder errorMsg = new StringBuilder("Processing failed."); if (exception instanceof HandledProcessingException) { errorMsg.append(" Error message: ").append(exception.getMessage()); } else if (exception != null) { errorMsg.append(" Error message: ").append(exception.toString()); } errorMsg.append(" -- See Vespa log for details."); processingFailed(errorCode, errorMsg.toString()); } @Override public void processingFailed(ErrorCode errorCode, String errorMsg) { MbusResponse response = new MbusResponse(errorCode.getDiscStatus(), requestMsg.createReply()); response.getReply().addError(new com.yahoo.messagebus.Error(errorCode.getDocumentProtocolStatus(), errorMsg)); ResponseDispatch.newInstance(response).dispatch(this); } @Override public URI getUri() { return request.getUri(); } @Override public String getServiceName() { String path = getUri().getPath(); return path.substring(7, path.length()); } @Override public boolean isProcessable() { Message msg = requestMsg; switch (msg.getType()) { case DocumentProtocol.MESSAGE_PUTDOCUMENT: case DocumentProtocol.MESSAGE_UPDATEDOCUMENT: case DocumentProtocol.MESSAGE_REMOVEDOCUMENT: return true; } return false; } @Override public boolean hasExpired() { return requestMsg.isExpired(); } @Override public ContentChannel handleResponse(Response response) { if (responded.getAndSet(true)) { return null; } Reply reply = ((MbusResponse)response).getReply(); reply.swapState(requestMsg); return responseHandler.handleResponse(response); } private void dispatchResponse(int status) { ResponseDispatch.newInstance(new MbusResponse(status, requestMsg.createReply())).dispatch(this); } private void dispatchRequest(Message msg, String uriPath, ResponseHandler handler) { try { new RequestDispatch() { @Override protected Request newRequest() { return new MbusRequest(request, resolveUri(uriPath), msg); } @Override public ContentChannel handleResponse(Response response) { return handler.handleResponse(response); } }.dispatch(); } catch (Exception 
e) { dispatchResponse(Response.Status.INTERNAL_SERVER_ERROR); e.printStackTrace(); } } private static MessageFactory newMessageFactory(DocumentMessage message) { if (message == null) return null; if (message.getRoute() == null || ! message.getRoute().hasHops()) return null; return new MessageFactory(message); } private static URI resolveUri(String path) { URI uri = uriCache.get(path); if (uri == null) { uri = URI.create("mbus: uriCache.put(path, uri); } return uri; } }
class MbusRequestContext implements RequestContext, ResponseHandler { private final static Logger log = Logger.getLogger(MbusRequestContext.class.getName()); private final static CopyOnWriteHashMap<String, URI> uriCache = new CopyOnWriteHashMap<>(); private final AtomicBoolean deserialized = new AtomicBoolean(false); private final AtomicBoolean responded = new AtomicBoolean(false); private final ProcessingFactory processingFactory; private final MessageFactory messageFactory; private final MbusRequest request; private final DocumentMessage requestMsg; private final ResponseHandler responseHandler; public final static String internalNoThrottledSource = "internalNoThrottledSource"; private final static String internalNoThrottledSourcePath = "/" + internalNoThrottledSource; public MbusRequestContext(MbusRequest request, ResponseHandler responseHandler, ComponentRegistry<DocprocService> docprocServiceComponentRegistry, ComponentRegistry<AbstractConcreteDocumentFactory> docFactoryRegistry, ContainerDocumentConfig containerDocConfig) { this.request = request; this.requestMsg = (DocumentMessage)request.getMessage(); this.responseHandler = responseHandler; this.processingFactory = new ProcessingFactory(docprocServiceComponentRegistry, docFactoryRegistry, containerDocConfig, getServiceName()); this.messageFactory = newMessageFactory(requestMsg); } @Override public List<Processing> getProcessings() { if (deserialized.getAndSet(true)) { return Collections.emptyList(); } return processingFactory.fromMessage(requestMsg); } @Override public void skip() { if (deserialized.get()) throw new IllegalStateException("Can not skip processing after deserialization"); dispatchRequest(requestMsg, request.getUri().getPath(), responseHandler); } @Override @Override public void processingFailed(Exception exception) { ErrorCode errorCode; if (exception instanceof TransientFailureException) { errorCode = ErrorCode.ERROR_ABORTED; } else { errorCode = ErrorCode.ERROR_PROCESSING_FAILURE; } 
StringBuilder errorMsg = new StringBuilder("Processing failed."); if (exception instanceof HandledProcessingException) { errorMsg.append(" Error message: ").append(exception.getMessage()); } else if (exception != null) { errorMsg.append(" Error message: ").append(exception.toString()); } errorMsg.append(" -- See Vespa log for details."); processingFailed(errorCode, errorMsg.toString()); } @Override public void processingFailed(ErrorCode errorCode, String errorMsg) { MbusResponse response = new MbusResponse(errorCode.getDiscStatus(), requestMsg.createReply()); response.getReply().addError(new com.yahoo.messagebus.Error(errorCode.getDocumentProtocolStatus(), errorMsg)); ResponseDispatch.newInstance(response).dispatch(this); } @Override public URI getUri() { return request.getUri(); } @Override public String getServiceName() { String path = getUri().getPath(); return path.substring(7, path.length()); } @Override public boolean isProcessable() { Message msg = requestMsg; switch (msg.getType()) { case DocumentProtocol.MESSAGE_PUTDOCUMENT: case DocumentProtocol.MESSAGE_UPDATEDOCUMENT: case DocumentProtocol.MESSAGE_REMOVEDOCUMENT: return true; } return false; } @Override public boolean hasExpired() { return requestMsg.isExpired(); } @Override public ContentChannel handleResponse(Response response) { if (responded.getAndSet(true)) { return null; } Reply reply = ((MbusResponse)response).getReply(); reply.swapState(requestMsg); return responseHandler.handleResponse(response); } private void dispatchResponse(int status) { ResponseDispatch.newInstance(new MbusResponse(status, requestMsg.createReply())).dispatch(this); } private void dispatchRequest(Message msg, String uriPath, ResponseHandler handler) { try { new RequestDispatch() { @Override protected Request newRequest() { return new MbusRequest(request, resolveUri(uriPath), msg); } @Override public ContentChannel handleResponse(Response response) { return handler.handleResponse(response); } }.dispatch(); } catch (Exception 
e) { dispatchResponse(Response.Status.INTERNAL_SERVER_ERROR); e.printStackTrace(); } } private static MessageFactory newMessageFactory(DocumentMessage message) { if (message == null) return null; if (message.getRoute() == null || ! message.getRoute().hasHops()) return null; return new MessageFactory(message); } private static URI resolveUri(String path) { URI uri = uriCache.get(path); if (uri == null) { uri = URI.create("mbus: uriCache.put(path, uri); } return uri; } }
No, you shall only continue on the original path if there is exactly one operation with the same id as the incoming operation. If there are multiple operations with the same id they must always be sequenced. If they are to the original id or not does not matter.
public void processingDone(List<Processing> processings) { List<DocumentMessage> messages = new ArrayList<>(); if (messageFactory != null) { for (Processing processing : processings) { for (DocumentOperation documentOperation : processing.getDocumentOperations()) { messages.add(messageFactory.fromDocumentOperation(processing, documentOperation)); } } } log.log(Level.FINE, () ->"Forwarding " + messages.size() + " messages from " + processings.size() + " processings."); if (messages.isEmpty()) { dispatchResponse(Response.Status.OK); return; } long inputSequenceId = requestMsg.getSequenceId(); ResponseMerger responseHandler = new ResponseMerger(requestMsg, messages.size(), this); int numMsgWithOriginalSequenceId = 0; for (Message message : messages) { if (message.getSequenceId() == inputSequenceId) numMsgWithOriginalSequenceId++; } for (Message message : messages) { String path = internalNoThrottledSourcePath; if ((numMsgWithOriginalSequenceId == 1) && (message.getSequenceId() == inputSequenceId)) path = getUri().getPath(); dispatchRequest(message, path, responseHandler); } }
if ((numMsgWithOriginalSequenceId == 1) && (message.getSequenceId() == inputSequenceId))
public void processingDone(List<Processing> processings) { List<DocumentMessage> messages = new ArrayList<>(); if (messageFactory != null) { for (Processing processing : processings) { for (DocumentOperation documentOperation : processing.getDocumentOperations()) { messages.add(messageFactory.fromDocumentOperation(processing, documentOperation)); } } } log.log(Level.FINE, () ->"Forwarding " + messages.size() + " messages from " + processings.size() + " processings."); if (messages.isEmpty()) { dispatchResponse(Response.Status.OK); return; } long inputSequenceId = requestMsg.getSequenceId(); ResponseMerger responseHandler = new ResponseMerger(requestMsg, messages.size(), this); int numMsgWithOriginalSequenceId = 0; for (Message message : messages) { if (message.getSequenceId() == inputSequenceId) numMsgWithOriginalSequenceId++; } for (Message message : messages) { String path = internalNoThrottledSourcePath; if ((numMsgWithOriginalSequenceId == 1) && (message.getSequenceId() == inputSequenceId)) path = getUri().getPath(); dispatchRequest(message, path, responseHandler); } }
class MbusRequestContext implements RequestContext, ResponseHandler { private final static Logger log = Logger.getLogger(MbusRequestContext.class.getName()); private final static CopyOnWriteHashMap<String, URI> uriCache = new CopyOnWriteHashMap<>(); private final AtomicBoolean deserialized = new AtomicBoolean(false); private final AtomicBoolean responded = new AtomicBoolean(false); private final ProcessingFactory processingFactory; private final MessageFactory messageFactory; private final MbusRequest request; private final DocumentMessage requestMsg; private final ResponseHandler responseHandler; public final static String internalNoThrottledSource = "internalNoThrottledSource"; private final static String internalNoThrottledSourcePath = "/" + internalNoThrottledSource; public MbusRequestContext(MbusRequest request, ResponseHandler responseHandler, ComponentRegistry<DocprocService> docprocServiceComponentRegistry, ComponentRegistry<AbstractConcreteDocumentFactory> docFactoryRegistry, ContainerDocumentConfig containerDocConfig) { this.request = request; this.requestMsg = (DocumentMessage)request.getMessage(); this.responseHandler = responseHandler; this.processingFactory = new ProcessingFactory(docprocServiceComponentRegistry, docFactoryRegistry, containerDocConfig, getServiceName()); this.messageFactory = newMessageFactory(requestMsg); } @Override public List<Processing> getProcessings() { if (deserialized.getAndSet(true)) { return Collections.emptyList(); } return processingFactory.fromMessage(requestMsg); } @Override public void skip() { if (deserialized.get()) throw new IllegalStateException("Can not skip processing after deserialization"); dispatchRequest(requestMsg, request.getUri().getPath(), responseHandler); } @Override @Override public void processingFailed(Exception exception) { ErrorCode errorCode; if (exception instanceof TransientFailureException) { errorCode = ErrorCode.ERROR_ABORTED; } else { errorCode = ErrorCode.ERROR_PROCESSING_FAILURE; } 
StringBuilder errorMsg = new StringBuilder("Processing failed."); if (exception instanceof HandledProcessingException) { errorMsg.append(" Error message: ").append(exception.getMessage()); } else if (exception != null) { errorMsg.append(" Error message: ").append(exception.toString()); } errorMsg.append(" -- See Vespa log for details."); processingFailed(errorCode, errorMsg.toString()); } @Override public void processingFailed(ErrorCode errorCode, String errorMsg) { MbusResponse response = new MbusResponse(errorCode.getDiscStatus(), requestMsg.createReply()); response.getReply().addError(new com.yahoo.messagebus.Error(errorCode.getDocumentProtocolStatus(), errorMsg)); ResponseDispatch.newInstance(response).dispatch(this); } @Override public URI getUri() { return request.getUri(); } @Override public String getServiceName() { String path = getUri().getPath(); return path.substring(7, path.length()); } @Override public boolean isProcessable() { Message msg = requestMsg; switch (msg.getType()) { case DocumentProtocol.MESSAGE_PUTDOCUMENT: case DocumentProtocol.MESSAGE_UPDATEDOCUMENT: case DocumentProtocol.MESSAGE_REMOVEDOCUMENT: return true; } return false; } @Override public boolean hasExpired() { return requestMsg.isExpired(); } @Override public ContentChannel handleResponse(Response response) { if (responded.getAndSet(true)) { return null; } Reply reply = ((MbusResponse)response).getReply(); reply.swapState(requestMsg); return responseHandler.handleResponse(response); } private void dispatchResponse(int status) { ResponseDispatch.newInstance(new MbusResponse(status, requestMsg.createReply())).dispatch(this); } private void dispatchRequest(Message msg, String uriPath, ResponseHandler handler) { try { new RequestDispatch() { @Override protected Request newRequest() { return new MbusRequest(request, resolveUri(uriPath), msg); } @Override public ContentChannel handleResponse(Response response) { return handler.handleResponse(response); } }.dispatch(); } catch (Exception 
e) { dispatchResponse(Response.Status.INTERNAL_SERVER_ERROR); e.printStackTrace(); } } private static MessageFactory newMessageFactory(DocumentMessage message) { if (message == null) return null; if (message.getRoute() == null || ! message.getRoute().hasHops()) return null; return new MessageFactory(message); } private static URI resolveUri(String path) { URI uri = uriCache.get(path); if (uri == null) { uri = URI.create("mbus: uriCache.put(path, uri); } return uri; } }
class MbusRequestContext implements RequestContext, ResponseHandler { private final static Logger log = Logger.getLogger(MbusRequestContext.class.getName()); private final static CopyOnWriteHashMap<String, URI> uriCache = new CopyOnWriteHashMap<>(); private final AtomicBoolean deserialized = new AtomicBoolean(false); private final AtomicBoolean responded = new AtomicBoolean(false); private final ProcessingFactory processingFactory; private final MessageFactory messageFactory; private final MbusRequest request; private final DocumentMessage requestMsg; private final ResponseHandler responseHandler; public final static String internalNoThrottledSource = "internalNoThrottledSource"; private final static String internalNoThrottledSourcePath = "/" + internalNoThrottledSource; public MbusRequestContext(MbusRequest request, ResponseHandler responseHandler, ComponentRegistry<DocprocService> docprocServiceComponentRegistry, ComponentRegistry<AbstractConcreteDocumentFactory> docFactoryRegistry, ContainerDocumentConfig containerDocConfig) { this.request = request; this.requestMsg = (DocumentMessage)request.getMessage(); this.responseHandler = responseHandler; this.processingFactory = new ProcessingFactory(docprocServiceComponentRegistry, docFactoryRegistry, containerDocConfig, getServiceName()); this.messageFactory = newMessageFactory(requestMsg); } @Override public List<Processing> getProcessings() { if (deserialized.getAndSet(true)) { return Collections.emptyList(); } return processingFactory.fromMessage(requestMsg); } @Override public void skip() { if (deserialized.get()) throw new IllegalStateException("Can not skip processing after deserialization"); dispatchRequest(requestMsg, request.getUri().getPath(), responseHandler); } @Override @Override public void processingFailed(Exception exception) { ErrorCode errorCode; if (exception instanceof TransientFailureException) { errorCode = ErrorCode.ERROR_ABORTED; } else { errorCode = ErrorCode.ERROR_PROCESSING_FAILURE; } 
StringBuilder errorMsg = new StringBuilder("Processing failed."); if (exception instanceof HandledProcessingException) { errorMsg.append(" Error message: ").append(exception.getMessage()); } else if (exception != null) { errorMsg.append(" Error message: ").append(exception.toString()); } errorMsg.append(" -- See Vespa log for details."); processingFailed(errorCode, errorMsg.toString()); } @Override public void processingFailed(ErrorCode errorCode, String errorMsg) { MbusResponse response = new MbusResponse(errorCode.getDiscStatus(), requestMsg.createReply()); response.getReply().addError(new com.yahoo.messagebus.Error(errorCode.getDocumentProtocolStatus(), errorMsg)); ResponseDispatch.newInstance(response).dispatch(this); } @Override public URI getUri() { return request.getUri(); } @Override public String getServiceName() { String path = getUri().getPath(); return path.substring(7, path.length()); } @Override public boolean isProcessable() { Message msg = requestMsg; switch (msg.getType()) { case DocumentProtocol.MESSAGE_PUTDOCUMENT: case DocumentProtocol.MESSAGE_UPDATEDOCUMENT: case DocumentProtocol.MESSAGE_REMOVEDOCUMENT: return true; } return false; } @Override public boolean hasExpired() { return requestMsg.isExpired(); } @Override public ContentChannel handleResponse(Response response) { if (responded.getAndSet(true)) { return null; } Reply reply = ((MbusResponse)response).getReply(); reply.swapState(requestMsg); return responseHandler.handleResponse(response); } private void dispatchResponse(int status) { ResponseDispatch.newInstance(new MbusResponse(status, requestMsg.createReply())).dispatch(this); } private void dispatchRequest(Message msg, String uriPath, ResponseHandler handler) { try { new RequestDispatch() { @Override protected Request newRequest() { return new MbusRequest(request, resolveUri(uriPath), msg); } @Override public ContentChannel handleResponse(Response response) { return handler.handleResponse(response); } }.dispatch(); } catch (Exception 
e) { dispatchResponse(Response.Status.INTERNAL_SERVER_ERROR); e.printStackTrace(); } } private static MessageFactory newMessageFactory(DocumentMessage message) { if (message == null) return null; if (message.getRoute() == null || ! message.getRoute().hasHops()) return null; return new MessageFactory(message); } private static URI resolveUri(String path) { URI uri = uriCache.get(path); if (uri == null) { uri = URI.create("mbus: uriCache.put(path, uri); } return uri; } }
The if is actually the normal path :)
public void processingDone(List<Processing> processings) { List<DocumentMessage> messages = new ArrayList<>(); if (messageFactory != null) { for (Processing processing : processings) { for (DocumentOperation documentOperation : processing.getDocumentOperations()) { messages.add(messageFactory.fromDocumentOperation(processing, documentOperation)); } } } log.log(Level.FINE, () ->"Forwarding " + messages.size() + " messages from " + processings.size() + " processings."); if (messages.isEmpty()) { dispatchResponse(Response.Status.OK); return; } long inputSequenceId = requestMsg.getSequenceId(); ResponseMerger responseHandler = new ResponseMerger(requestMsg, messages.size(), this); int numMsgWithOriginalSequenceId = 0; for (Message message : messages) { if (message.getSequenceId() == inputSequenceId) numMsgWithOriginalSequenceId++; } for (Message message : messages) { String path = internalNoThrottledSourcePath; if ((numMsgWithOriginalSequenceId == 1) && (message.getSequenceId() == inputSequenceId)) path = getUri().getPath(); dispatchRequest(message, path, responseHandler); } }
if ((numMsgWithOriginalSequenceId == 1) && (message.getSequenceId() == inputSequenceId))
public void processingDone(List<Processing> processings) { List<DocumentMessage> messages = new ArrayList<>(); if (messageFactory != null) { for (Processing processing : processings) { for (DocumentOperation documentOperation : processing.getDocumentOperations()) { messages.add(messageFactory.fromDocumentOperation(processing, documentOperation)); } } } log.log(Level.FINE, () ->"Forwarding " + messages.size() + " messages from " + processings.size() + " processings."); if (messages.isEmpty()) { dispatchResponse(Response.Status.OK); return; } long inputSequenceId = requestMsg.getSequenceId(); ResponseMerger responseHandler = new ResponseMerger(requestMsg, messages.size(), this); int numMsgWithOriginalSequenceId = 0; for (Message message : messages) { if (message.getSequenceId() == inputSequenceId) numMsgWithOriginalSequenceId++; } for (Message message : messages) { String path = internalNoThrottledSourcePath; if ((numMsgWithOriginalSequenceId == 1) && (message.getSequenceId() == inputSequenceId)) path = getUri().getPath(); dispatchRequest(message, path, responseHandler); } }
class MbusRequestContext implements RequestContext, ResponseHandler { private final static Logger log = Logger.getLogger(MbusRequestContext.class.getName()); private final static CopyOnWriteHashMap<String, URI> uriCache = new CopyOnWriteHashMap<>(); private final AtomicBoolean deserialized = new AtomicBoolean(false); private final AtomicBoolean responded = new AtomicBoolean(false); private final ProcessingFactory processingFactory; private final MessageFactory messageFactory; private final MbusRequest request; private final DocumentMessage requestMsg; private final ResponseHandler responseHandler; public final static String internalNoThrottledSource = "internalNoThrottledSource"; private final static String internalNoThrottledSourcePath = "/" + internalNoThrottledSource; public MbusRequestContext(MbusRequest request, ResponseHandler responseHandler, ComponentRegistry<DocprocService> docprocServiceComponentRegistry, ComponentRegistry<AbstractConcreteDocumentFactory> docFactoryRegistry, ContainerDocumentConfig containerDocConfig) { this.request = request; this.requestMsg = (DocumentMessage)request.getMessage(); this.responseHandler = responseHandler; this.processingFactory = new ProcessingFactory(docprocServiceComponentRegistry, docFactoryRegistry, containerDocConfig, getServiceName()); this.messageFactory = newMessageFactory(requestMsg); } @Override public List<Processing> getProcessings() { if (deserialized.getAndSet(true)) { return Collections.emptyList(); } return processingFactory.fromMessage(requestMsg); } @Override public void skip() { if (deserialized.get()) throw new IllegalStateException("Can not skip processing after deserialization"); dispatchRequest(requestMsg, request.getUri().getPath(), responseHandler); } @Override @Override public void processingFailed(Exception exception) { ErrorCode errorCode; if (exception instanceof TransientFailureException) { errorCode = ErrorCode.ERROR_ABORTED; } else { errorCode = ErrorCode.ERROR_PROCESSING_FAILURE; } 
StringBuilder errorMsg = new StringBuilder("Processing failed."); if (exception instanceof HandledProcessingException) { errorMsg.append(" Error message: ").append(exception.getMessage()); } else if (exception != null) { errorMsg.append(" Error message: ").append(exception.toString()); } errorMsg.append(" -- See Vespa log for details."); processingFailed(errorCode, errorMsg.toString()); } @Override public void processingFailed(ErrorCode errorCode, String errorMsg) { MbusResponse response = new MbusResponse(errorCode.getDiscStatus(), requestMsg.createReply()); response.getReply().addError(new com.yahoo.messagebus.Error(errorCode.getDocumentProtocolStatus(), errorMsg)); ResponseDispatch.newInstance(response).dispatch(this); } @Override public URI getUri() { return request.getUri(); } @Override public String getServiceName() { String path = getUri().getPath(); return path.substring(7, path.length()); } @Override public boolean isProcessable() { Message msg = requestMsg; switch (msg.getType()) { case DocumentProtocol.MESSAGE_PUTDOCUMENT: case DocumentProtocol.MESSAGE_UPDATEDOCUMENT: case DocumentProtocol.MESSAGE_REMOVEDOCUMENT: return true; } return false; } @Override public boolean hasExpired() { return requestMsg.isExpired(); } @Override public ContentChannel handleResponse(Response response) { if (responded.getAndSet(true)) { return null; } Reply reply = ((MbusResponse)response).getReply(); reply.swapState(requestMsg); return responseHandler.handleResponse(response); } private void dispatchResponse(int status) { ResponseDispatch.newInstance(new MbusResponse(status, requestMsg.createReply())).dispatch(this); } private void dispatchRequest(Message msg, String uriPath, ResponseHandler handler) { try { new RequestDispatch() { @Override protected Request newRequest() { return new MbusRequest(request, resolveUri(uriPath), msg); } @Override public ContentChannel handleResponse(Response response) { return handler.handleResponse(response); } }.dispatch(); } catch (Exception 
e) { dispatchResponse(Response.Status.INTERNAL_SERVER_ERROR); e.printStackTrace(); } } private static MessageFactory newMessageFactory(DocumentMessage message) { if (message == null) return null; if (message.getRoute() == null || ! message.getRoute().hasHops()) return null; return new MessageFactory(message); } private static URI resolveUri(String path) { URI uri = uriCache.get(path); if (uri == null) { uri = URI.create("mbus: uriCache.put(path, uri); } return uri; } }
class MbusRequestContext implements RequestContext, ResponseHandler { private final static Logger log = Logger.getLogger(MbusRequestContext.class.getName()); private final static CopyOnWriteHashMap<String, URI> uriCache = new CopyOnWriteHashMap<>(); private final AtomicBoolean deserialized = new AtomicBoolean(false); private final AtomicBoolean responded = new AtomicBoolean(false); private final ProcessingFactory processingFactory; private final MessageFactory messageFactory; private final MbusRequest request; private final DocumentMessage requestMsg; private final ResponseHandler responseHandler; public final static String internalNoThrottledSource = "internalNoThrottledSource"; private final static String internalNoThrottledSourcePath = "/" + internalNoThrottledSource; public MbusRequestContext(MbusRequest request, ResponseHandler responseHandler, ComponentRegistry<DocprocService> docprocServiceComponentRegistry, ComponentRegistry<AbstractConcreteDocumentFactory> docFactoryRegistry, ContainerDocumentConfig containerDocConfig) { this.request = request; this.requestMsg = (DocumentMessage)request.getMessage(); this.responseHandler = responseHandler; this.processingFactory = new ProcessingFactory(docprocServiceComponentRegistry, docFactoryRegistry, containerDocConfig, getServiceName()); this.messageFactory = newMessageFactory(requestMsg); } @Override public List<Processing> getProcessings() { if (deserialized.getAndSet(true)) { return Collections.emptyList(); } return processingFactory.fromMessage(requestMsg); } @Override public void skip() { if (deserialized.get()) throw new IllegalStateException("Can not skip processing after deserialization"); dispatchRequest(requestMsg, request.getUri().getPath(), responseHandler); } @Override @Override public void processingFailed(Exception exception) { ErrorCode errorCode; if (exception instanceof TransientFailureException) { errorCode = ErrorCode.ERROR_ABORTED; } else { errorCode = ErrorCode.ERROR_PROCESSING_FAILURE; } 
StringBuilder errorMsg = new StringBuilder("Processing failed."); if (exception instanceof HandledProcessingException) { errorMsg.append(" Error message: ").append(exception.getMessage()); } else if (exception != null) { errorMsg.append(" Error message: ").append(exception.toString()); } errorMsg.append(" -- See Vespa log for details."); processingFailed(errorCode, errorMsg.toString()); } @Override public void processingFailed(ErrorCode errorCode, String errorMsg) { MbusResponse response = new MbusResponse(errorCode.getDiscStatus(), requestMsg.createReply()); response.getReply().addError(new com.yahoo.messagebus.Error(errorCode.getDocumentProtocolStatus(), errorMsg)); ResponseDispatch.newInstance(response).dispatch(this); } @Override public URI getUri() { return request.getUri(); } @Override public String getServiceName() { String path = getUri().getPath(); return path.substring(7, path.length()); } @Override public boolean isProcessable() { Message msg = requestMsg; switch (msg.getType()) { case DocumentProtocol.MESSAGE_PUTDOCUMENT: case DocumentProtocol.MESSAGE_UPDATEDOCUMENT: case DocumentProtocol.MESSAGE_REMOVEDOCUMENT: return true; } return false; } @Override public boolean hasExpired() { return requestMsg.isExpired(); } @Override public ContentChannel handleResponse(Response response) { if (responded.getAndSet(true)) { return null; } Reply reply = ((MbusResponse)response).getReply(); reply.swapState(requestMsg); return responseHandler.handleResponse(response); } private void dispatchResponse(int status) { ResponseDispatch.newInstance(new MbusResponse(status, requestMsg.createReply())).dispatch(this); } private void dispatchRequest(Message msg, String uriPath, ResponseHandler handler) { try { new RequestDispatch() { @Override protected Request newRequest() { return new MbusRequest(request, resolveUri(uriPath), msg); } @Override public ContentChannel handleResponse(Response response) { return handler.handleResponse(response); } }.dispatch(); } catch (Exception 
e) { dispatchResponse(Response.Status.INTERNAL_SERVER_ERROR); e.printStackTrace(); } } private static MessageFactory newMessageFactory(DocumentMessage message) { if (message == null) return null; if (message.getRoute() == null || ! message.getRoute().hasHops()) return null; return new MessageFactory(message); } private static URI resolveUri(String path) { URI uri = uriCache.get(path); if (uri == null) { uri = URI.create("mbus: uriCache.put(path, uri); } return uri; } }
👍
/**
 * Forwards the document operations produced by the completed processings as
 * individual messages, or responds OK right away when nothing remains to forward.
 *
 * <p>At most one outgoing message keeps the original request path (and thereby the
 * original throttling): that happens only when exactly one message carries the
 * request's sequence id. Every other message is dispatched on the internal,
 * non-throttled source path.</p>
 *
 * @param processings the finished processings whose document operations are forwarded
 */
public void processingDone(List<Processing> processings) {
    List<DocumentMessage> messages = new ArrayList<>();
    if (messageFactory != null) {
        for (Processing processing : processings) {
            for (DocumentOperation operation : processing.getDocumentOperations()) {
                messages.add(messageFactory.fromDocumentOperation(processing, operation));
            }
        }
    }
    log.log(Level.FINE, () -> "Forwarding " + messages.size() + " messages from " + processings.size() + " processings.");
    if (messages.isEmpty()) {
        dispatchResponse(Response.Status.OK);
        return;
    }
    long originalSequenceId = requestMsg.getSequenceId();
    ResponseMerger merger = new ResponseMerger(requestMsg, messages.size(), this);
    // Count how many outgoing messages share the request's sequence id.
    long matches = messages.stream()
                           .filter(message -> message.getSequenceId() == originalSequenceId)
                           .count();
    for (Message message : messages) {
        boolean keepOriginalPath = (matches == 1) && (message.getSequenceId() == originalSequenceId);
        dispatchRequest(message,
                        keepOriginalPath ? getUri().getPath() : internalNoThrottledSourcePath,
                        merger);
    }
}
if ((numMsgWithOriginalSequenceId == 1) && (message.getSequenceId() == inputSequenceId))
/**
 * Forwards the document operations produced by the completed processings as
 * individual messages, or responds OK right away when nothing remains to forward.
 *
 * <p>At most one outgoing message keeps the original request path (and thereby the
 * original throttling): that happens only when exactly one message carries the
 * request's sequence id. Every other message is dispatched on the internal,
 * non-throttled source path.</p>
 *
 * @param processings the finished processings whose document operations are forwarded
 */
public void processingDone(List<Processing> processings) {
    List<DocumentMessage> messages = new ArrayList<>();
    if (messageFactory != null) {
        for (Processing processing : processings) {
            for (DocumentOperation operation : processing.getDocumentOperations()) {
                messages.add(messageFactory.fromDocumentOperation(processing, operation));
            }
        }
    }
    log.log(Level.FINE, () -> "Forwarding " + messages.size() + " messages from " + processings.size() + " processings.");
    if (messages.isEmpty()) {
        dispatchResponse(Response.Status.OK);
        return;
    }
    long originalSequenceId = requestMsg.getSequenceId();
    ResponseMerger merger = new ResponseMerger(requestMsg, messages.size(), this);
    // Count how many outgoing messages share the request's sequence id.
    long matches = messages.stream()
                           .filter(message -> message.getSequenceId() == originalSequenceId)
                           .count();
    for (Message message : messages) {
        boolean keepOriginalPath = (matches == 1) && (message.getSequenceId() == originalSequenceId);
        dispatchRequest(message,
                        keepOriginalPath ? getUri().getPath() : internalNoThrottledSourcePath,
                        merger);
    }
}
class MbusRequestContext implements RequestContext, ResponseHandler { private final static Logger log = Logger.getLogger(MbusRequestContext.class.getName()); private final static CopyOnWriteHashMap<String, URI> uriCache = new CopyOnWriteHashMap<>(); private final AtomicBoolean deserialized = new AtomicBoolean(false); private final AtomicBoolean responded = new AtomicBoolean(false); private final ProcessingFactory processingFactory; private final MessageFactory messageFactory; private final MbusRequest request; private final DocumentMessage requestMsg; private final ResponseHandler responseHandler; public final static String internalNoThrottledSource = "internalNoThrottledSource"; private final static String internalNoThrottledSourcePath = "/" + internalNoThrottledSource; public MbusRequestContext(MbusRequest request, ResponseHandler responseHandler, ComponentRegistry<DocprocService> docprocServiceComponentRegistry, ComponentRegistry<AbstractConcreteDocumentFactory> docFactoryRegistry, ContainerDocumentConfig containerDocConfig) { this.request = request; this.requestMsg = (DocumentMessage)request.getMessage(); this.responseHandler = responseHandler; this.processingFactory = new ProcessingFactory(docprocServiceComponentRegistry, docFactoryRegistry, containerDocConfig, getServiceName()); this.messageFactory = newMessageFactory(requestMsg); } @Override public List<Processing> getProcessings() { if (deserialized.getAndSet(true)) { return Collections.emptyList(); } return processingFactory.fromMessage(requestMsg); } @Override public void skip() { if (deserialized.get()) throw new IllegalStateException("Can not skip processing after deserialization"); dispatchRequest(requestMsg, request.getUri().getPath(), responseHandler); } @Override @Override public void processingFailed(Exception exception) { ErrorCode errorCode; if (exception instanceof TransientFailureException) { errorCode = ErrorCode.ERROR_ABORTED; } else { errorCode = ErrorCode.ERROR_PROCESSING_FAILURE; } 
StringBuilder errorMsg = new StringBuilder("Processing failed."); if (exception instanceof HandledProcessingException) { errorMsg.append(" Error message: ").append(exception.getMessage()); } else if (exception != null) { errorMsg.append(" Error message: ").append(exception.toString()); } errorMsg.append(" -- See Vespa log for details."); processingFailed(errorCode, errorMsg.toString()); } @Override public void processingFailed(ErrorCode errorCode, String errorMsg) { MbusResponse response = new MbusResponse(errorCode.getDiscStatus(), requestMsg.createReply()); response.getReply().addError(new com.yahoo.messagebus.Error(errorCode.getDocumentProtocolStatus(), errorMsg)); ResponseDispatch.newInstance(response).dispatch(this); } @Override public URI getUri() { return request.getUri(); } @Override public String getServiceName() { String path = getUri().getPath(); return path.substring(7, path.length()); } @Override public boolean isProcessable() { Message msg = requestMsg; switch (msg.getType()) { case DocumentProtocol.MESSAGE_PUTDOCUMENT: case DocumentProtocol.MESSAGE_UPDATEDOCUMENT: case DocumentProtocol.MESSAGE_REMOVEDOCUMENT: return true; } return false; } @Override public boolean hasExpired() { return requestMsg.isExpired(); } @Override public ContentChannel handleResponse(Response response) { if (responded.getAndSet(true)) { return null; } Reply reply = ((MbusResponse)response).getReply(); reply.swapState(requestMsg); return responseHandler.handleResponse(response); } private void dispatchResponse(int status) { ResponseDispatch.newInstance(new MbusResponse(status, requestMsg.createReply())).dispatch(this); } private void dispatchRequest(Message msg, String uriPath, ResponseHandler handler) { try { new RequestDispatch() { @Override protected Request newRequest() { return new MbusRequest(request, resolveUri(uriPath), msg); } @Override public ContentChannel handleResponse(Response response) { return handler.handleResponse(response); } }.dispatch(); } catch (Exception 
e) { dispatchResponse(Response.Status.INTERNAL_SERVER_ERROR); e.printStackTrace(); } } private static MessageFactory newMessageFactory(DocumentMessage message) { if (message == null) return null; if (message.getRoute() == null || ! message.getRoute().hasHops()) return null; return new MessageFactory(message); } private static URI resolveUri(String path) { URI uri = uriCache.get(path); if (uri == null) { uri = URI.create("mbus: uriCache.put(path, uri); } return uri; } }
class MbusRequestContext implements RequestContext, ResponseHandler { private final static Logger log = Logger.getLogger(MbusRequestContext.class.getName()); private final static CopyOnWriteHashMap<String, URI> uriCache = new CopyOnWriteHashMap<>(); private final AtomicBoolean deserialized = new AtomicBoolean(false); private final AtomicBoolean responded = new AtomicBoolean(false); private final ProcessingFactory processingFactory; private final MessageFactory messageFactory; private final MbusRequest request; private final DocumentMessage requestMsg; private final ResponseHandler responseHandler; public final static String internalNoThrottledSource = "internalNoThrottledSource"; private final static String internalNoThrottledSourcePath = "/" + internalNoThrottledSource; public MbusRequestContext(MbusRequest request, ResponseHandler responseHandler, ComponentRegistry<DocprocService> docprocServiceComponentRegistry, ComponentRegistry<AbstractConcreteDocumentFactory> docFactoryRegistry, ContainerDocumentConfig containerDocConfig) { this.request = request; this.requestMsg = (DocumentMessage)request.getMessage(); this.responseHandler = responseHandler; this.processingFactory = new ProcessingFactory(docprocServiceComponentRegistry, docFactoryRegistry, containerDocConfig, getServiceName()); this.messageFactory = newMessageFactory(requestMsg); } @Override public List<Processing> getProcessings() { if (deserialized.getAndSet(true)) { return Collections.emptyList(); } return processingFactory.fromMessage(requestMsg); } @Override public void skip() { if (deserialized.get()) throw new IllegalStateException("Can not skip processing after deserialization"); dispatchRequest(requestMsg, request.getUri().getPath(), responseHandler); } @Override @Override public void processingFailed(Exception exception) { ErrorCode errorCode; if (exception instanceof TransientFailureException) { errorCode = ErrorCode.ERROR_ABORTED; } else { errorCode = ErrorCode.ERROR_PROCESSING_FAILURE; } 
StringBuilder errorMsg = new StringBuilder("Processing failed."); if (exception instanceof HandledProcessingException) { errorMsg.append(" Error message: ").append(exception.getMessage()); } else if (exception != null) { errorMsg.append(" Error message: ").append(exception.toString()); } errorMsg.append(" -- See Vespa log for details."); processingFailed(errorCode, errorMsg.toString()); } @Override public void processingFailed(ErrorCode errorCode, String errorMsg) { MbusResponse response = new MbusResponse(errorCode.getDiscStatus(), requestMsg.createReply()); response.getReply().addError(new com.yahoo.messagebus.Error(errorCode.getDocumentProtocolStatus(), errorMsg)); ResponseDispatch.newInstance(response).dispatch(this); } @Override public URI getUri() { return request.getUri(); } @Override public String getServiceName() { String path = getUri().getPath(); return path.substring(7, path.length()); } @Override public boolean isProcessable() { Message msg = requestMsg; switch (msg.getType()) { case DocumentProtocol.MESSAGE_PUTDOCUMENT: case DocumentProtocol.MESSAGE_UPDATEDOCUMENT: case DocumentProtocol.MESSAGE_REMOVEDOCUMENT: return true; } return false; } @Override public boolean hasExpired() { return requestMsg.isExpired(); } @Override public ContentChannel handleResponse(Response response) { if (responded.getAndSet(true)) { return null; } Reply reply = ((MbusResponse)response).getReply(); reply.swapState(requestMsg); return responseHandler.handleResponse(response); } private void dispatchResponse(int status) { ResponseDispatch.newInstance(new MbusResponse(status, requestMsg.createReply())).dispatch(this); } private void dispatchRequest(Message msg, String uriPath, ResponseHandler handler) { try { new RequestDispatch() { @Override protected Request newRequest() { return new MbusRequest(request, resolveUri(uriPath), msg); } @Override public ContentChannel handleResponse(Response response) { return handler.handleResponse(response); } }.dispatch(); } catch (Exception 
e) { dispatchResponse(Response.Status.INTERNAL_SERVER_ERROR); e.printStackTrace(); } } private static MessageFactory newMessageFactory(DocumentMessage message) { if (message == null) return null; if (message.getRoute() == null || ! message.getRoute().hasHops()) return null; return new MessageFactory(message); } private static URI resolveUri(String path) { URI uri = uriCache.get(path); if (uri == null) { uri = URI.create("mbus: uriCache.put(path, uri); } return uri; } }
This must still be just: ``` if (decodedDoubles == null) decodedDoubles = new HashMap<>(); ``` The same applies to decodedTensors below. But since in all common cases you will create the map anyway, I suggest doing that first in the method — that should make the code easier to read.
/**
 * Returns the value of a scalar feature, or null if it is not present.
 * Decoded values are cached in {@code decodedDoubles}; absent features are
 * never cached so they can be re-resolved later.
 *
 * <p>Fixes the previous version, which called {@code decodedDoubles.put}
 * unconditionally: that threw NPE when the feature was absent and the cache
 * map had not yet been allocated, and otherwise cached spurious null entries.</p>
 *
 * @param featureName the name of the feature to look up
 * @return the decoded scalar value, or null if the feature is not present
 * @throws IllegalArgumentException if the value exists but isn't a scalar
 */
public Double getDouble(String featureName) {
    // Allocate the cache eagerly: in all common cases it is needed anyway,
    // and doing it first keeps the rest of the method simple.
    if (decodedDoubles == null)
        decodedDoubles = new HashMap<>();

    Double value = decodedDoubles.get(featureName);
    if (value != null) return value;

    value = decodeDouble(featureName);
    if (value != null)
        decodedDoubles.put(featureName, value);
    return value;
}
if (value != null && decodedDoubles == null)
/**
 * Returns the value of a scalar feature, or null if it is not present.
 * Decoded values are cached in {@code decodedDoubles}; absent features are
 * not cached.
 *
 * @param featureName the name of the feature to look up
 * @return the decoded scalar value, or null if the feature is not present
 * @throws IllegalArgumentException if the value exists but isn't a scalar
 */
public Double getDouble(String featureName) {
    if (decodedDoubles == null)
        decodedDoubles = new HashMap<>();
    // computeIfAbsent does not store null mappings, which matches the original
    // contract: only successfully decoded values are cached.
    return decodedDoubles.computeIfAbsent(featureName, this::decodeDouble);
}
class FeatureData implements Inspectable, JsonProducer { private static final FeatureData empty = new FeatureData(Value.empty()); private final Inspector value; private Set<String> featureNames = null; /** Cached decoded values */ private Map<String, Double> decodedDoubles = null; private Map<String, Tensor> decodedTensors = null; private String jsonForm = null; public FeatureData(Inspector value) { this.value = value; } public static FeatureData empty() { return empty; } /** * Returns the fields of this as an inspector, where tensors are represented as binary data * which can be decoded using * <code>com.yahoo.tensor.serialization.TypedBinaryFormat.decode(Optional.empty(), GrowableByteBuffer.wrap(featureValue.asData()))</code> */ @Override public Inspector inspect() { return value; } @Override public String toJson() { if (this == empty) return "{}"; if (jsonForm != null) return jsonForm; jsonForm = writeJson(new StringBuilder()).toString(); return jsonForm; } @Override public StringBuilder writeJson(StringBuilder target) { return JsonRender.render(value, new Encoder(target, true)); } /** * Returns the value of a scalar feature, or null if it is not present. * * @throws IllegalArgumentException if the value exists but isn't a scalar * (that is, if it is a tensor with nonzero rank) */ private Double decodeDouble(String featureName) { Inspector featureValue = getInspector(featureName); if ( ! featureValue.valid()) return null; switch (featureValue.type()) { case DOUBLE: return featureValue.asDouble(); case DATA: throw new IllegalArgumentException("Feature '" + featureName + "' is a tensor, not a double"); default: throw new IllegalStateException("Unexpected feature value type " + featureValue.type()); } } /** * Returns the value of a tensor feature, or null if it is not present. * This will return any feature value: Scalars are returned as a rank 0 tensor. 
*/ public Tensor getTensor(String featureName) { Tensor value = null; if (decodedTensors != null) value = decodedTensors.get(featureName); if (value != null) return value; value = decodeTensor(featureName); if (value != null && decodedTensors == null) decodedTensors = new HashMap<>(); decodedTensors.put(featureName, value); return value; } private Tensor decodeTensor(String featureName) { Inspector featureValue = getInspector(featureName); if ( ! featureValue.valid()) return null; switch (featureValue.type()) { case DOUBLE: return Tensor.from(featureValue.asDouble()); case DATA: return TypedBinaryFormat.decode(Optional.empty(), GrowableByteBuffer.wrap(featureValue.asData())); default: throw new IllegalStateException("Unexpected feature value type " + featureValue.type()); } } private Inspector getInspector(String featureName) { Inspector featureValue = value.field(featureName); if (featureValue.valid()) return featureValue; return value.field("rankingExpression(" + featureName + ")"); } /** Returns the names of the features available in this */ public Set<String> featureNames() { if (this == empty) return Collections.emptySet(); if (featureNames != null) return featureNames; featureNames = new HashSet<>(); value.fields().forEach(field -> featureNames.add(field.getKey())); return featureNames; } @Override public String toString() { if (value.type() == Type.EMPTY) return ""; return toJson(); } @Override public int hashCode() { return toJson().hashCode(); } @Override public boolean equals(Object other) { if (other == this) return true; if ( ! 
(other instanceof FeatureData)) return false; return ((FeatureData)other).toJson().equals(this.toJson()); } /** A JSON encoder which encodes DATA as a tensor */ private static class Encoder extends JsonRender.StringEncoder { Encoder(StringBuilder out, boolean compact) { super(out, compact); } @Override public void encodeDATA(byte[] value) { target().append(new String(JsonFormat.encodeWithType(TypedBinaryFormat.decode(Optional.empty(), GrowableByteBuffer.wrap(value))), StandardCharsets.UTF_8)); } } }
class FeatureData implements Inspectable, JsonProducer { private static final FeatureData empty = new FeatureData(Value.empty()); private final Inspector value; private Set<String> featureNames = null; /** Cached decoded values */ private Map<String, Double> decodedDoubles = null; private Map<String, Tensor> decodedTensors = null; private String jsonForm = null; public FeatureData(Inspector value) { this.value = value; } public static FeatureData empty() { return empty; } /** * Returns the fields of this as an inspector, where tensors are represented as binary data * which can be decoded using * <code>com.yahoo.tensor.serialization.TypedBinaryFormat.decode(Optional.empty(), GrowableByteBuffer.wrap(featureValue.asData()))</code> */ @Override public Inspector inspect() { return value; } @Override public String toJson() { if (this == empty) return "{}"; if (jsonForm != null) return jsonForm; jsonForm = writeJson(new StringBuilder()).toString(); return jsonForm; } @Override public StringBuilder writeJson(StringBuilder target) { return JsonRender.render(value, new Encoder(target, true)); } /** * Returns the value of a scalar feature, or null if it is not present. * * @throws IllegalArgumentException if the value exists but isn't a scalar * (that is, if it is a tensor with nonzero rank) */ private Double decodeDouble(String featureName) { Inspector featureValue = getInspector(featureName); if ( ! featureValue.valid()) return null; switch (featureValue.type()) { case DOUBLE: return featureValue.asDouble(); case DATA: throw new IllegalArgumentException("Feature '" + featureName + "' is a tensor, not a double"); default: throw new IllegalStateException("Unexpected feature value type " + featureValue.type()); } } /** * Returns the value of a tensor feature, or null if it is not present. * This will return any feature value: Scalars are returned as a rank 0 tensor. 
*/ public Tensor getTensor(String featureName) { if (decodedTensors == null) decodedTensors = new HashMap<>(); Tensor value = decodedTensors.get(featureName); if (value != null) return value; value = decodeTensor(featureName); if (value != null) decodedTensors.put(featureName, value); return value; } private Tensor decodeTensor(String featureName) { Inspector featureValue = getInspector(featureName); if ( ! featureValue.valid()) return null; switch (featureValue.type()) { case DOUBLE: return Tensor.from(featureValue.asDouble()); case DATA: return TypedBinaryFormat.decode(Optional.empty(), GrowableByteBuffer.wrap(featureValue.asData())); default: throw new IllegalStateException("Unexpected feature value type " + featureValue.type()); } } private Inspector getInspector(String featureName) { Inspector featureValue = value.field(featureName); if (featureValue.valid()) return featureValue; return value.field("rankingExpression(" + featureName + ")"); } /** Returns the names of the features available in this */ public Set<String> featureNames() { if (this == empty) return Collections.emptySet(); if (featureNames != null) return featureNames; featureNames = new HashSet<>(); value.fields().forEach(field -> featureNames.add(field.getKey())); return featureNames; } @Override public String toString() { if (value.type() == Type.EMPTY) return ""; return toJson(); } @Override public int hashCode() { return toJson().hashCode(); } @Override public boolean equals(Object other) { if (other == this) return true; if ( ! 
(other instanceof FeatureData)) return false; return ((FeatureData)other).toJson().equals(this.toJson()); } /** A JSON encoder which encodes DATA as a tensor */ private static class Encoder extends JsonRender.StringEncoder { Encoder(StringBuilder out, boolean compact) { super(out, compact); } @Override public void encodeDATA(byte[] value) { target().append(new String(JsonFormat.encodeWithType(TypedBinaryFormat.decode(Optional.empty(), GrowableByteBuffer.wrap(value))), StandardCharsets.UTF_8)); } } }
Good suggestion, I'll do that.
/**
 * Returns the value of a scalar feature, or null if it is not present.
 * Decoded values are cached in {@code decodedDoubles}; absent features are
 * never cached so they can be re-resolved later.
 *
 * <p>Fixes the previous version, which called {@code decodedDoubles.put}
 * unconditionally: that threw NPE when the feature was absent and the cache
 * map had not yet been allocated, and otherwise cached spurious null entries.</p>
 *
 * @param featureName the name of the feature to look up
 * @return the decoded scalar value, or null if the feature is not present
 * @throws IllegalArgumentException if the value exists but isn't a scalar
 */
public Double getDouble(String featureName) {
    // Allocate the cache eagerly: in all common cases it is needed anyway,
    // and doing it first keeps the rest of the method simple.
    if (decodedDoubles == null)
        decodedDoubles = new HashMap<>();

    Double value = decodedDoubles.get(featureName);
    if (value != null) return value;

    value = decodeDouble(featureName);
    if (value != null)
        decodedDoubles.put(featureName, value);
    return value;
}
if (value != null && decodedDoubles == null)
/**
 * Returns the value of a scalar feature, or null if it is not present.
 * Decoded values are cached in {@code decodedDoubles}; absent features are
 * not cached.
 *
 * @param featureName the name of the feature to look up
 * @return the decoded scalar value, or null if the feature is not present
 * @throws IllegalArgumentException if the value exists but isn't a scalar
 */
public Double getDouble(String featureName) {
    if (decodedDoubles == null)
        decodedDoubles = new HashMap<>();
    // computeIfAbsent does not store null mappings, which matches the original
    // contract: only successfully decoded values are cached.
    return decodedDoubles.computeIfAbsent(featureName, this::decodeDouble);
}
class FeatureData implements Inspectable, JsonProducer { private static final FeatureData empty = new FeatureData(Value.empty()); private final Inspector value; private Set<String> featureNames = null; /** Cached decoded values */ private Map<String, Double> decodedDoubles = null; private Map<String, Tensor> decodedTensors = null; private String jsonForm = null; public FeatureData(Inspector value) { this.value = value; } public static FeatureData empty() { return empty; } /** * Returns the fields of this as an inspector, where tensors are represented as binary data * which can be decoded using * <code>com.yahoo.tensor.serialization.TypedBinaryFormat.decode(Optional.empty(), GrowableByteBuffer.wrap(featureValue.asData()))</code> */ @Override public Inspector inspect() { return value; } @Override public String toJson() { if (this == empty) return "{}"; if (jsonForm != null) return jsonForm; jsonForm = writeJson(new StringBuilder()).toString(); return jsonForm; } @Override public StringBuilder writeJson(StringBuilder target) { return JsonRender.render(value, new Encoder(target, true)); } /** * Returns the value of a scalar feature, or null if it is not present. * * @throws IllegalArgumentException if the value exists but isn't a scalar * (that is, if it is a tensor with nonzero rank) */ private Double decodeDouble(String featureName) { Inspector featureValue = getInspector(featureName); if ( ! featureValue.valid()) return null; switch (featureValue.type()) { case DOUBLE: return featureValue.asDouble(); case DATA: throw new IllegalArgumentException("Feature '" + featureName + "' is a tensor, not a double"); default: throw new IllegalStateException("Unexpected feature value type " + featureValue.type()); } } /** * Returns the value of a tensor feature, or null if it is not present. * This will return any feature value: Scalars are returned as a rank 0 tensor. 
*/ public Tensor getTensor(String featureName) { Tensor value = null; if (decodedTensors != null) value = decodedTensors.get(featureName); if (value != null) return value; value = decodeTensor(featureName); if (value != null && decodedTensors == null) decodedTensors = new HashMap<>(); decodedTensors.put(featureName, value); return value; } private Tensor decodeTensor(String featureName) { Inspector featureValue = getInspector(featureName); if ( ! featureValue.valid()) return null; switch (featureValue.type()) { case DOUBLE: return Tensor.from(featureValue.asDouble()); case DATA: return TypedBinaryFormat.decode(Optional.empty(), GrowableByteBuffer.wrap(featureValue.asData())); default: throw new IllegalStateException("Unexpected feature value type " + featureValue.type()); } } private Inspector getInspector(String featureName) { Inspector featureValue = value.field(featureName); if (featureValue.valid()) return featureValue; return value.field("rankingExpression(" + featureName + ")"); } /** Returns the names of the features available in this */ public Set<String> featureNames() { if (this == empty) return Collections.emptySet(); if (featureNames != null) return featureNames; featureNames = new HashSet<>(); value.fields().forEach(field -> featureNames.add(field.getKey())); return featureNames; } @Override public String toString() { if (value.type() == Type.EMPTY) return ""; return toJson(); } @Override public int hashCode() { return toJson().hashCode(); } @Override public boolean equals(Object other) { if (other == this) return true; if ( ! 
(other instanceof FeatureData)) return false; return ((FeatureData)other).toJson().equals(this.toJson()); } /** A JSON encoder which encodes DATA as a tensor */ private static class Encoder extends JsonRender.StringEncoder { Encoder(StringBuilder out, boolean compact) { super(out, compact); } @Override public void encodeDATA(byte[] value) { target().append(new String(JsonFormat.encodeWithType(TypedBinaryFormat.decode(Optional.empty(), GrowableByteBuffer.wrap(value))), StandardCharsets.UTF_8)); } } }
/**
 * Holds a set of feature values backed by an {@link Inspector}, decoding and caching
 * doubles/tensors lazily and rendering the whole set as JSON on demand.
 */
class FeatureData implements Inspectable, JsonProducer {

    private static final FeatureData empty = new FeatureData(Value.empty());

    // The raw feature values; tensors are stored as binary DATA fields.
    private final Inspector value;

    // Lazily computed set of available feature names.
    private Set<String> featureNames = null;

    /** Cached decoded values */
    // NOTE(review): decodedDoubles is never read or written in the code visible here
    // (decodeDouble does not use it) — verify against the full file whether it is dead state.
    private Map<String, Double> decodedDoubles = null;
    private Map<String, Tensor> decodedTensors = null;

    // Lazily computed and cached JSON rendering of all feature values.
    private String jsonForm = null;

    public FeatureData(Inspector value) {
        this.value = value;
    }

    public static FeatureData empty() { return empty; }

    /**
     * Returns the fields of this as an inspector, where tensors are represented as binary data
     * which can be decoded using
     * <code>com.yahoo.tensor.serialization.TypedBinaryFormat.decode(Optional.empty(), GrowableByteBuffer.wrap(featureValue.asData()))</code>
     */
    @Override
    public Inspector inspect() { return value; }

    @Override
    public String toJson() {
        if (this == empty) return "{}";
        if (jsonForm != null) return jsonForm;
        jsonForm = writeJson(new StringBuilder()).toString();
        return jsonForm;
    }

    @Override
    public StringBuilder writeJson(StringBuilder target) {
        // The custom Encoder renders DATA fields (binary tensors) as JSON tensors.
        return JsonRender.render(value, new Encoder(target, true));
    }

    /**
     * Returns the value of a scalar feature, or null if it is not present.
     *
     * @throws IllegalArgumentException if the value exists but isn't a scalar
     * (that is, if it is a tensor with nonzero rank)
     */
    private Double decodeDouble(String featureName) {
        Inspector featureValue = getInspector(featureName);
        if ( ! featureValue.valid()) return null;
        switch (featureValue.type()) {
            case DOUBLE: return featureValue.asDouble();
            case DATA: throw new IllegalArgumentException("Feature '" + featureName + "' is a tensor, not a double");
            default: throw new IllegalStateException("Unexpected feature value type " + featureValue.type());
        }
    }

    /**
     * Returns the value of a tensor feature, or null if it is not present.
     * This will return any feature value: Scalars are returned as a rank 0 tensor.
     */
    public Tensor getTensor(String featureName) {
        // Create the cache up front so the lookup and the put below are both safe.
        if (decodedTensors == null)
            decodedTensors = new HashMap<>();
        Tensor value = decodedTensors.get(featureName);
        if (value != null) return value;
        value = decodeTensor(featureName);
        // Only successful decodes are cached; absent features stay uncached.
        if (value != null)
            decodedTensors.put(featureName, value);
        return value;
    }

    /** Decodes the named feature from the backing inspector, or returns null if not present. */
    private Tensor decodeTensor(String featureName) {
        Inspector featureValue = getInspector(featureName);
        if ( ! featureValue.valid()) return null;
        switch (featureValue.type()) {
            case DOUBLE: return Tensor.from(featureValue.asDouble());
            case DATA: return TypedBinaryFormat.decode(Optional.empty(),
                                                       GrowableByteBuffer.wrap(featureValue.asData()));
            default: throw new IllegalStateException("Unexpected feature value type " + featureValue.type());
        }
    }

    private Inspector getInspector(String featureName) {
        Inspector featureValue = value.field(featureName);
        if (featureValue.valid()) return featureValue;
        // Not found under the plain name: try the rankingExpression(...) wrapper form.
        return value.field("rankingExpression(" + featureName + ")");
    }

    /** Returns the names of the features available in this */
    public Set<String> featureNames() {
        if (this == empty) return Collections.emptySet();
        if (featureNames != null) return featureNames;
        featureNames = new HashSet<>();
        value.fields().forEach(field -> featureNames.add(field.getKey()));
        return featureNames;
    }

    @Override
    public String toString() {
        if (value.type() == Type.EMPTY) return "";
        return toJson();
    }

    @Override
    public int hashCode() { return toJson().hashCode(); }

    @Override
    public boolean equals(Object other) {
        if (other == this) return true;
        if ( ! (other instanceof FeatureData)) return false;
        // Equality is defined by the JSON rendering of the feature values.
        return ((FeatureData)other).toJson().equals(this.toJson());
    }

    /** A JSON encoder which encodes DATA as a tensor */
    private static class Encoder extends JsonRender.StringEncoder {

        Encoder(StringBuilder out, boolean compact) {
            super(out, compact);
        }

        @Override
        public void encodeDATA(byte[] value) {
            // DATA fields hold a binary-encoded tensor: decode and render as JSON.
            target().append(new String(JsonFormat.encodeWithType(TypedBinaryFormat.decode(Optional.empty(),
                                                                                          GrowableByteBuffer.wrap(value))),
                                       StandardCharsets.UTF_8));
        }

    }

}
It might be useful to include the reason from the last failed conversion attempt in this exception message?
private ImportedModel convertToOnnxAndImport(String modelName, String modelDir) { Path tempDir = null; try { tempDir = Files.createTempDirectory("tf2onnx"); String convertedPath = tempDir.toString() + File.separatorChar + "converted.onnx"; for (int opset : onnxOpsetsToTry) { log.info("Converting TensorFlow model '" + modelDir + "' to ONNX with opset " + opset + "..."); Pair<Integer, String> res = convertToOnnx(modelDir, convertedPath, opset); if (res.getFirst() == 0) { log.info("Conversion to ONNX with opset " + opset + " successful."); return onnxImporter.importModel(modelName, convertedPath); } log.info("Conversion to ONNX with opset " + opset + " failed. Reason: " + res.getSecond()); } throw new IllegalArgumentException("Unable to convert TensorFlow model in '" + modelDir + "' to ONNX."); } catch (IOException e) { throw new IllegalArgumentException("Conversion from TensorFlow to ONNX failed for '" + modelDir + "'"); } finally { if (tempDir != null) { IOUtils.recursiveDeleteDir(tempDir.toFile()); } } }
throw new IllegalArgumentException("Unable to convert TensorFlow model in '" + modelDir + "' to ONNX.");
private ImportedModel convertToOnnxAndImport(String modelName, String modelDir) { Path tempDir = null; try { tempDir = Files.createTempDirectory("tf2onnx"); String convertedPath = tempDir.toString() + File.separatorChar + "converted.onnx"; String outputOfLastConversionAttempt = ""; for (int opset : onnxOpsetsToTry) { log.info("Converting TensorFlow model '" + modelDir + "' to ONNX with opset " + opset + "..."); Pair<Integer, String> res = convertToOnnx(modelDir, convertedPath, opset); if (res.getFirst() == 0) { log.info("Conversion to ONNX with opset " + opset + " successful."); return onnxImporter.importModel(modelName, convertedPath); } log.fine("Conversion to ONNX with opset " + opset + " failed. Reason: " + res.getSecond()); outputOfLastConversionAttempt = res.getSecond(); } throw new IllegalArgumentException("Unable to convert TensorFlow model in '" + modelDir + "' to ONNX. " + "Reason: " + outputOfLastConversionAttempt); } catch (IOException e) { throw new IllegalArgumentException("Conversion from TensorFlow to ONNX failed for '" + modelDir + "'"); } finally { if (tempDir != null) { IOUtils.recursiveDeleteDir(tempDir.toFile()); } } }
class TensorFlowImporter extends ModelImporter { private static final Logger log = Logger.getLogger(TensorFlowImporter.class.getName()); private final static int[] onnxOpsetsToTry = {8, 10, 12}; private final OnnxImporter onnxImporter = new OnnxImporter(); @Override public boolean canImport(String modelPath) { File modelDir = new File(modelPath); if ( ! modelDir.isDirectory()) return false; for (File file : modelDir.listFiles()) { if (file.toString().endsWith(".pbtxt")) return true; if (file.toString().endsWith(".pb")) return true; } return false; } /** * Imports a saved TensorFlow model from a directory. * The model should be saved as a .pbtxt or .pb file. * * @param modelName the name of the model to import, consisting of characters in [A-Za-z0-9_] * @param modelDir the directory containing the TensorFlow model files to import */ @Override public ImportedModel importModel(String modelName, String modelDir) { return convertToOnnxAndImport(modelName, modelDir); } /** Imports a TensorFlow model - DEPRECATED */ public ImportedModel importModel(String modelName, String modelDir, SavedModelBundle model) { try { IntermediateGraph graph = GraphImporter.importGraph(modelName, model); return convertIntermediateGraphToModel(graph, modelDir); } catch (IOException e) { throw new IllegalArgumentException("Could not import TensorFlow model '" + model + "'", e); } } private Pair<Integer, String> convertToOnnx(String savedModel, String output, int opset) throws IOException { ProcessExecuter executer = new ProcessExecuter(); String job = "vespa-convert-tf2onnx --saved-model " + savedModel + " --output " + output + " --opset " + opset; return executer.exec(job); } }
class TensorFlowImporter extends ModelImporter { private static final Logger log = Logger.getLogger(TensorFlowImporter.class.getName()); private final static int[] onnxOpsetsToTry = {8, 10, 12}; private final OnnxImporter onnxImporter = new OnnxImporter(); @Override public boolean canImport(String modelPath) { File modelDir = new File(modelPath); if ( ! modelDir.isDirectory()) return false; for (File file : modelDir.listFiles()) { if (file.toString().endsWith(".pbtxt")) return true; if (file.toString().endsWith(".pb")) return true; } return false; } /** * Imports a saved TensorFlow model from a directory. * The model should be saved as a .pbtxt or .pb file. * * @param modelName the name of the model to import, consisting of characters in [A-Za-z0-9_] * @param modelDir the directory containing the TensorFlow model files to import */ @Override public ImportedModel importModel(String modelName, String modelDir) { return convertToOnnxAndImport(modelName, modelDir); } /** Imports a TensorFlow model - DEPRECATED */ public ImportedModel importModel(String modelName, String modelDir, SavedModelBundle model) { try { IntermediateGraph graph = GraphImporter.importGraph(modelName, model); return convertIntermediateGraphToModel(graph, modelDir); } catch (IOException e) { throw new IllegalArgumentException("Could not import TensorFlow model '" + model + "'", e); } } private Pair<Integer, String> convertToOnnx(String savedModel, String output, int opset) throws IOException { ProcessExecuter executer = new ProcessExecuter(); String job = "vespa-convert-tf2onnx --saved-model " + savedModel + " --output " + output + " --opset " + opset; return executer.exec(job); } }
I think this should probably be logged at Level.FINE rather than INFO?
private ImportedModel convertToOnnxAndImport(String modelName, String modelDir) { Path tempDir = null; try { tempDir = Files.createTempDirectory("tf2onnx"); String convertedPath = tempDir.toString() + File.separatorChar + "converted.onnx"; for (int opset : onnxOpsetsToTry) { log.info("Converting TensorFlow model '" + modelDir + "' to ONNX with opset " + opset + "..."); Pair<Integer, String> res = convertToOnnx(modelDir, convertedPath, opset); if (res.getFirst() == 0) { log.info("Conversion to ONNX with opset " + opset + " successful."); return onnxImporter.importModel(modelName, convertedPath); } log.info("Conversion to ONNX with opset " + opset + " failed. Reason: " + res.getSecond()); } throw new IllegalArgumentException("Unable to convert TensorFlow model in '" + modelDir + "' to ONNX."); } catch (IOException e) { throw new IllegalArgumentException("Conversion from TensorFlow to ONNX failed for '" + modelDir + "'"); } finally { if (tempDir != null) { IOUtils.recursiveDeleteDir(tempDir.toFile()); } } }
log.info("Conversion to ONNX with opset " + opset + " failed. Reason: " + res.getSecond());
private ImportedModel convertToOnnxAndImport(String modelName, String modelDir) { Path tempDir = null; try { tempDir = Files.createTempDirectory("tf2onnx"); String convertedPath = tempDir.toString() + File.separatorChar + "converted.onnx"; String outputOfLastConversionAttempt = ""; for (int opset : onnxOpsetsToTry) { log.info("Converting TensorFlow model '" + modelDir + "' to ONNX with opset " + opset + "..."); Pair<Integer, String> res = convertToOnnx(modelDir, convertedPath, opset); if (res.getFirst() == 0) { log.info("Conversion to ONNX with opset " + opset + " successful."); return onnxImporter.importModel(modelName, convertedPath); } log.fine("Conversion to ONNX with opset " + opset + " failed. Reason: " + res.getSecond()); outputOfLastConversionAttempt = res.getSecond(); } throw new IllegalArgumentException("Unable to convert TensorFlow model in '" + modelDir + "' to ONNX. " + "Reason: " + outputOfLastConversionAttempt); } catch (IOException e) { throw new IllegalArgumentException("Conversion from TensorFlow to ONNX failed for '" + modelDir + "'"); } finally { if (tempDir != null) { IOUtils.recursiveDeleteDir(tempDir.toFile()); } } }
class TensorFlowImporter extends ModelImporter { private static final Logger log = Logger.getLogger(TensorFlowImporter.class.getName()); private final static int[] onnxOpsetsToTry = {8, 10, 12}; private final OnnxImporter onnxImporter = new OnnxImporter(); @Override public boolean canImport(String modelPath) { File modelDir = new File(modelPath); if ( ! modelDir.isDirectory()) return false; for (File file : modelDir.listFiles()) { if (file.toString().endsWith(".pbtxt")) return true; if (file.toString().endsWith(".pb")) return true; } return false; } /** * Imports a saved TensorFlow model from a directory. * The model should be saved as a .pbtxt or .pb file. * * @param modelName the name of the model to import, consisting of characters in [A-Za-z0-9_] * @param modelDir the directory containing the TensorFlow model files to import */ @Override public ImportedModel importModel(String modelName, String modelDir) { return convertToOnnxAndImport(modelName, modelDir); } /** Imports a TensorFlow model - DEPRECATED */ public ImportedModel importModel(String modelName, String modelDir, SavedModelBundle model) { try { IntermediateGraph graph = GraphImporter.importGraph(modelName, model); return convertIntermediateGraphToModel(graph, modelDir); } catch (IOException e) { throw new IllegalArgumentException("Could not import TensorFlow model '" + model + "'", e); } } private Pair<Integer, String> convertToOnnx(String savedModel, String output, int opset) throws IOException { ProcessExecuter executer = new ProcessExecuter(); String job = "vespa-convert-tf2onnx --saved-model " + savedModel + " --output " + output + " --opset " + opset; return executer.exec(job); } }
class TensorFlowImporter extends ModelImporter { private static final Logger log = Logger.getLogger(TensorFlowImporter.class.getName()); private final static int[] onnxOpsetsToTry = {8, 10, 12}; private final OnnxImporter onnxImporter = new OnnxImporter(); @Override public boolean canImport(String modelPath) { File modelDir = new File(modelPath); if ( ! modelDir.isDirectory()) return false; for (File file : modelDir.listFiles()) { if (file.toString().endsWith(".pbtxt")) return true; if (file.toString().endsWith(".pb")) return true; } return false; } /** * Imports a saved TensorFlow model from a directory. * The model should be saved as a .pbtxt or .pb file. * * @param modelName the name of the model to import, consisting of characters in [A-Za-z0-9_] * @param modelDir the directory containing the TensorFlow model files to import */ @Override public ImportedModel importModel(String modelName, String modelDir) { return convertToOnnxAndImport(modelName, modelDir); } /** Imports a TensorFlow model - DEPRECATED */ public ImportedModel importModel(String modelName, String modelDir, SavedModelBundle model) { try { IntermediateGraph graph = GraphImporter.importGraph(modelName, model); return convertIntermediateGraphToModel(graph, modelDir); } catch (IOException e) { throw new IllegalArgumentException("Could not import TensorFlow model '" + model + "'", e); } } private Pair<Integer, String> convertToOnnx(String savedModel, String output, int opset) throws IOException { ProcessExecuter executer = new ProcessExecuter(); String job = "vespa-convert-tf2onnx --saved-model " + savedModel + " --output " + output + " --opset " + opset; return executer.exec(job); } }
In the else branch: 1. shouldn't `e0` still be split here? 2. should this be `ImmutableList.of(e0, e2)` rather than `ImmutableList.of(e0, e1)`?
/**
 * Builds the AST node for a plain function call, special-casing functions whose
 * arguments must be rewritten at parse time (time_slice/date_slice, date arithmetic,
 * element_at, isnull/isnotnull, arithmetic operators, str_to_map, connection_id,
 * map, substr/substring, lpad/rpad, dict_mapping) before falling back to a
 * generic FunctionCallExpr, optionally wrapped in an OVER clause.
 */
public ParseNode visitSimpleFunctionCall(StarRocksParser.SimpleFunctionCallContext context) {
    String fullFunctionName = getQualifiedName(context.qualifiedName()).toString();
    NodePosition pos = createPos(context);
    FunctionName fnName = FunctionName.createFnName(fullFunctionName);
    String functionName = fnName.getFunction();
    // time_slice / date_slice: expand the interval argument into the explicit
    // (value, unit, boundary) argument list the function expects.
    if (functionName.equals(FunctionSet.TIME_SLICE) || functionName.equals(FunctionSet.DATE_SLICE)) {
        if (context.expression().size() == 2) {
            Expr e1 = (Expr) visit(context.expression(0));
            Expr e2 = (Expr) visit(context.expression(1));
            // A bare expression is treated as a number of DAYs.
            if (!(e2 instanceof IntervalLiteral)) {
                e2 = new IntervalLiteral(e2, new UnitIdentifier("DAY"));
            }
            IntervalLiteral intervalLiteral = (IntervalLiteral) e2;
            // Two-argument form defaults the boundary to "floor".
            FunctionCallExpr functionCallExpr = new FunctionCallExpr(fnName, getArgumentsForTimeSlice(e1,
                    intervalLiteral.getValue(), intervalLiteral.getUnitIdentifier().getDescription().toLowerCase(),
                    "floor"), pos);
            return functionCallExpr;
        } else if (context.expression().size() == 3) {
            Expr e1 = (Expr) visit(context.expression(0));
            Expr e2 = (Expr) visit(context.expression(1));
            if (!(e2 instanceof IntervalLiteral)) {
                e2 = new IntervalLiteral(e2, new UnitIdentifier("DAY"));
            }
            IntervalLiteral intervalLiteral = (IntervalLiteral) e2;
            // Third argument must be a unit boundary keyword (e.g. floor/ceil).
            ParseNode e3 = visit(context.expression(2));
            if (!(e3 instanceof UnitBoundary)) {
                throw new ParsingException(PARSER_ERROR_MSG.wrongTypeOfArgs(functionName), e3.getPos());
            }
            UnitBoundary unitBoundary = (UnitBoundary) e3;
            FunctionCallExpr functionCallExpr = new FunctionCallExpr(fnName, getArgumentsForTimeSlice(e1,
                    intervalLiteral.getValue(), intervalLiteral.getUnitIdentifier().getDescription().toLowerCase(),
                    unitBoundary.getDescription().toLowerCase()), pos);
            return functionCallExpr;
        } else if (context.expression().size() == 4) {
            // Four-argument form passes unit and boundary as string literals.
            Expr e1 = (Expr) visit(context.expression(0));
            Expr e2 = (Expr) visit(context.expression(1));
            Expr e3 = (Expr) visit(context.expression(2));
            Expr e4 = (Expr) visit(context.expression(3));
            if (!(e3 instanceof StringLiteral)) {
                throw new ParsingException(PARSER_ERROR_MSG.wrongTypeOfArgs(functionName), e3.getPos());
            }
            String ident = ((StringLiteral) e3).getValue();
            if (!(e4 instanceof StringLiteral)) {
                throw new ParsingException(PARSER_ERROR_MSG.wrongTypeOfArgs(functionName), e4.getPos());
            }
            String boundary = ((StringLiteral) e4).getValue();
            return new FunctionCallExpr(fnName, getArgumentsForTimeSlice(e1, e2, ident, boundary));
        } else {
            throw new ParsingException(PARSER_ERROR_MSG.wrongNumOfArgs(functionName), pos);
        }
    }
    // date_add/date_sub family: rewrite into timestamp arithmetic with an interval unit.
    if (DATE_FUNCTIONS.contains(functionName)) {
        if (context.expression().size() != 2) {
            throw new ParsingException(PARSER_ERROR_MSG.wrongNumOfArgs(functionName), pos);
        }
        Expr e1 = (Expr) visit(context.expression(0));
        Expr e2 = (Expr) visit(context.expression(1));
        if (!(e2 instanceof IntervalLiteral)) {
            e2 = new IntervalLiteral(e2, new UnitIdentifier("DAY"));
        }
        IntervalLiteral intervalLiteral = (IntervalLiteral) e2;
        return new TimestampArithmeticExpr(functionName, e1, intervalLiteral.getValue(),
                intervalLiteral.getUnitIdentifier().getDescription(), pos);
    }
    // element_at(collection, key) becomes a collection subscript expression.
    if (functionName.equals(FunctionSet.ELEMENT_AT)) {
        List<Expr> params = visit(context.expression(), Expr.class);
        if (params.size() != 2) {
            throw new ParsingException(PARSER_ERROR_MSG.wrongNumOfArgs(functionName), pos);
        }
        return new CollectionElementExpr(params.get(0), params.get(1), false);
    }
    // isnull/isnotnull become IS [NOT] NULL predicates.
    if (functionName.equals(FunctionSet.ISNULL)) {
        List<Expr> params = visit(context.expression(), Expr.class);
        if (params.size() != 1) {
            throw new ParsingException(PARSER_ERROR_MSG.wrongNumOfArgs(functionName), pos);
        }
        return new IsNullPredicate(params.get(0), false, pos);
    }
    if (functionName.equals(FunctionSet.ISNOTNULL)) {
        List<Expr> params = visit(context.expression(), Expr.class);
        if (params.size() != 1) {
            throw new ParsingException(PARSER_ERROR_MSG.wrongNumOfArgs(functionName), pos);
        }
        return new IsNullPredicate(params.get(0), true, pos);
    }
    // Arithmetic operators called as functions (unary or binary; e2 is null for unary).
    if (ArithmeticExpr.isArithmeticExpr(fnName.getFunction())) {
        if (context.expression().size() < 1) {
            throw new ParsingException(PARSER_ERROR_MSG.wrongNumOfArgs(functionName), pos);
        }
        Expr e1 = (Expr) visit(context.expression(0));
        Expr e2 = context.expression().size() > 1 ? (Expr) visit(context.expression(1)) : null;
        return new ArithmeticExpr(ArithmeticExpr.getArithmeticOperator(fnName.getFunction()), e1, e2, pos);
    }
    // str_to_map: fill in default delimiters ("," between pairs, ":" inside a pair)
    // for the one- and two-argument forms.
    if (functionName.equals(FunctionSet.STR_TO_MAP)) {
        Expr e0;
        Expr e1;
        Expr e2;
        String collectionDelimiter = ",";
        String mapDelimiter = ":";
        if (context.expression().size() == 1) {
            e0 = (Expr) visit(context.expression(0));
            e1 = new StringLiteral(collectionDelimiter, pos);
            e2 = new StringLiteral(mapDelimiter, pos);
        } else if (context.expression().size() == 2) {
            e0 = (Expr) visit(context.expression(0));
            e1 = (Expr) visit(context.expression(1));
            e2 = new StringLiteral(mapDelimiter, pos);
        } else if (context.expression().size() == 3) {
            e0 = (Expr) visit(context.expression(0));
            e1 = (Expr) visit(context.expression(1));
            e2 = (Expr) visit(context.expression(2));
        } else {
            throw new ParsingException(PARSER_ERROR_MSG.wrongNumOfArgs(FunctionSet.STR_TO_MAP));
        }
        if (e0.getType().isStringType()) {
            // String input: pre-split on the collection delimiter, then map over the pairs.
            FunctionCallExpr split = new FunctionCallExpr(FunctionSet.SPLIT, ImmutableList.of(e0, e1), pos);
            return new FunctionCallExpr(functionName, ImmutableList.of(split, e2), pos);
        } else {
            // NOTE(review): unlike the branch above, e0 is not split here and e2 (the map
            // delimiter) is dropped — should this be split too, or pass ImmutableList.of(e0, e2)?
            // Verify the intended behavior for non-string e0.
            return new FunctionCallExpr(functionName, ImmutableList.of(e0, e1), pos);
        }
    }
    // connection_id() is an information function, not a regular call.
    if (fnName.getFunction().equalsIgnoreCase(FunctionSet.CONNECTION_ID)) {
        return new InformationFunction(FunctionSet.CONNECTION_ID.toUpperCase());
    }
    // map(k1, v1, k2, v2, ...): arguments must come in key/value pairs.
    if (functionName.equals(FunctionSet.MAP)) {
        List<Expr> exprs;
        if (context.expression() != null) {
            int num = context.expression().size();
            if (num % 2 == 1) {
                throw new ParsingException(PARSER_ERROR_MSG.wrongNumOfArgs(num, "map()",
                        "Arguments must be in key/value pairs"), pos);
            }
            exprs = visit(context.expression(), Expr.class);
        } else {
            exprs = Collections.emptyList();
        }
        return new MapExpr(Type.ANY_MAP, exprs, pos);
    }
    // substr/substring: position/length arguments are coerced to integer type.
    if (functionName.equals(FunctionSet.SUBSTR) || functionName.equals(FunctionSet.SUBSTRING)) {
        List<Expr> exprs = Lists.newArrayList();
        if (context.expression().size() == 2) {
            Expr e1 = (Expr) visit(context.expression(0));
            Expr e2 = (Expr) visit(context.expression(1));
            exprs.add(e1);
            addArgumentUseTypeInt(e2, exprs);
        } else if (context.expression().size() == 3) {
            Expr e1 = (Expr) visit(context.expression(0));
            Expr e2 = (Expr) visit(context.expression(1));
            Expr e3 = (Expr) visit(context.expression(2));
            exprs.add(e1);
            addArgumentUseTypeInt(e2, exprs);
            addArgumentUseTypeInt(e3, exprs);
        }
        return new FunctionCallExpr(fnName, exprs, pos);
    }
    // lpad/rpad: two-argument form defaults the pad string to a single space.
    if (functionName.equals(FunctionSet.LPAD) || functionName.equals(FunctionSet.RPAD)) {
        if (context.expression().size() == 2) {
            Expr e1 = (Expr) visit(context.expression(0));
            Expr e2 = (Expr) visit(context.expression(1));
            FunctionCallExpr functionCallExpr = new FunctionCallExpr(
                    fnName, Lists.newArrayList(e1, e2, new StringLiteral(" ")), pos);
            return functionCallExpr;
        }
    }
    if (functionName.equals(FunctionSet.DICT_MAPPING)) {
        List<Expr> params = visit(context.expression(), Expr.class);
        return new DictQueryExpr(params);
    }
    // Generic case: build the call as-is, attach an OVER clause if present,
    // and let SyntaxSugars rewrite known sugar forms.
    FunctionCallExpr functionCallExpr = new FunctionCallExpr(fnName,
            new FunctionParams(false, visit(context.expression(), Expr.class)), pos);
    if (context.over() != null) {
        return buildOverClause(functionCallExpr, context.over(), pos);
    }
    return SyntaxSugars.parse(functionCallExpr);
}
return new FunctionCallExpr(functionName, ImmutableList.of(e0, e1), pos);
/**
 * Builds the AST node for a plain function call, special-casing functions whose
 * arguments must be rewritten at parse time (time_slice/date_slice, date arithmetic,
 * element_at, isnull/isnotnull, arithmetic operators, str_to_map, connection_id,
 * map, substr/substring, lpad/rpad, dict_mapping) before falling back to a
 * generic FunctionCallExpr, optionally wrapped in an OVER clause.
 */
public ParseNode visitSimpleFunctionCall(StarRocksParser.SimpleFunctionCallContext context) {
    String fullFunctionName = getQualifiedName(context.qualifiedName()).toString();
    NodePosition pos = createPos(context);
    FunctionName fnName = FunctionName.createFnName(fullFunctionName);
    String functionName = fnName.getFunction();
    // time_slice / date_slice: expand the interval argument into the explicit
    // (value, unit, boundary) argument list the function expects.
    if (functionName.equals(FunctionSet.TIME_SLICE) || functionName.equals(FunctionSet.DATE_SLICE)) {
        if (context.expression().size() == 2) {
            Expr e1 = (Expr) visit(context.expression(0));
            Expr e2 = (Expr) visit(context.expression(1));
            // A bare expression is treated as a number of DAYs.
            if (!(e2 instanceof IntervalLiteral)) {
                e2 = new IntervalLiteral(e2, new UnitIdentifier("DAY"));
            }
            IntervalLiteral intervalLiteral = (IntervalLiteral) e2;
            // Two-argument form defaults the boundary to "floor".
            FunctionCallExpr functionCallExpr = new FunctionCallExpr(fnName, getArgumentsForTimeSlice(e1,
                    intervalLiteral.getValue(), intervalLiteral.getUnitIdentifier().getDescription().toLowerCase(),
                    "floor"), pos);
            return functionCallExpr;
        } else if (context.expression().size() == 3) {
            Expr e1 = (Expr) visit(context.expression(0));
            Expr e2 = (Expr) visit(context.expression(1));
            if (!(e2 instanceof IntervalLiteral)) {
                e2 = new IntervalLiteral(e2, new UnitIdentifier("DAY"));
            }
            IntervalLiteral intervalLiteral = (IntervalLiteral) e2;
            // Third argument must be a unit boundary keyword (e.g. floor/ceil).
            ParseNode e3 = visit(context.expression(2));
            if (!(e3 instanceof UnitBoundary)) {
                throw new ParsingException(PARSER_ERROR_MSG.wrongTypeOfArgs(functionName), e3.getPos());
            }
            UnitBoundary unitBoundary = (UnitBoundary) e3;
            FunctionCallExpr functionCallExpr = new FunctionCallExpr(fnName, getArgumentsForTimeSlice(e1,
                    intervalLiteral.getValue(), intervalLiteral.getUnitIdentifier().getDescription().toLowerCase(),
                    unitBoundary.getDescription().toLowerCase()), pos);
            return functionCallExpr;
        } else if (context.expression().size() == 4) {
            // Four-argument form passes unit and boundary as string literals.
            Expr e1 = (Expr) visit(context.expression(0));
            Expr e2 = (Expr) visit(context.expression(1));
            Expr e3 = (Expr) visit(context.expression(2));
            Expr e4 = (Expr) visit(context.expression(3));
            if (!(e3 instanceof StringLiteral)) {
                throw new ParsingException(PARSER_ERROR_MSG.wrongTypeOfArgs(functionName), e3.getPos());
            }
            String ident = ((StringLiteral) e3).getValue();
            if (!(e4 instanceof StringLiteral)) {
                throw new ParsingException(PARSER_ERROR_MSG.wrongTypeOfArgs(functionName), e4.getPos());
            }
            String boundary = ((StringLiteral) e4).getValue();
            return new FunctionCallExpr(fnName, getArgumentsForTimeSlice(e1, e2, ident, boundary));
        } else {
            throw new ParsingException(PARSER_ERROR_MSG.wrongNumOfArgs(functionName), pos);
        }
    }
    // date_add/date_sub family: rewrite into timestamp arithmetic with an interval unit.
    if (DATE_FUNCTIONS.contains(functionName)) {
        if (context.expression().size() != 2) {
            throw new ParsingException(PARSER_ERROR_MSG.wrongNumOfArgs(functionName), pos);
        }
        Expr e1 = (Expr) visit(context.expression(0));
        Expr e2 = (Expr) visit(context.expression(1));
        if (!(e2 instanceof IntervalLiteral)) {
            e2 = new IntervalLiteral(e2, new UnitIdentifier("DAY"));
        }
        IntervalLiteral intervalLiteral = (IntervalLiteral) e2;
        return new TimestampArithmeticExpr(functionName, e1, intervalLiteral.getValue(),
                intervalLiteral.getUnitIdentifier().getDescription(), pos);
    }
    // element_at(collection, key) becomes a collection subscript expression.
    if (functionName.equals(FunctionSet.ELEMENT_AT)) {
        List<Expr> params = visit(context.expression(), Expr.class);
        if (params.size() != 2) {
            throw new ParsingException(PARSER_ERROR_MSG.wrongNumOfArgs(functionName), pos);
        }
        return new CollectionElementExpr(params.get(0), params.get(1), false);
    }
    // isnull/isnotnull become IS [NOT] NULL predicates.
    if (functionName.equals(FunctionSet.ISNULL)) {
        List<Expr> params = visit(context.expression(), Expr.class);
        if (params.size() != 1) {
            throw new ParsingException(PARSER_ERROR_MSG.wrongNumOfArgs(functionName), pos);
        }
        return new IsNullPredicate(params.get(0), false, pos);
    }
    if (functionName.equals(FunctionSet.ISNOTNULL)) {
        List<Expr> params = visit(context.expression(), Expr.class);
        if (params.size() != 1) {
            throw new ParsingException(PARSER_ERROR_MSG.wrongNumOfArgs(functionName), pos);
        }
        return new IsNullPredicate(params.get(0), true, pos);
    }
    // Arithmetic operators called as functions (unary or binary; e2 is null for unary).
    if (ArithmeticExpr.isArithmeticExpr(fnName.getFunction())) {
        if (context.expression().size() < 1) {
            throw new ParsingException(PARSER_ERROR_MSG.wrongNumOfArgs(functionName), pos);
        }
        Expr e1 = (Expr) visit(context.expression(0));
        Expr e2 = context.expression().size() > 1 ? (Expr) visit(context.expression(1)) : null;
        return new ArithmeticExpr(ArithmeticExpr.getArithmeticOperator(fnName.getFunction()), e1, e2, pos);
    }
    // str_to_map: fill in default delimiters ("," between pairs, ":" inside a pair)
    // for the one- and two-argument forms, then pass all three arguments through.
    if (functionName.equals(FunctionSet.STR_TO_MAP)) {
        Expr e0;
        Expr e1;
        Expr e2;
        String collectionDelimiter = ",";
        String mapDelimiter = ":";
        if (context.expression().size() == 1) {
            e0 = (Expr) visit(context.expression(0));
            e1 = new StringLiteral(collectionDelimiter, pos);
            e2 = new StringLiteral(mapDelimiter, pos);
        } else if (context.expression().size() == 2) {
            e0 = (Expr) visit(context.expression(0));
            e1 = (Expr) visit(context.expression(1));
            e2 = new StringLiteral(mapDelimiter, pos);
        } else if (context.expression().size() == 3) {
            e0 = (Expr) visit(context.expression(0));
            e1 = (Expr) visit(context.expression(1));
            e2 = (Expr) visit(context.expression(2));
        } else {
            throw new ParsingException(PARSER_ERROR_MSG.wrongNumOfArgs(FunctionSet.STR_TO_MAP));
        }
        return new FunctionCallExpr(functionName, ImmutableList.of(e0, e1, e2), pos);
    }
    // connection_id() is an information function, not a regular call.
    if (fnName.getFunction().equalsIgnoreCase(FunctionSet.CONNECTION_ID)) {
        return new InformationFunction(FunctionSet.CONNECTION_ID.toUpperCase());
    }
    // map(k1, v1, k2, v2, ...): arguments must come in key/value pairs.
    if (functionName.equals(FunctionSet.MAP)) {
        List<Expr> exprs;
        if (context.expression() != null) {
            int num = context.expression().size();
            if (num % 2 == 1) {
                throw new ParsingException(PARSER_ERROR_MSG.wrongNumOfArgs(num, "map()",
                        "Arguments must be in key/value pairs"), pos);
            }
            exprs = visit(context.expression(), Expr.class);
        } else {
            exprs = Collections.emptyList();
        }
        return new MapExpr(Type.ANY_MAP, exprs, pos);
    }
    // substr/substring: position/length arguments are coerced to integer type.
    if (functionName.equals(FunctionSet.SUBSTR) || functionName.equals(FunctionSet.SUBSTRING)) {
        List<Expr> exprs = Lists.newArrayList();
        if (context.expression().size() == 2) {
            Expr e1 = (Expr) visit(context.expression(0));
            Expr e2 = (Expr) visit(context.expression(1));
            exprs.add(e1);
            addArgumentUseTypeInt(e2, exprs);
        } else if (context.expression().size() == 3) {
            Expr e1 = (Expr) visit(context.expression(0));
            Expr e2 = (Expr) visit(context.expression(1));
            Expr e3 = (Expr) visit(context.expression(2));
            exprs.add(e1);
            addArgumentUseTypeInt(e2, exprs);
            addArgumentUseTypeInt(e3, exprs);
        }
        return new FunctionCallExpr(fnName, exprs, pos);
    }
    // lpad/rpad: two-argument form defaults the pad string to a single space.
    if (functionName.equals(FunctionSet.LPAD) || functionName.equals(FunctionSet.RPAD)) {
        if (context.expression().size() == 2) {
            Expr e1 = (Expr) visit(context.expression(0));
            Expr e2 = (Expr) visit(context.expression(1));
            FunctionCallExpr functionCallExpr = new FunctionCallExpr(
                    fnName, Lists.newArrayList(e1, e2, new StringLiteral(" ")), pos);
            return functionCallExpr;
        }
    }
    if (functionName.equals(FunctionSet.DICT_MAPPING)) {
        List<Expr> params = visit(context.expression(), Expr.class);
        return new DictQueryExpr(params);
    }
    // Generic case: build the call as-is, attach an OVER clause if present,
    // and let SyntaxSugars rewrite known sugar forms.
    FunctionCallExpr functionCallExpr = new FunctionCallExpr(fnName,
            new FunctionParams(false, visit(context.expression(), Expr.class)), pos);
    if (context.over() != null) {
        return buildOverClause(functionCallExpr, context.over(), pos);
    }
    return SyntaxSugars.parse(functionCallExpr);
}
class AstBuilder extends StarRocksBaseVisitor<ParseNode> { private final long sqlMode; private final IdentityHashMap<ParserRuleContext, List<HintNode>> hintMap; private int placeHolderSlotId = 0; private List<Parameter> parameters; private static final BigInteger LONG_MAX = new BigInteger("9223372036854775807"); private static final BigInteger LARGEINT_MAX_ABS = new BigInteger("170141183460469231731687303715884105728"); private static final List<String> DATE_FUNCTIONS = Lists.newArrayList(FunctionSet.DATE_ADD, FunctionSet.ADDDATE, FunctionSet.DATE_ADD, FunctionSet.DATE_SUB, FunctionSet.SUBDATE, FunctionSet.DAYS_SUB); private static final List<String> PARTITION_FUNCTIONS = Lists.newArrayList(FunctionSet.SUBSTR, FunctionSet.SUBSTRING, FunctionSet.FROM_UNIXTIME, FunctionSet.FROM_UNIXTIME_MS, FunctionSet.STR2DATE); public AstBuilder(long sqlMode) { this(sqlMode, new IdentityHashMap<>()); } public AstBuilder(long sqlMode, IdentityHashMap<ParserRuleContext, List<HintNode>> hintMap) { this.hintMap = hintMap; long hintSqlMode = 0L; for (Map.Entry<ParserRuleContext, List<HintNode>> entry : hintMap.entrySet()) { for (HintNode hint : entry.getValue()) { if (hint instanceof SetVarHint) { SetVarHint setVarHint = (SetVarHint) hint; hintSqlMode = setVarHint.getSqlModeHintValue(); } } } this.sqlMode = sqlMode | hintSqlMode; } public List<Parameter> getParameters() { return parameters; } @Override public ParseNode visitSingleStatement(StarRocksParser.SingleStatementContext context) { if (context.statement() != null) { StatementBase stmt = (StatementBase) visit(context.statement()); if (MapUtils.isNotEmpty(hintMap)) { stmt.setAllQueryScopeHints(extractQueryScopeHintNode()); hintMap.clear(); } return stmt; } else { return visit(context.emptyStatement()); } } @Override public ParseNode visitEmptyStatement(StarRocksParser.EmptyStatementContext context) { return new EmptyStmt(); } @Override public ParseNode visitUseDatabaseStatement(StarRocksParser.UseDatabaseStatementContext context) { 
NodePosition pos = createPos(context); QualifiedName qualifiedName = getQualifiedName(context.qualifiedName()); List<String> parts = qualifiedName.getParts(); if (parts.size() == 1) { return new UseDbStmt(null, parts.get(0), pos); } else if (parts.size() == 2) { return new UseDbStmt(parts.get(0), parts.get(1), pos); } else { throw new ParsingException(PARSER_ERROR_MSG.invalidDbFormat(qualifiedName.toString()), qualifiedName.getPos()); } } @Override public ParseNode visitUseCatalogStatement(StarRocksParser.UseCatalogStatementContext context) { StringLiteral literal = (StringLiteral) visit(context.string()); return new UseCatalogStmt(literal.getValue(), createPos(context)); } @Override public ParseNode visitSetCatalogStatement(StarRocksParser.SetCatalogStatementContext context) { Identifier identifier = (Identifier) visit(context.identifierOrString()); String catalogName = identifier.getValue(); return new SetCatalogStmt(catalogName, createPos(context)); } @Override public ParseNode visitShowDatabasesStatement(StarRocksParser.ShowDatabasesStatementContext context) { String catalog = null; NodePosition pos = createPos(context); if (context.catalog != null) { QualifiedName dbName = getQualifiedName(context.catalog); catalog = dbName.toString(); } if (context.pattern != null) { StringLiteral stringLiteral = (StringLiteral) visit(context.pattern); return new ShowDbStmt(stringLiteral.getValue(), null, catalog, pos); } else if (context.expression() != null) { return new ShowDbStmt(null, (Expr) visit(context.expression()), catalog, pos); } else { return new ShowDbStmt(null, null, catalog, pos); } } @Override public ParseNode visitAlterDbQuotaStatement(StarRocksParser.AlterDbQuotaStatementContext context) { String dbName = ((Identifier) visit(context.identifier(0))).getValue(); NodePosition pos = createPos(context); if (context.DATA() != null) { String quotaValue = ((Identifier) visit(context.identifier(1))).getValue(); return new AlterDatabaseQuotaStmt(dbName, 
AlterDatabaseQuotaStmt.QuotaType.DATA, quotaValue, pos); } else { String quotaValue = context.INTEGER_VALUE().getText(); return new AlterDatabaseQuotaStmt(dbName, AlterDatabaseQuotaStmt.QuotaType.REPLICA, quotaValue, pos); } } @Override public ParseNode visitCreateDbStatement(StarRocksParser.CreateDbStatementContext context) { String catalogName = ""; if (context.catalog != null) { catalogName = getIdentifierName(context.catalog); } String dbName = getIdentifierName(context.database); Map<String, String> properties = new HashMap<>(); if (context.properties() != null) { List<Property> propertyList = visit(context.properties().property(), Property.class); for (Property property : propertyList) { properties.put(property.getKey(), property.getValue()); } } return new CreateDbStmt(context.IF() != null, catalogName, dbName, properties, createPos(context)); } @Override public ParseNode visitDropDbStatement(StarRocksParser.DropDbStatementContext context) { String catalogName = ""; if (context.catalog != null) { catalogName = getIdentifierName(context.catalog); } String dbName = getIdentifierName(context.database); return new DropDbStmt(context.IF() != null, catalogName, dbName, context.FORCE() != null, createPos(context)); } @Override public ParseNode visitShowCreateDbStatement(StarRocksParser.ShowCreateDbStatementContext context) { String dbName = ((Identifier) visit(context.identifier())).getValue(); return new ShowCreateDbStmt(dbName, createPos(context)); } @Override public ParseNode visitAlterDatabaseRenameStatement(StarRocksParser.AlterDatabaseRenameStatementContext context) { String dbName = ((Identifier) visit(context.identifier(0))).getValue(); String newName = ((Identifier) visit(context.identifier(1))).getValue(); return new AlterDatabaseRenameStatement(dbName, newName, createPos(context)); } @Override public ParseNode visitRecoverDbStmt(StarRocksParser.RecoverDbStmtContext context) { String dbName = ((Identifier) visit(context.identifier())).getValue(); return 
new RecoverDbStmt(dbName, createPos(context)); } @Override public ParseNode visitShowDataStmt(StarRocksParser.ShowDataStmtContext context) { NodePosition pos = createPos(context); if (context.FROM() != null) { QualifiedName qualifiedName = getQualifiedName(context.qualifiedName()); TableName targetTableName = qualifiedNameToTableName(qualifiedName); return new ShowDataStmt(targetTableName.getDb(), targetTableName.getTbl(), pos); } else { return new ShowDataStmt(null, null, pos); } } @Override public ParseNode visitCreateTableStatement(StarRocksParser.CreateTableStatementContext context) { Map<String, String> properties = null; if (context.properties() != null) { properties = new HashMap<>(); List<Property> propertyList = visit(context.properties().property(), Property.class); for (Property property : propertyList) { properties.put(property.getKey(), property.getValue()); } } Map<String, String> extProperties = null; if (context.extProperties() != null) { extProperties = new HashMap<>(); List<Property> propertyList = visit(context.extProperties().properties().property(), Property.class); for (Property property : propertyList) { extProperties.put(property.getKey(), property.getValue()); } } TableName tableName = qualifiedNameToTableName(getQualifiedName(context.qualifiedName())); List<ColumnDef> columnDefs = null; if (context.columnDesc() != null) { columnDefs = getColumnDefs(context.columnDesc()); } return new CreateTableStmt( context.IF() != null, context.EXTERNAL() != null, tableName, columnDefs, context.indexDesc() == null ? null : getIndexDefs(context.indexDesc()), context.engineDesc() == null ? "" : ((Identifier) visit(context.engineDesc().identifier())).getValue(), context.charsetDesc() == null ? null : ((Identifier) visit(context.charsetDesc().identifierOrString())).getValue(), context.keyDesc() == null ? null : getKeysDesc(context.keyDesc()), context.partitionDesc() == null ? 
null : getPartitionDesc(context.partitionDesc(), columnDefs), context.distributionDesc() == null ? null : (DistributionDesc) visit(context.distributionDesc()), properties, extProperties, context.comment() == null ? null : ((StringLiteral) visit(context.comment().string())).getStringValue(), context.rollupDesc() == null ? null : context.rollupDesc().rollupItem().stream().map(this::getRollup).collect(toList()), context.orderByDesc() == null ? null : visit(context.orderByDesc().identifierList().identifier(), Identifier.class) .stream().map(Identifier::getValue).collect(toList())); } private PartitionDesc getPartitionDesc(StarRocksParser.PartitionDescContext context, List<ColumnDef> columnDefs) { List<PartitionDesc> partitionDescList = new ArrayList<>(); if (context.functionCall() != null) { String currentGranularity = null; for (StarRocksParser.RangePartitionDescContext rangePartitionDescContext : context.rangePartitionDesc()) { final PartitionDesc rangePartitionDesc = (PartitionDesc) visit(rangePartitionDescContext); if (!(rangePartitionDesc instanceof MultiRangePartitionDesc)) { throw new ParsingException("Automatic partition table creation only supports " + "batch create partition syntax", rangePartitionDesc.getPos()); } MultiRangePartitionDesc multiRangePartitionDesc = (MultiRangePartitionDesc) rangePartitionDesc; String descGranularity = multiRangePartitionDesc.getTimeUnit().toLowerCase(); if (currentGranularity == null) { currentGranularity = descGranularity; } else if (!currentGranularity.equals(descGranularity)) { throw new ParsingException("The partition granularity of automatic partition table " + "batch creation in advance should be consistent", rangePartitionDesc.getPos()); } partitionDescList.add(rangePartitionDesc); } FunctionCallExpr functionCallExpr = (FunctionCallExpr) visit(context.functionCall()); List<String> columnList = AnalyzerUtils.checkAndExtractPartitionCol(functionCallExpr, columnDefs); 
AnalyzerUtils.checkAutoPartitionTableLimit(functionCallExpr, currentGranularity); RangePartitionDesc rangePartitionDesc = new RangePartitionDesc(columnList, partitionDescList); rangePartitionDesc.setAutoPartitionTable(true); return new ExpressionPartitionDesc(rangePartitionDesc, functionCallExpr); } StarRocksParser.PrimaryExpressionContext primaryExpressionContext = context.primaryExpression(); if (primaryExpressionContext != null) { Expr primaryExpression = (Expr) visit(primaryExpressionContext); if (context.RANGE() != null) { for (StarRocksParser.RangePartitionDescContext rangePartitionDescContext : context.rangePartitionDesc()) { final PartitionDesc rangePartitionDesc = (PartitionDesc) visit(rangePartitionDescContext); partitionDescList.add(rangePartitionDesc); } } List<String> columnList = checkAndExtractPartitionColForRange(primaryExpression, false); RangePartitionDesc rangePartitionDesc = new RangePartitionDesc(columnList, partitionDescList); if (primaryExpression instanceof FunctionCallExpr) { FunctionCallExpr functionCallExpr = (FunctionCallExpr) primaryExpression; String functionName = functionCallExpr.getFnName().getFunction(); if (FunctionSet.FROM_UNIXTIME.equals(functionName) || FunctionSet.FROM_UNIXTIME_MS.equals(functionName)) { primaryExpression = new CastExpr(TypeDef.create(PrimitiveType.DATETIME), primaryExpression); } } return new ExpressionPartitionDesc(rangePartitionDesc, primaryExpression); } List<Identifier> identifierList = visit(context.identifierList().identifier(), Identifier.class); List<String> columnList = identifierList.stream().map(Identifier::getValue).collect(toList()); if (context.RANGE() != null) { for (StarRocksParser.RangePartitionDescContext rangePartitionDescContext : context.rangePartitionDesc()) { final PartitionDesc rangePartitionDesc = (PartitionDesc) visit(rangePartitionDescContext); partitionDescList.add(rangePartitionDesc); } return new RangePartitionDesc(columnList, partitionDescList); } else if (context.LIST() != 
null) { for (StarRocksParser.ListPartitionDescContext listPartitionDescContext : context.listPartitionDesc()) { final PartitionDesc listPartitionDesc = (PartitionDesc) visit(listPartitionDescContext); partitionDescList.add(listPartitionDesc); } return new ListPartitionDesc(columnList, partitionDescList); } else { if (context.listPartitionDesc().size() > 0) { throw new ParsingException("Does not support creating partitions in advance"); } ListPartitionDesc listPartitionDesc = new ListPartitionDesc(columnList, partitionDescList); listPartitionDesc.setAutoPartitionTable(true); return listPartitionDesc; } } private List<String> checkAndExtractPartitionColForRange(Expr expr, boolean hasCast) { if (expr instanceof CastExpr) { CastExpr castExpr = (CastExpr) expr; return checkAndExtractPartitionColForRange(castExpr.getChild(0), true); } NodePosition pos = expr.getPos(); List<String> columnList = new ArrayList<>(); if (expr instanceof FunctionCallExpr) { FunctionCallExpr functionCallExpr = (FunctionCallExpr) expr; String functionName = functionCallExpr.getFnName().getFunction().toLowerCase(); List<Expr> paramsExpr = functionCallExpr.getParams().exprs(); if (PARTITION_FUNCTIONS.contains(functionName)) { Expr firstExpr = paramsExpr.get(0); if (firstExpr instanceof SlotRef) { columnList.add(((SlotRef) firstExpr).getColumnName()); } else { throw new ParsingException(PARSER_ERROR_MSG.unsupportedExprWithInfo(expr.toSql(), "PARTITION BY"), pos); } } else { throw new ParsingException(PARSER_ERROR_MSG.unsupportedExprWithInfo(expr.toSql(), "PARTITION BY"), pos); } if (functionName.equals(FunctionSet.FROM_UNIXTIME) || functionName.equals(FunctionSet.FROM_UNIXTIME_MS)) { if (hasCast || paramsExpr.size() > 1) { throw new ParsingException(PARSER_ERROR_MSG.unsupportedExprWithInfo(expr.toSql(), "PARTITION BY"), pos); } } } return columnList; } private AlterClause getRollup(StarRocksParser.RollupItemContext rollupItemContext) { String rollupName = ((Identifier) 
visit(rollupItemContext.identifier())).getValue(); List<Identifier> columnList = visit(rollupItemContext.identifierList().identifier(), Identifier.class); List<String> dupKeys = null; if (rollupItemContext.dupKeys() != null) { final List<Identifier> identifierList = visit(rollupItemContext.dupKeys().identifierList().identifier(), Identifier.class); dupKeys = identifierList.stream().map(Identifier::getValue).collect(toList()); } String baseRollupName = rollupItemContext.fromRollup() != null ? ((Identifier) visit(rollupItemContext.fromRollup().identifier())).getValue() : null; Map<String, String> properties = null; if (rollupItemContext.properties() != null) { properties = new HashMap<>(); List<Property> propertyList = visit(rollupItemContext.properties().property(), Property.class); for (Property property : propertyList) { properties.put(property.getKey(), property.getValue()); } } return new AddRollupClause(rollupName, columnList.stream().map(Identifier::getValue).collect(toList()), dupKeys, baseRollupName, properties, createPos(rollupItemContext)); } private KeysDesc getKeysDesc(StarRocksParser.KeyDescContext context) { KeysType keysType = null; if (null != context.PRIMARY()) { keysType = KeysType.PRIMARY_KEYS; } else if (null != context.DUPLICATE()) { keysType = KeysType.DUP_KEYS; } else if (null != context.AGGREGATE()) { keysType = KeysType.AGG_KEYS; } else if (null != context.UNIQUE()) { keysType = KeysType.UNIQUE_KEYS; } List<Identifier> columnList = visit(context.identifierList().identifier(), Identifier.class); return new KeysDesc(keysType, columnList.stream().map(Identifier::getValue).collect(toList()), createPos(context)); } private List<IndexDef> getIndexDefs(List<StarRocksParser.IndexDescContext> indexDesc) { List<IndexDef> indexDefList = new ArrayList<>(); for (StarRocksParser.IndexDescContext context : indexDesc) { String indexName = ((Identifier) visit(context.identifier())).getValue(); List<Identifier> columnList = 
visit(context.identifierList().identifier(), Identifier.class); String comment = context.comment() != null ? ((StringLiteral) visit(context.comment())).getStringValue() : null; final IndexDef indexDef = new IndexDef(indexName, columnList.stream().map(Identifier::getValue).collect(toList()), getIndexType(context.indexType()), comment, getPropertyList(context.propertyList()), createPos(context)); indexDefList.add(indexDef); } return indexDefList; } private List<ColumnDef> getColumnDefs(List<StarRocksParser.ColumnDescContext> columnDesc) { return columnDesc.stream().map(context -> getColumnDef(context)).collect(toList()); } private ColumnDef getColumnDef(StarRocksParser.ColumnDescContext context) { Identifier colIdentifier = (Identifier) visit(context.identifier()); String columnName = colIdentifier.getValue(); TypeDef typeDef = new TypeDef(getType(context.type()), createPos(context.type())); String charsetName = context.charsetName() != null ? ((Identifier) visit(context.charsetName().identifier())).getValue() : null; boolean isKey = context.KEY() != null; AggregateType aggregateType = context.aggDesc() != null ? 
AggregateType.valueOf(context.aggDesc().getText().toUpperCase()) : null;
// Tri-state nullability: null = unspecified, TRUE = `NULL`, FALSE = `NOT NULL`.
Boolean isAllowNull = null;
if (context.NOT() != null && context.NULL() != null) {
    isAllowNull = false;
} else if (context.NULL() != null) {
    isAllowNull = true;
}
Boolean isAutoIncrement = null;
if (context.AUTO_INCREMENT() != null) {
    isAutoIncrement = true;
}
// An AUTO_INCREMENT column must not be declared nullable.
// NOTE(review): the message helper is named nullColFoundInPK although this is the
// auto-increment path — confirm the wording fits.
if (isAutoIncrement != null && Boolean.TRUE.equals(isAllowNull)) {
    throw new ParsingException(PARSER_ERROR_MSG.nullColFoundInPK(columnName), colIdentifier.getPos());
}
// AUTO_INCREMENT implies NOT NULL when nullability was left unspecified.
if (isAutoIncrement != null) {
    isAllowNull = false;
}
// DEFAULT clause: string literal, NULL, CURRENT_TIMESTAMP, or a zero-arg function.
ColumnDef.DefaultValueDef defaultValueDef = ColumnDef.DefaultValueDef.NOT_SET;
final StarRocksParser.DefaultDescContext defaultDescContext = context.defaultDesc();
if (defaultDescContext != null) {
    if (defaultDescContext.string() != null) {
        String value = ((StringLiteral) visit(defaultDescContext.string())).getStringValue();
        defaultValueDef = new ColumnDef.DefaultValueDef(true, new StringLiteral(value));
    } else if (defaultDescContext.NULL() != null) {
        defaultValueDef = ColumnDef.DefaultValueDef.NULL_DEFAULT_VALUE;
    } else if (defaultDescContext.CURRENT_TIMESTAMP() != null) {
        defaultValueDef = ColumnDef.DefaultValueDef.CURRENT_TIMESTAMP_VALUE;
    } else if (defaultDescContext.qualifiedName() != null) {
        String functionName = defaultDescContext.qualifiedName().getText().toLowerCase();
        defaultValueDef = new ColumnDef.DefaultValueDef(true,
                new FunctionCallExpr(functionName, new ArrayList<>()));
    }
}
// Generated (materialized) column: may be neither NOT NULL nor a key column.
final StarRocksParser.GeneratedColumnDescContext generatedColumnDescContext =
        context.generatedColumnDesc();
Expr expr = null;
if (generatedColumnDescContext != null) {
    // Idiom fix: Boolean.FALSE.equals(x) replaces `x != null && x == false`.
    if (Boolean.FALSE.equals(isAllowNull)) {
        throw new ParsingException(PARSER_ERROR_MSG.foundNotNull("Generated Column"));
    }
    if (isKey) {
        throw new ParsingException(PARSER_ERROR_MSG.isKey("Generated Column"));
    }
    expr = (Expr) visit(generatedColumnDescContext.expression());
}
String comment = context.comment() == null ?
"" : ((StringLiteral) visit(context.comment().string())).getStringValue(); return new ColumnDef(columnName, typeDef, charsetName, isKey, aggregateType, isAllowNull, defaultValueDef, isAutoIncrement, expr, comment, createPos(context)); } @Override public ParseNode visitCreateTemporaryTableStatement(StarRocksParser.CreateTemporaryTableStatementContext context) { if (!Config.enable_experimental_temporary_table) { throw new SemanticException( "Temporary table feature is experimental and disabled by default, could be enabled through " + ": admin set frontend config('enable_experimental_temporary_table' = 'true')"); } CreateTableStmt createTableStmt = new CreateTableStmt( false, false, qualifiedNameToTableName(getQualifiedName(context.qualifiedName())), null, EngineType.defaultEngine().name(), null, null, null, new HashMap<>(), null, null); return new CreateTableAsSelectStmt( createTableStmt, null, (QueryStatement) visit(context.queryStatement())); } @Override public ParseNode visitCreateTableAsSelectStatement(StarRocksParser.CreateTableAsSelectStatementContext context) { Map<String, String> properties = new HashMap<>(); if (context.properties() != null) { List<Property> propertyList = visit(context.properties().property(), Property.class); for (Property property : propertyList) { properties.put(property.getKey(), property.getValue()); } } PartitionDesc partitionDesc = null; if (context.partitionDesc() != null) { partitionDesc = (PartitionDesc) visit(context.partitionDesc()); if (partitionDesc instanceof ListPartitionDesc && context.partitionDesc().LIST() == null) { ((ListPartitionDesc) partitionDesc).setAutoPartitionTable(true); } } CreateTableStmt createTableStmt = new CreateTableStmt( context.IF() != null, false, qualifiedNameToTableName(getQualifiedName(context.qualifiedName())), null, context.indexDesc() == null ? null : getIndexDefs(context.indexDesc()), "", null, context.keyDesc() == null ? 
null : getKeysDesc(context.keyDesc()), partitionDesc, context.distributionDesc() == null ? null : (DistributionDesc) visit(context.distributionDesc()), properties, null, context.comment() == null ? null : ((StringLiteral) visit(context.comment().string())).getStringValue(), null, context.orderByDesc() == null ? null : visit(context.orderByDesc().identifierList().identifier(), Identifier.class) .stream().map(Identifier::getValue).collect(toList()) ); List<Identifier> columns = visitIfPresent(context.identifier(), Identifier.class); return new CreateTableAsSelectStmt( createTableStmt, columns == null ? null : columns.stream().map(Identifier::getValue).collect(toList()), (QueryStatement) visit(context.queryStatement()), createPos(context)); } @Override public ParseNode visitCreateTableLikeStatement(StarRocksParser.CreateTableLikeStatementContext context) { PartitionDesc partitionDesc = context.partitionDesc() == null ? null : (PartitionDesc) visit(context.partitionDesc()); DistributionDesc distributionDesc = context.distributionDesc() == null ? 
null : (DistributionDesc) visit(context.distributionDesc()); Map<String, String> properties = getProperties(context.properties()); return new CreateTableLikeStmt(context.IF() != null, qualifiedNameToTableName(getQualifiedName(context.qualifiedName(0))), qualifiedNameToTableName(getQualifiedName(context.qualifiedName(1))), partitionDesc, distributionDesc, properties, createPos(context)); } @Override public ParseNode visitShowCreateTableStatement(StarRocksParser.ShowCreateTableStatementContext context) { QualifiedName qualifiedName = getQualifiedName(context.qualifiedName()); TableName targetTableName = qualifiedNameToTableName(qualifiedName); NodePosition pos = createPos(context); if (context.MATERIALIZED() != null && context.VIEW() != null) { return new ShowCreateTableStmt(targetTableName, ShowCreateTableStmt.CreateTableType.MATERIALIZED_VIEW, pos); } if (context.VIEW() != null) { return new ShowCreateTableStmt(targetTableName, ShowCreateTableStmt.CreateTableType.VIEW, pos); } return new ShowCreateTableStmt(targetTableName, ShowCreateTableStmt.CreateTableType.TABLE, pos); } @Override public ParseNode visitDropTableStatement(StarRocksParser.DropTableStatementContext context) { boolean ifExists = context.IF() != null && context.EXISTS() != null; boolean force = context.FORCE() != null; QualifiedName qualifiedName = getQualifiedName(context.qualifiedName()); TableName targetTableName = qualifiedNameToTableName(qualifiedName); return new DropTableStmt(ifExists, targetTableName, false, force, createPos(context)); } @Override public ParseNode visitRecoverTableStatement(StarRocksParser.RecoverTableStatementContext context) { QualifiedName qualifiedName = getQualifiedName(context.qualifiedName()); TableName tableName = qualifiedNameToTableName(qualifiedName); return new RecoverTableStmt(tableName, createPos(context)); } @Override public ParseNode visitTruncateTableStatement(StarRocksParser.TruncateTableStatementContext context) { QualifiedName qualifiedName = 
getQualifiedName(context.qualifiedName());
    // (tail of visitTruncateTableStatement: resolve the target table and the
    // optional PARTITION clause, widening the reported source span to cover it)
    TableName targetTableName = qualifiedNameToTableName(qualifiedName);
    Token start = context.start;
    Token stop = context.stop;
    PartitionNames partitionNames = null;
    if (context.partitionNames() != null) {
        stop = context.partitionNames().stop;
        partitionNames = (PartitionNames) visit(context.partitionNames());
    }
    NodePosition pos = createPos(start, stop);
    return new TruncateTableStmt(new TableRef(targetTableName, null, partitionNames, pos));
}

// SHOW [FULL] TABLES [FROM [catalog.]db] [LIKE pattern | WHERE expr].
@Override
public ParseNode visitShowTableStatement(StarRocksParser.ShowTableStatementContext context) {
    boolean isVerbose = context.FULL() != null;
    String database = null;
    String catalog = null;
    // The qualified name is either `catalog.db` (two parts) or just `db` (one part).
    if (context.qualifiedName() != null) {
        QualifiedName qualifiedName = getQualifiedName(context.qualifiedName());
        List<String> parts = qualifiedName.getParts();
        if (parts.size() == 2) {
            catalog = qualifiedName.getParts().get(0);
            database = qualifiedName.getParts().get(1);
        } else if (parts.size() == 1) {
            database = qualifiedName.getParts().get(0);
        }
    }
    NodePosition pos = createPos(context);
    if (context.pattern != null) {
        // LIKE 'pattern'
        StringLiteral stringLiteral = (StringLiteral) visit(context.pattern);
        return new ShowTableStmt(database, isVerbose, stringLiteral.getValue(), null, catalog, pos);
    } else if (context.expression() != null) {
        // WHERE <expr>
        return new ShowTableStmt(database, isVerbose, null,
                (Expr) visit(context.expression()), catalog, pos);
    } else {
        return new ShowTableStmt(database, isVerbose, null, null, catalog, pos);
    }
}

// DESC[RIBE] <table>; the presence of the ALL keyword is forwarded to DescribeStmt.
@Override
public ParseNode visitDescTableStatement(StarRocksParser.DescTableStatementContext context) {
    QualifiedName qualifiedName = getQualifiedName(context.qualifiedName());
    TableName targetTableName = qualifiedNameToTableName(qualifiedName);
    return new DescribeStmt(targetTableName, context.ALL() != null, createPos(context));
}

// SHOW TABLE STATUS [FROM db] [LIKE pattern] [WHERE expr] — continues past this chunk.
@Override
public ParseNode visitShowTableStatusStatement(StarRocksParser.ShowTableStatusStatementContext context) {
    QualifiedName dbName = null;
    if
(context.qualifiedName() != null) { dbName = getQualifiedName(context.db); } String pattern = null; if (context.pattern != null) { StringLiteral stringLiteral = (StringLiteral) visit(context.pattern); pattern = stringLiteral.getValue(); } Expr where = null; if (context.expression() != null) { where = (Expr) visit(context.expression()); } return new ShowTableStatusStmt(dbName == null ? null : dbName.toString(), pattern, where, createPos(context)); } @Override public ParseNode visitShowColumnStatement(StarRocksParser.ShowColumnStatementContext context) { QualifiedName tableName = getQualifiedName(context.table); QualifiedName dbName = null; if (context.db != null) { dbName = getQualifiedName(context.db); } String pattern = null; if (context.pattern != null) { StringLiteral stringLiteral = (StringLiteral) visit(context.pattern); pattern = stringLiteral.getValue(); } Expr where = null; if (context.expression() != null) { where = (Expr) visit(context.expression()); } return new ShowColumnStmt(qualifiedNameToTableName(tableName), dbName == null ? 
null : dbName.toString(), pattern, context.FULL() != null, where, createPos(context)); } @Override public ParseNode visitRefreshTableStatement(StarRocksParser.RefreshTableStatementContext context) { QualifiedName qualifiedName = getQualifiedName(context.qualifiedName()); TableName targetTableName = qualifiedNameToTableName(qualifiedName); List<String> partitionNames = null; if (context.string() != null) { partitionNames = context.string().stream() .map(c -> ((StringLiteral) visit(c)).getStringValue()).collect(toList()); } return new RefreshTableStmt(targetTableName, partitionNames, createPos(context)); } @Override public ParseNode visitAlterTableStatement(StarRocksParser.AlterTableStatementContext context) { QualifiedName qualifiedName = getQualifiedName(context.qualifiedName()); TableName targetTableName = qualifiedNameToTableName(qualifiedName); NodePosition pos = createPos(context); if (context.ROLLUP() != null) { if (context.ADD() != null) { List<AlterClause> clauses = context.rollupItem().stream().map(this::getRollup).collect(toList()); return new AlterTableStmt(targetTableName, clauses, pos); } else { List<Identifier> rollupList = visit(context.identifier(), Identifier.class); List<AlterClause> clauses = new ArrayList<>(); for (Identifier rollupName : rollupList) { clauses.add(new DropRollupClause(rollupName.getValue(), null, rollupName.getPos())); } return new AlterTableStmt(targetTableName, clauses, pos); } } else { List<AlterClause> alterClauses = visit(context.alterClause(), AlterClause.class); return new AlterTableStmt(targetTableName, alterClauses, pos); } } @Override public ParseNode visitCancelAlterTableStatement(StarRocksParser.CancelAlterTableStatementContext context) { ShowAlterStmt.AlterType alterType; if (context.ROLLUP() != null) { alterType = ShowAlterStmt.AlterType.ROLLUP; } else if (context.MATERIALIZED() != null && context.VIEW() != null) { alterType = ShowAlterStmt.AlterType.MATERIALIZED_VIEW; } else if (context.OPTIMIZE() != null) { 
alterType = ShowAlterStmt.AlterType.OPTIMIZE; } else { alterType = ShowAlterStmt.AlterType.COLUMN; } QualifiedName qualifiedName = getQualifiedName(context.qualifiedName()); TableName dbTableName = qualifiedNameToTableName(qualifiedName); List<Long> alterJobIdList = null; if (context.INTEGER_VALUE() != null) { alterJobIdList = context.INTEGER_VALUE() .stream().map(ParseTree::getText).map(Long::parseLong).collect(toList()); } return new CancelAlterTableStmt(alterType, dbTableName, alterJobIdList, createPos(context)); } @Override public ParseNode visitShowAlterStatement(StarRocksParser.ShowAlterStatementContext context) { QualifiedName dbName = null; if (context.db != null) { dbName = getQualifiedName(context.db); } Expr where = null; if (context.expression() != null) { where = (Expr) visit(context.expression()); } ShowAlterStmt.AlterType alterType; if (context.ROLLUP() != null) { alterType = ShowAlterStmt.AlterType.ROLLUP; } else if (context.MATERIALIZED() != null && context.VIEW() != null) { alterType = ShowAlterStmt.AlterType.MATERIALIZED_VIEW; } else if (context.OPTIMIZE() != null) { alterType = ShowAlterStmt.AlterType.OPTIMIZE; } else { alterType = ShowAlterStmt.AlterType.COLUMN; } List<OrderByElement> orderByElements = null; if (context.ORDER() != null) { orderByElements = new ArrayList<>(); orderByElements.addAll(visit(context.sortItem(), OrderByElement.class)); } LimitElement limitElement = null; if (context.limitElement() != null) { limitElement = (LimitElement) visit(context.limitElement()); } return new ShowAlterStmt(alterType, dbName == null ? 
null : dbName.toString(), where, orderByElements, limitElement, createPos(context));
}

// CREATE [OR REPLACE] VIEW [IF NOT EXISTS] — the two options are mutually exclusive.
@Override
public ParseNode visitCreateViewStatement(StarRocksParser.CreateViewStatementContext context) {
    QualifiedName qualifiedName = getQualifiedName(context.qualifiedName());
    TableName targetTableName = qualifiedNameToTableName(qualifiedName);
    List<ColWithComment> colWithComments = null;
    if (context.columnNameWithComment().size() > 0) {
        colWithComments = visit(context.columnNameWithComment(), ColWithComment.class);
    }
    if (context.IF() != null && context.REPLACE() != null) {
        throw new ParsingException(PARSER_ERROR_MSG.conflictedOptions("if not exists", "or replace"),
                createPos(context));
    }
    return new CreateViewStmt(
            context.IF() != null,
            context.REPLACE() != null,
            targetTableName,
            colWithComments,
            // NOTE(review): other visitors in this file read comments via
            // visit(context.comment().string()); here the comment rule itself is
            // visited and cast to StringLiteral — confirm both paths agree.
            context.comment() == null ? null :
                    ((StringLiteral) visit(context.comment())).getStringValue(),
            (QueryStatement) visit(context.queryStatement()),
            createPos(context));
}

// ALTER VIEW name [(cols with comments)] AS query — new definition wrapped in an AlterViewClause.
@Override
public ParseNode visitAlterViewStatement(StarRocksParser.AlterViewStatementContext context) {
    QualifiedName qualifiedName = getQualifiedName(context.qualifiedName());
    TableName targetTableName = qualifiedNameToTableName(qualifiedName);
    List<ColWithComment> colWithComments = null;
    if (context.columnNameWithComment().size() > 0) {
        colWithComments = visit(context.columnNameWithComment(), ColWithComment.class);
    }
    QueryStatement queryStatement = (QueryStatement) visit(context.queryStatement());
    AlterClause alterClause = new AlterViewClause(colWithComments, queryStatement, createPos(context));
    return new AlterViewStmt(targetTableName, alterClause, createPos(context));
}

// DROP VIEW [IF EXISTS] — modeled as a DropTableStmt; the third argument (true)
// marks a view drop (cf. visitDropTableStatement, which passes false).
@Override
public ParseNode visitDropViewStatement(StarRocksParser.DropViewStatementContext context) {
    boolean ifExists = context.IF() != null && context.EXISTS() != null;
    QualifiedName qualifiedName = getQualifiedName(context.qualifiedName());
    TableName targetTableName = qualifiedNameToTableName(qualifiedName);
    return new DropTableStmt(ifExists,
targetTableName, true, false, createPos(context)); } @Override public ParseNode visitShowPartitionsStatement(StarRocksParser.ShowPartitionsStatementContext context) { boolean temp = context.TEMPORARY() != null; QualifiedName qualifiedName = getQualifiedName(context.qualifiedName()); TableName tableName = qualifiedNameToTableName(qualifiedName); Expr where = null; if (context.expression() != null) { where = (Expr) visit(context.expression()); } List<OrderByElement> orderByElements = new ArrayList<>(); if (context.ORDER() != null) { orderByElements.addAll(visit(context.sortItem(), OrderByElement.class)); } LimitElement limitElement = null; if (context.limitElement() != null) { limitElement = (LimitElement) visit(context.limitElement()); } return new ShowPartitionsStmt(tableName, where, orderByElements, limitElement, temp, createPos(context)); } @Override public ParseNode visitRecoverPartitionStatement(StarRocksParser.RecoverPartitionStatementContext context) { QualifiedName qualifiedName = getQualifiedName(context.qualifiedName()); TableName tableName = qualifiedNameToTableName(qualifiedName); String partitionName = ((Identifier) visit(context.identifier())).getValue(); return new RecoverPartitionStmt(tableName, partitionName, createPos(context)); } @Override public ParseNode visitShowTabletStatement(StarRocksParser.ShowTabletStatementContext context) { NodePosition pos = createPos(context); if (context.INTEGER_VALUE() != null) { return new ShowTabletStmt(null, Long.parseLong(context.INTEGER_VALUE().getText()), pos); } else { QualifiedName qualifiedName = getQualifiedName(context.qualifiedName()); TableName dbTblName = qualifiedNameToTableName(qualifiedName); PartitionNames partitionNames = null; if (context.partitionNames() != null) { partitionNames = (PartitionNames) visit(context.partitionNames()); } Expr where = null; if (context.expression() != null) { where = (Expr) visit(context.expression()); } List<OrderByElement> orderByElements = null; if (context.ORDER() 
// (continuation of visitShowTabletStatement, table-based branch) Collects optional ORDER BY /
// LIMIT; -1L marks "no tablet id" for this form.
!= null) { orderByElements = new ArrayList<>(); orderByElements.addAll(visit(context.sortItem(), OrderByElement.class)); } LimitElement limitElement = null; if (context.limitElement() != null) { limitElement = (LimitElement) visit(context.limitElement()); } return new ShowTabletStmt(dbTblName, -1L, partitionNames, where, orderByElements, limitElement, createPos(context)); } }

// CREATE INDEX: builds an IndexDef (name, columns, type, optional comment, properties) and wraps
// it as a CreateIndexClause inside an ALTER TABLE statement. idxStart/idxStop track the clause span.
@Override public ParseNode visitCreateIndexStatement(StarRocksParser.CreateIndexStatementContext context) { String indexName = ((Identifier) visit(context.identifier())).getValue(); List<Identifier> columnList = visit(context.identifierList().identifier(), Identifier.class); Token idxStart = context.identifier().start; Token idxStop = context.identifierList().stop; String comment = null; if (context.comment() != null) { comment = ((StringLiteral) visit(context.comment())).getStringValue(); idxStop = context.comment().stop; } NodePosition idxPos = createPos(idxStart, idxStop); IndexDef indexDef = new IndexDef(indexName, columnList.stream().map(Identifier::getValue).collect(toList()), getIndexType(context.indexType()), comment, getPropertyList(context.propertyList()), idxPos); CreateIndexClause createIndexClause = new CreateIndexClause(indexDef, idxPos); QualifiedName qualifiedName = getQualifiedName(context.qualifiedName()); TableName targetTableName = qualifiedNameToTableName(qualifiedName); return new AlterTableStmt(targetTableName, Lists.newArrayList(createIndexClause), createPos(context)); }

// DROP INDEX: likewise expressed as an ALTER TABLE carrying a DropIndexClause.
@Override public ParseNode visitDropIndexStatement(StarRocksParser.DropIndexStatementContext context) { Identifier identifier = (Identifier) visit(context.identifier()); DropIndexClause dropIndexClause = new DropIndexClause(identifier.getValue(), createPos(context.identifier())); QualifiedName qualifiedName = getQualifiedName(context.qualifiedName()); TableName targetTableName = qualifiedNameToTableName(qualifiedName); return new AlterTableStmt(targetTableName, Lists.newArrayList(dropIndexClause), createPos(context)); }

// SHOW INDEX: optional db qualifier plus the target table.
@Override public ParseNode visitShowIndexStatement(StarRocksParser.ShowIndexStatementContext context) { QualifiedName tableName = getQualifiedName(context.table); QualifiedName dbName = null; if (context.db != null) { dbName = getQualifiedName(context.db); } return new ShowIndexStmt(dbName == null ? null : dbName.toString(), qualifiedNameToTableName(tableName), createPos(context)); }

// Flattens a PROPERTIES(...) context into a case-insensitive key/value map; empty map when absent.
private Map<String, String> buildProperties(StarRocksParser.PropertiesContext properties) { Map<String, String> result = new TreeMap<>(String.CASE_INSENSITIVE_ORDER); if (properties != null) { List<Property> propertyList = visit(properties.property(), Property.class); for (Property property : ListUtils.emptyIfNull(propertyList)) { result.put(property.getKey(), property.getValue()); } } return result; } 

// SUBMIT TASK: carries either a CTAS or an INSERT; merges hint-provided session variables into the
// task properties and records the statement's start offset so the task text can be recovered later.
@Override public ParseNode visitSubmitTaskStatement(StarRocksParser.SubmitTaskStatementContext context) { QualifiedName qualifiedName = null; if (context.qualifiedName() != null) { qualifiedName = getQualifiedName(context.qualifiedName()); } Map<String, String> properties = buildProperties(context.properties()); properties.putAll(extractVarHintValues(hintMap.get(context))); CreateTableAsSelectStmt createTableAsSelectStmt = null; InsertStmt insertStmt = null; if (context.createTableAsSelectStatement() != null) { createTableAsSelectStmt = (CreateTableAsSelectStmt) visit(context.createTableAsSelectStatement()); } else if (context.insertStatement() != null) { insertStmt = (InsertStmt) visit(context.insertStatement()); } int startIndex = 0; if (createTableAsSelectStmt != null) { startIndex = context.createTableAsSelectStatement().start.getStartIndex(); } else { startIndex = context.insertStatement().start.getStartIndex(); } NodePosition pos = createPos(context); TaskName taskName; if (qualifiedName == null) { taskName = new TaskName(null, null, pos); } else { taskName = qualifiedNameToTaskName(qualifiedName); } if (createTableAsSelectStmt != null) { return new SubmitTaskStmt(taskName, properties, startIndex, createTableAsSelectStmt, pos); } else { return new SubmitTaskStmt(taskName, properties, startIndex, insertStmt, pos); } }

// DROP TASK [FORCE].
@Override public ParseNode visitDropTaskStatement(StarRocksParser.DropTaskStatementContext context) { QualifiedName qualifiedName = getQualifiedName(context.qualifiedName()); TaskName taskName = qualifiedNameToTaskName(qualifiedName); boolean force = context.FORCE() != null; return new DropTaskStmt(taskName, force, createPos(context)); }

// Interval units accepted by an async MV refresh scheme.
public static final ImmutableList<String> MATERIALIZEDVIEW_REFRESHSCHEME_SUPPORT_UNIT_IDENTIFIERS = new ImmutableList.Builder<String>() .add("SECOND").add("MINUTE").add("HOUR").add("DAY") .build();

// Rejects an async refresh interval whose unit is not in the supported list; silently accepts
// schemes with no interval/unit at all.
private void checkMaterializedViewAsyncRefreshSchemeUnitIdentifier( AsyncRefreshSchemeDesc asyncRefreshSchemeDesc) { if (asyncRefreshSchemeDesc.getIntervalLiteral() == null || asyncRefreshSchemeDesc.getIntervalLiteral().getUnitIdentifier() == null) { return; } String unit = asyncRefreshSchemeDesc.getIntervalLiteral().getUnitIdentifier().getDescription(); if (StringUtils.isEmpty(unit)) { return; } if (!MATERIALIZEDVIEW_REFRESHSCHEME_SUPPORT_UNIT_IDENTIFIERS.contains(unit)) { throw new ParsingException(PARSER_ERROR_MSG.forbidClauseInMV("Refresh interval unit", unit), asyncRefreshSchemeDesc.getIntervalLiteral().getUnitIdentifier().getPos()); } }

// CREATE MATERIALIZED VIEW (begins here; clause handling continues past this block).
@Override public ParseNode visitCreateMaterializedViewStatement( StarRocksParser.CreateMaterializedViewStatementContext context) { boolean ifNotExist = context.IF() != null; QualifiedName qualifiedName = getQualifiedName(context.mvName); TableName tableName = qualifiedNameToTableName(qualifiedName); List<ColWithComment> colWithComments = null; if (!context.columnNameWithComment().isEmpty()) { colWithComments = visit(context.columnNameWithComment(), ColWithComment.class); } String comment = context.comment() == null ?
// (continuation of visitCreateMaterializedViewStatement) Walks each materializedViewDesc clause,
// rejecting duplicates of PROPERTY / REFRESH / PARTITION / DISTRIBUTION; PARTITION BY accepts a
// bare column (SlotRef) or a function call validated by AnalyzerUtils.checkAndExtractPartitionCol.
null : ((StringLiteral) visit(context.comment().string())).getStringValue(); QueryStatement queryStatement = (QueryStatement) visit(context.queryStatement()); RefreshSchemeClause refreshSchemeDesc = null; Map<String, String> properties = new HashMap<>(); ExpressionPartitionDesc expressionPartitionDesc = null; DistributionDesc distributionDesc = null; List<String> sortKeys = null; for (StarRocksParser.MaterializedViewDescContext desc : ListUtils.emptyIfNull(context.materializedViewDesc())) { NodePosition clausePos = createPos(desc); if (desc.properties() != null) { if (MapUtils.isNotEmpty(properties)) { throw new ParsingException(PARSER_ERROR_MSG.duplicatedClause("PROPERTY"), clausePos); } List<Property> propertyList = visit(desc.properties().property(), Property.class); for (Property property : propertyList) { properties.put(property.getKey(), property.getValue()); } } if (desc.refreshSchemeDesc() != null) { if (refreshSchemeDesc != null) { throw new ParsingException(PARSER_ERROR_MSG.duplicatedClause("REFRESH"), clausePos); } refreshSchemeDesc = ((RefreshSchemeClause) visit(desc.refreshSchemeDesc())); } if (desc.primaryExpression() != null) { if (expressionPartitionDesc != null) { throw new ParsingException(PARSER_ERROR_MSG.duplicatedClause("PARTITION"), clausePos); } Expr expr = (Expr) visit(desc.primaryExpression()); if (expr instanceof SlotRef) { expressionPartitionDesc = new ExpressionPartitionDesc(expr); } else if (expr instanceof FunctionCallExpr) { AnalyzerUtils.checkAndExtractPartitionCol((FunctionCallExpr) expr, null); expressionPartitionDesc = new ExpressionPartitionDesc(expr); } else { throw new ParsingException(PARSER_ERROR_MSG.unsupportedExprWithInfo(expr.toSql(), "PARTITION BY"), expr.getPos()); } } if (desc.distributionDesc() != null) { if (distributionDesc != null) { throw new ParsingException(PARSER_ERROR_MSG.duplicatedClause("DISTRIBUTION"), clausePos); } distributionDesc = (DistributionDesc) visit(desc.distributionDesc()); } if
// Default refresh scheme: SYNC when no distribution is given, otherwise MANUAL (immediate).
// A SYNC MV forbids PARTITION BY and DISTRIBUTED BY and maps to the legacy
// CreateMaterializedViewStmt; the async form requires Config.enable_experimental_mv.
(desc.orderByDesc() != null) { sortKeys = visit(desc.orderByDesc().identifierList().identifier(), Identifier.class) .stream().map(Identifier::getValue).collect(toList()); } } if (refreshSchemeDesc == null) { if (distributionDesc == null) { refreshSchemeDesc = new SyncRefreshSchemeDesc(); } else { refreshSchemeDesc = new ManualRefreshSchemeDesc(MaterializedView.RefreshMoment.IMMEDIATE, NodePosition.ZERO); } } if (refreshSchemeDesc instanceof SyncRefreshSchemeDesc) { if (expressionPartitionDesc != null) { throw new ParsingException(PARSER_ERROR_MSG.forbidClauseInMV("SYNC refresh type", "PARTITION BY"), expressionPartitionDesc.getPos()); } if (distributionDesc != null) { throw new ParsingException(PARSER_ERROR_MSG.forbidClauseInMV("SYNC refresh type", "DISTRIBUTION BY"), distributionDesc.getPos()); } return new CreateMaterializedViewStmt(tableName, queryStatement, properties); } if (refreshSchemeDesc instanceof AsyncRefreshSchemeDesc) { AsyncRefreshSchemeDesc asyncRefreshSchemeDesc = (AsyncRefreshSchemeDesc) refreshSchemeDesc; checkMaterializedViewAsyncRefreshSchemeUnitIdentifier(asyncRefreshSchemeDesc); } if (!Config.enable_experimental_mv) { throw new ParsingException(PARSER_ERROR_MSG.feConfigDisable("enable_experimental_mv"), NodePosition.ZERO); } return new CreateMaterializedViewStatement(tableName, ifNotExist, colWithComments, context.indexDesc() == null ?
null : getIndexDefs(context.indexDesc()), comment, refreshSchemeDesc, expressionPartitionDesc, distributionDesc, sortKeys, properties, queryStatement, createPos(context)); }

// SHOW MATERIALIZED VIEWS: optional db scope, then either a LIKE pattern or a WHERE expression.
@Override public ParseNode visitShowMaterializedViewsStatement( StarRocksParser.ShowMaterializedViewsStatementContext context) { String database = null; NodePosition pos = createPos(context); if (context.qualifiedName() != null) { database = getQualifiedName(context.qualifiedName()).toString(); } if (context.pattern != null) { StringLiteral stringLiteral = (StringLiteral) visit(context.pattern); return new ShowMaterializedViewsStmt(database, stringLiteral.getValue(), null, pos); } else if (context.expression() != null) { return new ShowMaterializedViewsStmt(database, null, (Expr) visit(context.expression()), pos); } else { return new ShowMaterializedViewsStmt(database, null, null, pos); } }

// DROP MATERIALIZED VIEW [IF EXISTS].
@Override public ParseNode visitDropMaterializedViewStatement(StarRocksParser.DropMaterializedViewStatementContext context) { QualifiedName mvQualifiedName = getQualifiedName(context.qualifiedName()); TableName mvName = qualifiedNameToTableName(mvQualifiedName); return new DropMaterializedViewStmt(context.IF() != null, mvName, createPos(context)); }

// ALTER MATERIALIZED VIEW: exactly one clause (rename / refresh scheme / properties / status /
// swap) wins; later checks overwrite alterTableClause (continues past this block).
@Override public ParseNode visitAlterMaterializedViewStatement( StarRocksParser.AlterMaterializedViewStatementContext context) { QualifiedName mvQualifiedName = getQualifiedName(context.qualifiedName()); TableName mvName = qualifiedNameToTableName(mvQualifiedName); AlterTableClause alterTableClause = null; if (context.tableRenameClause() != null) { alterTableClause = (TableRenameClause) visit(context.tableRenameClause()); } if (context.refreshSchemeDesc() != null) { alterTableClause = ((RefreshSchemeClause) visit(context.refreshSchemeDesc())); if (alterTableClause instanceof AsyncRefreshSchemeDesc) { AsyncRefreshSchemeDesc asyncRefreshSchemeDesc = (AsyncRefreshSchemeDesc) alterTableClause;
// (continuation of visitAlterMaterializedViewStatement) Validates an async refresh interval,
// then lets MODIFY PROPERTIES / status / SWAP clauses take precedence if present.
checkMaterializedViewAsyncRefreshSchemeUnitIdentifier(asyncRefreshSchemeDesc); } } if (context.modifyPropertiesClause() != null) { alterTableClause = (ModifyTablePropertiesClause) visit(context.modifyPropertiesClause()); } if (context.statusDesc() != null) { String status = context.statusDesc().getText(); alterTableClause = new AlterMaterializedViewStatusClause(status, createPos(context)); } if (context.swapTableClause() != null) { alterTableClause = (SwapTableClause) visit(context.swapTableClause()); } return new AlterMaterializedViewStmt(mvName, alterTableClause, createPos(context)); }

// REFRESH MATERIALIZED VIEW [PARTITION range] [FORCE] [WITH SYNC MODE].
@Override public ParseNode visitRefreshMaterializedViewStatement( StarRocksParser.RefreshMaterializedViewStatementContext context) { QualifiedName mvQualifiedName = getQualifiedName(context.qualifiedName()); TableName mvName = qualifiedNameToTableName(mvQualifiedName); PartitionRangeDesc partitionRangeDesc = null; if (context.partitionRangeDesc() != null) { partitionRangeDesc = (PartitionRangeDesc) visit(context.partitionRangeDesc()); } return new RefreshMaterializedViewStatement(mvName, partitionRangeDesc, context.FORCE() != null, context.SYNC() != null, createPos(context)); }

// CANCEL REFRESH MATERIALIZED VIEW.
@Override public ParseNode visitCancelRefreshMaterializedViewStatement( StarRocksParser.CancelRefreshMaterializedViewStatementContext context) { QualifiedName mvQualifiedName = getQualifiedName(context.qualifiedName()); TableName mvName = qualifiedNameToTableName(mvQualifiedName); return new CancelRefreshMaterializedViewStmt(mvName, createPos(context)); }

// CREATE EXTERNAL CATALOG: name, optional comment, optional properties.
@Override public ParseNode visitCreateExternalCatalogStatement( StarRocksParser.CreateExternalCatalogStatementContext context) { Identifier identifier = (Identifier) visit(context.identifierOrString()); String catalogName = identifier.getValue(); String comment = null; if (context.comment() != null) { comment = ((StringLiteral) visit(context.comment())).getStringValue(); } Map<String, String> properties = new HashMap<>(); if (context.properties() != null) {
List<Property> propertyList = visit(context.properties().property(), Property.class); for (Property property : propertyList) { properties.put(property.getKey(), property.getValue()); } } return new CreateCatalogStmt(catalogName, comment, properties, createPos(context)); }

// DROP EXTERNAL CATALOG.
@Override public ParseNode visitDropExternalCatalogStatement(StarRocksParser.DropExternalCatalogStatementContext context) { Identifier identifier = (Identifier) visit(context.catalogName); String catalogName = identifier.getValue(); return new DropCatalogStmt(catalogName, createPos(context)); }

// SHOW CREATE EXTERNAL CATALOG.
@Override public ParseNode visitShowCreateExternalCatalogStatement( StarRocksParser.ShowCreateExternalCatalogStatementContext context) { Identifier identifier = (Identifier) visit(context.catalogName); String catalogName = identifier.getValue(); return new ShowCreateExternalCatalogStmt(catalogName, createPos(context)); }

// SHOW CATALOGS.
@Override public ParseNode visitShowCatalogsStatement(StarRocksParser.ShowCatalogsStatementContext context) { return new ShowCatalogsStmt(createPos(context)); }

// ALTER CATALOG: currently only a MODIFY PROPERTIES clause.
@Override public ParseNode visitAlterCatalogStatement(StarRocksParser.AlterCatalogStatementContext context) { String catalogName = ((Identifier) visit(context.catalogName)).getValue(); AlterClause alterClause = (AlterClause) visit(context.modifyPropertiesClause()); return new AlterCatalogStmt(catalogName, alterClause, createPos(context)); }

// SHOW WAREHOUSES [LIKE pattern | WHERE expr].
@Override public ParseNode visitShowWarehousesStatement(StarRocksParser.ShowWarehousesStatementContext context) { String pattern = null; if (context.pattern != null) { StringLiteral stringLiteral = (StringLiteral) visit(context.pattern); pattern = stringLiteral.getValue(); } Expr where = null; if (context.expression() != null) { where = (Expr) visit(context.expression()); } return new ShowWarehousesStmt(pattern, where, createPos(context)); }

// INSERT: a VALUES list becomes a ValuesRelation with synthesized column_<i> names sized from the
// first row; otherwise the source is the visited query (continues past this block).
@Override public ParseNode visitInsertStatement(StarRocksParser.InsertStatementContext context) { QueryStatement queryStatement; if (context.VALUES() !=
null) { List<ValueList> rowValues = visit(context.expressionsWithDefault(), ValueList.class); List<List<Expr>> rows = rowValues.stream().map(ValueList::getRow).collect(toList()); List<String> colNames = new ArrayList<>(); for (int i = 0; i < rows.get(0).size(); ++i) { colNames.add("column_" + i); } queryStatement = new QueryStatement(new ValuesRelation(rows, colNames, createPos(context.VALUES().getSymbol(), context.stop))); } else { queryStatement = (QueryStatement) visit(context.queryStatement()); } if (context.explainDesc() != null) { queryStatement.setIsExplain(true, getExplainType(context.explainDesc())); } if (context.qualifiedName() != null) { QualifiedName qualifiedName = getQualifiedName(context.qualifiedName()); TableName targetTableName = qualifiedNameToTableName(qualifiedName); PartitionNames partitionNames = null; if (context.partitionNames() != null) { partitionNames = (PartitionNames) visit(context.partitionNames()); } InsertStmt stmt = new InsertStmt(targetTableName, partitionNames, context.label == null ?
// (continuation of visitInsertStatement) Table target with optional label/columns/OVERWRITE;
// BLACKHOLE() discards results; otherwise a table-function INSERT built from the property list.
null : ((Identifier) visit(context.label)).getValue(), getColumnNames(context.columnAliases()), queryStatement, context.OVERWRITE() != null, createPos(context)); stmt.setHintNodes(hintMap.get(context)); return stmt; } if (context.BLACKHOLE() != null) { return new InsertStmt(queryStatement, createPos(context)); } Map<String, String> tableFunctionProperties = getPropertyList(context.propertyList()); InsertStmt res = new InsertStmt(tableFunctionProperties, queryStatement, createPos(context)); res.setHintNodes(hintMap.get(context)); return res; }

// UPDATE: a DUAL from-clause becomes a dual ValuesRelation; EXPLAIN ANALYZE is rejected.
@Override public ParseNode visitUpdateStatement(StarRocksParser.UpdateStatementContext context) { QualifiedName qualifiedName = getQualifiedName(context.qualifiedName()); TableName targetTableName = qualifiedNameToTableName(qualifiedName); List<ColumnAssignment> assignments = visit(context.assignmentList().assignment(), ColumnAssignment.class); List<Relation> fromRelations = null; if (context.fromClause() instanceof StarRocksParser.DualContext) { ValuesRelation valuesRelation = ValuesRelation.newDualRelation(createPos(context.fromClause())); fromRelations = Lists.newArrayList(valuesRelation); } else { StarRocksParser.FromContext fromContext = (StarRocksParser.FromContext) context.fromClause(); if (fromContext.relations() != null) { fromRelations = visit(fromContext.relations().relation(), Relation.class); } } Expr where = context.where != null ?
(Expr) visit(context.where) : null; List<CTERelation> ctes = null; if (context.withClause() != null) { ctes = visit(context.withClause().commonTableExpression(), CTERelation.class); } UpdateStmt ret = new UpdateStmt(targetTableName, assignments, fromRelations, where, ctes, createPos(context)); if (context.explainDesc() != null) { ret.setIsExplain(true, getExplainType(context.explainDesc())); if (StatementBase.ExplainLevel.ANALYZE.equals(ret.getExplainLevel())) { throw new ParsingException(PARSER_ERROR_MSG.unsupportedOp("analyze")); } } ret.setHintNodes(hintMap.get(context)); return ret; }

// DELETE: optional partitions, USING relations and CTEs; EXPLAIN ANALYZE is rejected.
@Override public ParseNode visitDeleteStatement(StarRocksParser.DeleteStatementContext context) { QualifiedName qualifiedName = getQualifiedName(context.qualifiedName()); TableName targetTableName = qualifiedNameToTableName(qualifiedName); PartitionNames partitionNames = null; if (context.partitionNames() != null) { partitionNames = (PartitionNames) visit(context.partitionNames()); } List<Relation> usingRelations = context.using != null ? visit(context.using.relation(), Relation.class) : null; Expr where = context.where != null ?
(Expr) visit(context.where) : null; List<CTERelation> ctes = null; if (context.withClause() != null) { ctes = visit(context.withClause().commonTableExpression(), CTERelation.class); } DeleteStmt ret = new DeleteStmt(targetTableName, partitionNames, usingRelations, where, ctes, createPos(context)); if (context.explainDesc() != null) { ret.setIsExplain(true, getExplainType(context.explainDesc())); if (StatementBase.ExplainLevel.ANALYZE.equals(ret.getExplainLevel())) { throw new ParsingException(PARSER_ERROR_MSG.unsupportedOp("analyze")); } } ret.setHintNodes(hintMap.get(context)); return ret; }

// CREATE ROUTINE LOAD: gathers load properties, job properties and data-source properties
// (continues past this block).
@Override public ParseNode visitCreateRoutineLoadStatement(StarRocksParser.CreateRoutineLoadStatementContext context) { QualifiedName tableName = null; if (context.table != null) { tableName = getQualifiedName(context.table); } List<StarRocksParser.LoadPropertiesContext> loadPropertiesContexts = context.loadProperties(); List<ParseNode> loadPropertyList = getLoadPropertyList(loadPropertiesContexts); String typeName = context.source.getText(); Map<String, String> jobProperties = getJobProperties(context.jobProperties()); Map<String, String> dataSourceProperties = getDataSourceProperties(context.dataSourceProperties()); return new CreateRoutineLoadStmt(createLabelName(context.db, context.name), tableName == null ?
// (continuation of visitCreateRoutineLoadStatement) Passes the optional table name through.
null : tableName.toString(), loadPropertyList, jobProperties, typeName, dataSourceProperties, createPos(context)); }

// SHOW CREATE ROUTINE LOAD for a (db-qualified) job label.
@Override public ParseNode visitShowCreateRoutineLoadStatement( StarRocksParser.ShowCreateRoutineLoadStatementContext context) { return new ShowCreateRoutineLoadStmt(createLabelName(context.db, context.name)); }

// ALTER ROUTINE LOAD: with a FROM data source the source type/properties are rebuilt; otherwise
// an empty RoutineLoadDataSourceProperties is passed.
@Override public ParseNode visitAlterRoutineLoadStatement(StarRocksParser.AlterRoutineLoadStatementContext context) { NodePosition pos = createPos(context); List<StarRocksParser.LoadPropertiesContext> loadPropertiesContexts = context.loadProperties(); List<ParseNode> loadPropertyList = getLoadPropertyList(loadPropertiesContexts); Map<String, String> jobProperties = getJobProperties(context.jobProperties()); if (context.dataSource() != null) { String typeName = context.dataSource().source.getText(); Map<String, String> dataSourceProperties = getDataSourceProperties(context.dataSource().dataSourceProperties()); RoutineLoadDataSourceProperties dataSource = new RoutineLoadDataSourceProperties(typeName, dataSourceProperties, createPos(context.dataSource())); return new AlterRoutineLoadStmt(createLabelName(context.db, context.name), loadPropertyList, jobProperties, dataSource, pos); } return new AlterRoutineLoadStmt(createLabelName(context.db, context.name), loadPropertyList, jobProperties, new RoutineLoadDataSourceProperties(), pos); }

// ALTER LOAD: only job properties may be changed.
@Override public ParseNode visitAlterLoadStatement(StarRocksParser.AlterLoadStatementContext context) { Map<String, String> jobProperties = getJobProperties(context.jobProperties()); return new AlterLoadStmt(createLabelName(context.db, context.name), jobProperties, createPos(context)); }

// STOP ROUTINE LOAD.
@Override public ParseNode visitStopRoutineLoadStatement(StarRocksParser.StopRoutineLoadStatementContext context) { return new StopRoutineLoadStmt(createLabelName(context.db, context.name), createPos(context)); }

// RESUME ROUTINE LOAD.
@Override public ParseNode visitResumeRoutineLoadStatement(StarRocksParser.ResumeRoutineLoadStatementContext context) {
return new ResumeRoutineLoadStmt(createLabelName(context.db, context.name), createPos(context)); }

// PAUSE ROUTINE LOAD.
@Override public ParseNode visitPauseRoutineLoadStatement(StarRocksParser.PauseRoutineLoadStatementContext context) { return new PauseRoutineLoadStmt(createLabelName(context.db, context.name), createPos(context)); }

// SHOW [ALL] ROUTINE LOAD with optional WHERE / ORDER BY / LIMIT. The db scope travels inside
// createLabelName(context.db, context.name); the former dead local `database` was removed.
@Override public ParseNode visitShowRoutineLoadStatement(StarRocksParser.ShowRoutineLoadStatementContext context) { boolean isVerbose = context.ALL() != null; Expr where = null; if (context.expression() != null) { where = (Expr) visit(context.expression()); } List<OrderByElement> orderByElements = null; if (context.ORDER() != null) { orderByElements = new ArrayList<>(); orderByElements.addAll(visit(context.sortItem(), OrderByElement.class)); } LimitElement limitElement = null; if (context.limitElement() != null) { limitElement = (LimitElement) visit(context.limitElement()); } return new ShowRoutineLoadStmt(createLabelName(context.db, context.name), isVerbose, where, orderByElements, limitElement, createPos(context)); }

// SHOW ROUTINE LOAD TASK: optional db scope and WHERE filter.
@Override public ParseNode visitShowRoutineLoadTaskStatement(StarRocksParser.ShowRoutineLoadTaskStatementContext context) { QualifiedName dbName = null; if (context.db != null) { dbName = getQualifiedName(context.db); } Expr where = null; if (context.expression() != null) { where = (Expr) visit(context.expression()); } return new ShowRoutineLoadTaskStmt(dbName == null ?
null : dbName.toString(), where, createPos(context)); }

// SHOW [ALL] STREAM LOAD with optional WHERE / ORDER BY / LIMIT. As above, the dead local
// `database` was removed; the label carries the db scope.
@Override public ParseNode visitShowStreamLoadStatement(StarRocksParser.ShowStreamLoadStatementContext context) { boolean isVerbose = context.ALL() != null; Expr where = null; if (context.expression() != null) { where = (Expr) visit(context.expression()); } List<OrderByElement> orderByElements = null; if (context.ORDER() != null) { orderByElements = new ArrayList<>(); orderByElements.addAll(visit(context.sortItem(), OrderByElement.class)); } LimitElement limitElement = null; if (context.limitElement() != null) { limitElement = (LimitElement) visit(context.limitElement()); } return new ShowStreamLoadStmt(createLabelName(context.db, context.name), isVerbose, where, orderByElements, limitElement, createPos(context)); }

// ADMIN SET CONFIG: a single frontend key=value property.
@Override public ParseNode visitAdminSetConfigStatement(StarRocksParser.AdminSetConfigStatementContext context) { Property config = (Property) visitProperty(context.property()); return new AdminSetConfigStmt(AdminSetConfigStmt.ConfigType.FRONTEND, config, createPos(context)); }

// ADMIN SET REPLICA STATUS PROPERTIES(...).
@Override public ParseNode visitAdminSetReplicaStatusStatement( StarRocksParser.AdminSetReplicaStatusStatementContext context) { List<Property> propertyList = visit(context.properties().property(), Property.class); return new AdminSetReplicaStatusStmt(new PropertySet(propertyList, createPos(context.properties())), createPos(context)); }

// ADMIN SHOW FRONTEND CONFIG [LIKE pattern].
@Override public ParseNode visitAdminShowConfigStatement(StarRocksParser.AdminShowConfigStatementContext context) { NodePosition pos = createPos(context); if (context.pattern != null) { StringLiteral stringLiteral = (StringLiteral) visit(context.pattern); return new AdminShowConfigStmt(AdminSetConfigStmt.ConfigType.FRONTEND, stringLiteral.getValue(), pos); } return new AdminShowConfigStmt(AdminSetConfigStmt.ConfigType.FRONTEND, null, pos); }

// ADMIN SHOW REPLICA DISTRIBUTION (begins here; body continues past this block).
@Override public ParseNode visitAdminShowReplicaDistributionStatement(
// (continuation of visitAdminShowReplicaDistributionStatement) Extends the TableRef span to cover
// the optional PARTITION clause (start/stop tokens) before building the statement.
StarRocksParser.AdminShowReplicaDistributionStatementContext context) { Token start = context.qualifiedName().start; Token stop = context.qualifiedName().stop; QualifiedName qualifiedName = getQualifiedName(context.qualifiedName()); TableName targetTableName = qualifiedNameToTableName(qualifiedName); PartitionNames partitionNames = null; if (context.partitionNames() != null) { stop = context.partitionNames().stop; partitionNames = (PartitionNames) visit(context.partitionNames()); } return new AdminShowReplicaDistributionStmt(new TableRef(targetTableName, null, partitionNames, createPos(start, stop)), createPos(context)); }

// ADMIN SHOW REPLICA STATUS: same TableRef-span handling plus an optional WHERE filter.
@Override public ParseNode visitAdminShowReplicaStatusStatement( StarRocksParser.AdminShowReplicaStatusStatementContext context) { Token start = context.qualifiedName().start; Token stop = context.qualifiedName().stop; QualifiedName qualifiedName = getQualifiedName(context.qualifiedName()); TableName targetTableName = qualifiedNameToTableName(qualifiedName); Expr where = context.where != null ?
(Expr) visit(context.where) : null; PartitionNames partitionNames = null; if (context.partitionNames() != null) { stop = context.partitionNames().stop; partitionNames = (PartitionNames) visit(context.partitionNames()); } return new AdminShowReplicaStatusStmt( new TableRef(targetTableName, null, partitionNames, createPos(start, stop)), where, createPos(context)); }

// ADMIN REPAIR TABLE [PARTITION(...)].
@Override public ParseNode visitAdminRepairTableStatement(StarRocksParser.AdminRepairTableStatementContext context) { Token start = context.qualifiedName().start; Token stop = context.qualifiedName().stop; QualifiedName qualifiedName = getQualifiedName(context.qualifiedName()); TableName targetTableName = qualifiedNameToTableName(qualifiedName); PartitionNames partitionNames = null; if (context.partitionNames() != null) { stop = context.partitionNames().stop; partitionNames = (PartitionNames) visit(context.partitionNames()); } return new AdminRepairTableStmt(new TableRef(targetTableName, null, partitionNames, createPos(start, stop)), createPos(context)); }

// ADMIN CANCEL REPAIR TABLE [PARTITION(...)].
@Override public ParseNode visitAdminCancelRepairTableStatement( StarRocksParser.AdminCancelRepairTableStatementContext context) { Token start = context.qualifiedName().start; Token stop = context.qualifiedName().stop; QualifiedName qualifiedName = getQualifiedName(context.qualifiedName()); TableName targetTableName = qualifiedNameToTableName(qualifiedName); PartitionNames partitionNames = null; if (context.partitionNames() != null) { stop = context.partitionNames().stop; partitionNames = (PartitionNames) visit(context.partitionNames()); } return new AdminCancelRepairTableStmt( new TableRef(targetTableName, null, partitionNames, createPos(start, stop)), createPos(context)); }

// ADMIN CHECK TABLETS (id, ...) PROPERTIES(...): tablet ids parsed as longs; empty list allowed.
@Override public ParseNode visitAdminCheckTabletsStatement(StarRocksParser.AdminCheckTabletsStatementContext context) { List<Long> tabletIds = Lists.newArrayList(); if (context.tabletList() != null) { tabletIds = context.tabletList().INTEGER_VALUE().stream().map(ParseTree::getText)
.map(Long::parseLong).collect(toList()); } return new AdminCheckTabletsStmt(tabletIds, (Property) visitProperty(context.property()), createPos(context)); }

// KILL [QUERY | CONNECTION] id: the boolean flags "kill connection" (true) vs "kill query" (false).
@Override public ParseNode visitKillStatement(StarRocksParser.KillStatementContext context) { NodePosition pos = createPos(context); long id = Long.parseLong(context.INTEGER_VALUE().getText()); if (context.QUERY() != null) { return new KillStmt(false, id, pos); } else { return new KillStmt(true, id, pos); } }

// SYNC.
@Override public ParseNode visitSyncStatement(StarRocksParser.SyncStatementContext context) { return new SyncStmt(createPos(context)); }

// ALTER SYSTEM with a single alter clause.
@Override public ParseNode visitAlterSystemStatement(StarRocksParser.AlterSystemStatementContext context) { return new AlterSystemStmt((AlterClause) visit(context.alterClause()), createPos(context)); }

// CANCEL DECOMMISSION/ALTER SYSTEM: collects the host:port string literals.
@Override public ParseNode visitCancelAlterSystemStatement(StarRocksParser.CancelAlterSystemStatementContext context) { return new CancelAlterSystemStmt(visit(context.string(), StringLiteral.class) .stream().map(StringLiteral::getValue).collect(toList()), createPos(context)); }

// SHOW COMPUTE NODES.
@Override public ParseNode visitShowComputeNodesStatement(StarRocksParser.ShowComputeNodesStatementContext context) { return new ShowComputeNodesStmt(createPos(context)); }

// ANALYZE TABLE: optional column list and properties; SAMPLE/ASYNC flags are read from keywords
// (continues past this block).
@Override public ParseNode visitAnalyzeStatement(StarRocksParser.AnalyzeStatementContext context) { QualifiedName qualifiedName = getQualifiedName(context.qualifiedName()); TableName tableName = qualifiedNameToTableName(qualifiedName); List<Identifier> columns = visitIfPresent(context.identifier(), Identifier.class); List<String> columnNames = null; if (columns != null) { columnNames = columns.stream().map(Identifier::getValue).collect(toList()); } Map<String, String> properties = new HashMap<>(); if (context.properties() != null) { List<Property> propertyList = visit(context.properties().property(), Property.class); for (Property property : propertyList) { properties.put(property.getKey(), property.getValue()); } } return new
AnalyzeStmt(tableName, columnNames, properties, context.SAMPLE() != null, context.ASYNC() != null, new AnalyzeBasicDesc(), createPos(context)); } @Override public ParseNode visitDropStatsStatement(StarRocksParser.DropStatsStatementContext context) { QualifiedName qualifiedName = getQualifiedName(context.qualifiedName()); TableName tableName = qualifiedNameToTableName(qualifiedName); return new DropStatsStmt(tableName, createPos(context)); } @Override public ParseNode visitCreateAnalyzeStatement(StarRocksParser.CreateAnalyzeStatementContext context) { NodePosition pos = createPos(context); Map<String, String> properties = new HashMap<>(); if (context.properties() != null) { List<Property> propertyList = visit(context.properties().property(), Property.class); for (Property property : propertyList) { properties.put(property.getKey(), property.getValue()); } } if (context.DATABASE() != null) { return new CreateAnalyzeJobStmt(((Identifier) visit(context.db)).getValue(), context.FULL() == null, properties, pos); } else if (context.TABLE() != null) { QualifiedName qualifiedName = getQualifiedName(context.qualifiedName()); TableName tableName = qualifiedNameToTableName(qualifiedName); List<Identifier> columns = visitIfPresent(context.identifier(), Identifier.class); List<String> columnNames = null; if (columns != null) { columnNames = columns.stream().map(Identifier::getValue).collect(toList()); } return new CreateAnalyzeJobStmt(tableName, columnNames, context.SAMPLE() != null, properties, pos); } else { return new CreateAnalyzeJobStmt(context.FULL() == null, properties, pos); } } @Override public ParseNode visitDropAnalyzeJobStatement(StarRocksParser.DropAnalyzeJobStatementContext context) { return new DropAnalyzeJobStmt(Long.parseLong(context.INTEGER_VALUE().getText()), createPos(context)); } @Override public ParseNode visitShowAnalyzeStatement(StarRocksParser.ShowAnalyzeStatementContext context) { Predicate predicate = null; NodePosition pos = createPos(context); if 
(context.expression() != null) { predicate = (Predicate) visit(context.expression()); } if (context.STATUS() != null) { return new ShowAnalyzeStatusStmt(predicate, pos); } else if (context.JOB() != null) { return new ShowAnalyzeJobStmt(predicate, pos); } else { return new ShowAnalyzeJobStmt(predicate, pos); } } @Override public ParseNode visitShowStatsMetaStatement(StarRocksParser.ShowStatsMetaStatementContext context) { Predicate predicate = null; if (context.expression() != null) { predicate = (Predicate) visit(context.expression()); } return new ShowBasicStatsMetaStmt(predicate, createPos(context)); } @Override public ParseNode visitShowHistogramMetaStatement(StarRocksParser.ShowHistogramMetaStatementContext context) { Predicate predicate = null; if (context.expression() != null) { predicate = (Predicate) visit(context.expression()); } return new ShowHistogramStatsMetaStmt(predicate, createPos(context)); } @Override public ParseNode visitAnalyzeHistogramStatement(StarRocksParser.AnalyzeHistogramStatementContext context) { QualifiedName qualifiedName = getQualifiedName(context.qualifiedName()); TableName tableName = qualifiedNameToTableName(qualifiedName); List<Identifier> columns = visitIfPresent(context.identifier(), Identifier.class); List<String> columnNames = null; if (columns != null) { columnNames = columns.stream().map(Identifier::getValue).collect(toList()); } Map<String, String> properties = new HashMap<>(); if (context.properties() != null) { List<Property> propertyList = visit(context.properties().property(), Property.class); for (Property property : propertyList) { properties.put(property.getKey(), property.getValue()); } } long bucket; if (context.bucket != null) { bucket = Long.parseLong(context.bucket.getText()); } else { bucket = Config.histogram_buckets_size; } return new AnalyzeStmt(tableName, columnNames, properties, true, context.ASYNC() != null, new AnalyzeHistogramDesc(bucket), createPos(context)); } @Override public ParseNode 
visitDropHistogramStatement(StarRocksParser.DropHistogramStatementContext context) {
        QualifiedName qualifiedName = getQualifiedName(context.qualifiedName());
        TableName tableName = qualifiedNameToTableName(qualifiedName);
        List<Identifier> columns = visitIfPresent(context.identifier(), Identifier.class);
        List<String> columnNames = null;
        if (columns != null) {
            columnNames = columns.stream().map(Identifier::getValue).collect(toList());
        }
        return new DropHistogramStmt(tableName, columnNames, createPos(context));
    }

    @Override
    public ParseNode visitKillAnalyzeStatement(StarRocksParser.KillAnalyzeStatementContext context) {
        return new KillAnalyzeStmt(Long.parseLong(context.INTEGER_VALUE().getText()), createPos(context));
    }

    // ANALYZE PROFILE FROM '<query_id>' [, plan_node_id ...]: optional plan node ids narrow the analysis.
    @Override
    public ParseNode visitAnalyzeProfileStatement(StarRocksParser.AnalyzeProfileStatementContext context) {
        StringLiteral stringLiteral = (StringLiteral) visit(context.string());
        List<Integer> planNodeIds = Lists.newArrayList();
        if (context.INTEGER_VALUE() != null) {
            planNodeIds = context.INTEGER_VALUE().stream()
                    .map(ParseTree::getText)
                    .map(Integer::parseInt)
                    .collect(toList());
        }
        return new AnalyzeProfileStmt(stringLiteral.getStringValue(), planNodeIds, createPos(context));
    }

    // CREATE RESOURCE GROUP [IF NOT EXISTS] [OR REPLACE] <name> TO (classifier, ...) WITH (props).
    // Each classifier is a conjunction of predicates; predicatesList is one entry per classifier.
    public ParseNode visitCreateResourceGroupStatement(StarRocksParser.CreateResourceGroupStatementContext context) {
        Identifier identifier = (Identifier) visit(context.identifier());
        String name = identifier.getValue();
        List<List<Predicate>> predicatesList = new ArrayList<>();
        for (StarRocksParser.ClassifierContext classifierContext : context.classifier()) {
            List<Predicate> p = visit(classifierContext.expressionList().expression(), Predicate.class);
            predicatesList.add(p);
        }
        Map<String, String> properties = new HashMap<>();
        List<Property> propertyList = visit(context.property(), Property.class);
        for (Property property : propertyList) {
            properties.put(property.getKey(), property.getValue());
        }
        return new CreateResourceGroupStmt(name, context.EXISTS() != null, context.REPLACE() != null,
                predicatesList, properties, createPos(context));
    }

    @Override
    public ParseNode visitDropResourceGroupStatement(StarRocksParser.DropResourceGroupStatementContext context) {
        Identifier identifier = (Identifier) visit(context.identifier());
        return new DropResourceGroupStmt(identifier.getValue(), createPos(context));
    }

    // ALTER RESOURCE GROUP <name> {ADD classifiers | DROP (ids)|ALL | WITH (props)}.
    @Override
    public ParseNode visitAlterResourceGroupStatement(StarRocksParser.AlterResourceGroupStatementContext context) {
        Identifier identifier = (Identifier) visit(context.identifier());
        String name = identifier.getValue();
        NodePosition pos = createPos(context);
        if (context.ADD() != null) {
            List<List<Predicate>> predicatesList = new ArrayList<>();
            for (StarRocksParser.ClassifierContext classifierContext : context.classifier()) {
                List<Predicate> p = visit(classifierContext.expressionList().expression(), Predicate.class);
                predicatesList.add(p);
            }
            return new AlterResourceGroupStmt(name, new AlterResourceGroupStmt.AddClassifiers(predicatesList), pos);
        } else if (context.DROP() != null) {
            if (context.ALL() != null) {
                return new AlterResourceGroupStmt(name, new AlterResourceGroupStmt.DropAllClassifiers(), pos);
            } else {
                return new AlterResourceGroupStmt(name,
                        new AlterResourceGroupStmt.DropClassifiers(context.INTEGER_VALUE()
                                .stream().map(ParseTree::getText).map(Long::parseLong).collect(toList())), pos);
            }
        } else {
            Map<String, String> properties = new HashMap<>();
            List<Property> propertyList = visit(context.property(), Property.class);
            for (Property property : propertyList) {
                properties.put(property.getKey(), property.getValue());
            }
            return new AlterResourceGroupStmt(name, new AlterResourceGroupStmt.AlterProperties(properties), pos);
        }
    }

    // SHOW [ALL] RESOURCE GROUPS | SHOW RESOURCE GROUP <name>.
    @Override
    public ParseNode visitShowResourceGroupStatement(StarRocksParser.ShowResourceGroupStatementContext context) {
        NodePosition pos = createPos(context);
        if (context.GROUPS() != null) {
            return new ShowResourceGroupStmt(null, context.ALL() != null, pos);
        } else {
            Identifier identifier = (Identifier) visit(context.identifier());
            return new ShowResourceGroupStmt(identifier.getValue(), false, pos);
        }
    }

    // CREATE [EXTERNAL] RESOURCE <name> PROPERTIES(...).
    public ParseNode visitCreateResourceStatement(StarRocksParser.CreateResourceStatementContext context) {
        Identifier identifier = (Identifier) visit(context.identifierOrString());
        Map<String, String> properties = new HashMap<>();
        if (context.properties() != null) {
            List<Property> propertyList = visit(context.properties().property(), Property.class);
            for (Property property : propertyList) {
                properties.put(property.getKey(), property.getValue());
            }
        }
        return new CreateResourceStmt(context.EXTERNAL() != null, identifier.getValue(), properties,
                createPos(context));
    }

    public ParseNode visitDropResourceStatement(StarRocksParser.DropResourceStatementContext context) {
        Identifier identifier = (Identifier) visit(context.identifierOrString());
        return new DropResourceStmt(identifier.getValue(), createPos(context));
    }

    public ParseNode visitAlterResourceStatement(StarRocksParser.AlterResourceStatementContext context) {
        Identifier identifier = (Identifier) visit(context.identifierOrString());
        Map<String, String> properties = new HashMap<>();
        if (context.properties() != null) {
            List<Property> propertyList = visit(context.properties().property(), Property.class);
            for (Property property : propertyList) {
                properties.put(property.getKey(), property.getValue());
            }
        }
        return new AlterResourceStmt(identifier.getValue(), properties, createPos(context));
    }

    public ParseNode visitShowResourceStatement(StarRocksParser.ShowResourceStatementContext context) {
        return new ShowResourcesStmt(createPos(context));
    }

    // LOAD LABEL <label> (data descs) [WITH BROKER|RESOURCE ...] [PROPERTIES(...)].
    @Override
    public ParseNode visitLoadStatement(StarRocksParser.LoadStatementContext context) {
        NodePosition pos = createPos(context);
        LabelName label = getLabelName(context.labelName());
        List<DataDescription> dataDescriptions = null;
        if (context.data != null) {
            dataDescriptions = context.data.dataDesc().stream().map(this::getDataDescription)
                    .collect(toList());
        }
        Map<String, String>
properties = null;
        if (context.props != null) {
            properties = Maps.newHashMap();
            List<Property> propertyList = visit(context.props.property(), Property.class);
            for (Property property : propertyList) {
                properties.put(property.getKey(), property.getValue());
            }
        }
        // WITH RESOURCE takes priority over a broker clause.
        if (context.resource != null) {
            ResourceDesc resourceDesc = getResourceDesc(context.resource);
            return new LoadStmt(label, dataDescriptions, resourceDesc, properties, pos);
        }
        BrokerDesc brokerDesc = getBrokerDesc(context.broker);
        String cluster = null;
        if (context.system != null) {
            cluster = ((Identifier) visit(context.system)).getValue();
        }
        LoadStmt stmt = new LoadStmt(label, dataDescriptions, brokerDesc, cluster, properties, pos);
        // Attach optimizer/session hints collected during parsing for this statement.
        stmt.setHintNodes(hintMap.get(context));
        return stmt;
    }

    // Converts `[db.]label` into a LabelName; db defaults to "" (current database resolved later).
    private LabelName getLabelName(StarRocksParser.LabelNameContext context) {
        String label = ((Identifier) visit(context.label)).getValue();
        String db = "";
        if (context.db != null) {
            db = ((Identifier) visit(context.db)).getValue();
        }
        return new LabelName(db, label, createPos(context));
    }

    // Builds one DataDescription from a dataDesc clause. Two shapes exist:
    //  - `FROM TABLE src`  -> table-source description (early return below);
    //  - `FROM FILES (...)` -> file-source description with separators/format/CSV options.
    private DataDescription getDataDescription(StarRocksParser.DataDescContext context) {
        NodePosition pos = createPos(context);
        String dstTableName = ((Identifier) visit(context.dstTableName)).getValue();
        PartitionNames partitionNames = (PartitionNames) visitIfPresent(context.partitions);
        Expr whereExpr = (Expr) visitIfPresent(context.where);
        List<Expr> colMappingList = null;
        if (context.colMappingList != null) {
            colMappingList = visit(context.colMappingList.expressionList().expression(), Expr.class);
        }
        if (context.srcTableName != null) {
            String srcTableName = ((Identifier) visit(context.srcTableName)).getValue();
            return new DataDescription(dstTableName, partitionNames, srcTableName, context.NEGATIVE() != null,
                    colMappingList, whereExpr, pos);
        }
        List<String> files = context.srcFiles.string().stream().map(c -> ((StringLiteral) visit(c)).getStringValue())
                .collect(toList());
        ColumnSeparator colSep = getColumnSeparator(context.colSep);
        RowDelimiter rowDelimiter = getRowDelimiter(context.rowSep);
        // FORMAT AS may be spelled as an identifier or a quoted string.
        String format = null;
        if (context.format != null) {
            if (context.format.identifier() != null) {
                format = ((Identifier) visit(context.format.identifier())).getValue();
            } else if (context.format.string() != null) {
                format = ((StringLiteral) visit(context.format.string())).getStringValue();
            }
        }
        List<String> colList = null;
        if (context.colList != null) {
            List<Identifier> identifiers = visit(context.colList.identifier(), Identifier.class);
            colList = identifiers.stream().map(Identifier::getValue).collect(toList());
        }
        List<String> colFromPath = null;
        if (context.colFromPath != null) {
            List<Identifier> identifiers = visit(context.colFromPath.identifier(), Identifier.class);
            colFromPath = identifiers.stream().map(Identifier::getValue).collect(toList());
        }
        // CSV format properties: escape/enclose characters, header-skip count, whitespace trimming.
        StarRocksParser.FormatPropsContext formatPropsContext;
        CsvFormat csvFormat;
        if (context.formatPropsField != null) {
            formatPropsContext = context.formatProps();
            String escape = null;
            if (formatPropsContext.escapeCharacter != null) {
                StringLiteral stringLiteral = (StringLiteral) visit(formatPropsContext.escapeCharacter);
                escape = stringLiteral.getValue();
            }
            String enclose = null;
            if (formatPropsContext.encloseCharacter != null) {
                StringLiteral stringLiteral = (StringLiteral) visit(formatPropsContext.encloseCharacter);
                enclose = stringLiteral.getValue();
            }
            long skipheader = 0;
            if (formatPropsContext.INTEGER_VALUE() != null) {
                skipheader = Long.parseLong(formatPropsContext.INTEGER_VALUE().getText());
                // Negative skip counts are silently clamped to 0.
                if (skipheader < 0) {
                    skipheader = 0;
                }
            }
            boolean trimspace = false;
            if (formatPropsContext.booleanValue() != null) {
                trimspace = Boolean.parseBoolean(formatPropsContext.booleanValue().getText());
            }
            // Only the first character of the escape/enclose strings is used; 0 means "unset".
            csvFormat = new CsvFormat(enclose == null ? 0 : (byte) enclose.charAt(0),
                    escape == null ? 0 : (byte) escape.charAt(0), skipheader, trimspace);
        } else {
            csvFormat = new CsvFormat((byte) 0, (byte) 0, 0, false);
        }
        return new DataDescription(dstTableName, partitionNames, files, colList, colSep, rowDelimiter,
                format, colFromPath, context.NEGATIVE() != null, colMappingList, whereExpr, csvFormat,
                createPos(context));
    }

    // The three helpers below all return null when the clause is absent.
    private ColumnSeparator getColumnSeparator(StarRocksParser.StringContext context) {
        if (context != null) {
            String sep = ((StringLiteral) visit(context)).getValue();
            return new ColumnSeparator(sep);
        }
        return null;
    }

    private RowDelimiter getRowDelimiter(StarRocksParser.StringContext context) {
        if (context != null) {
            String sep = ((StringLiteral) visit(context)).getValue();
            return new RowDelimiter(sep);
        }
        return null;
    }

    // WITH BROKER [<name>] [(props)]: a nameless broker clause yields the properties-only BrokerDesc.
    private BrokerDesc getBrokerDesc(StarRocksParser.BrokerDescContext context) {
        if (context != null) {
            NodePosition pos = createPos(context);
            Map<String, String> properties = null;
            if (context.props != null) {
                properties = Maps.newHashMap();
                List<Property> propertyList = visit(context.props.property(), Property.class);
                for (Property property : propertyList) {
                    properties.put(property.getKey(), property.getValue());
                }
            }
            if (context.identifierOrString() != null) {
                String brokerName = ((Identifier) visit(context.identifierOrString())).getValue();
                return new BrokerDesc(brokerName, properties, pos);
            } else {
                return new BrokerDesc(properties, pos);
            }
        }
        return null;
    }

    // WITH RESOURCE <name> [(props)].
    private ResourceDesc getResourceDesc(StarRocksParser.ResourceDescContext context) {
        if (context != null) {
            String brokerName = ((Identifier) visit(context.identifierOrString())).getValue();
            Map<String, String> properties = null;
            if (context.props != null) {
                properties = Maps.newHashMap();
                List<Property> propertyList = visit(context.props.property(), Property.class);
                for (Property property : propertyList) {
                    properties.put(property.getKey(), property.getValue());
                }
            }
            return new ResourceDesc(brokerName, properties, createPos(context));
        }
        return null;
    }

    @Override
    public ParseNode
visitShowLoadStatement(StarRocksParser.ShowLoadStatementContext context) {
        String db = null;
        if (context.identifier() != null) {
            db = ((Identifier) visit(context.identifier())).getValue();
        }
        Expr labelExpr = null;
        if (context.expression() != null) {
            labelExpr = (Expr) visit(context.expression());
        }
        List<OrderByElement> orderByElements = null;
        if (context.ORDER() != null) {
            orderByElements = new ArrayList<>();
            orderByElements.addAll(visit(context.sortItem(), OrderByElement.class));
        }
        LimitElement limitElement = null;
        if (context.limitElement() != null) {
            limitElement = (LimitElement) visit(context.limitElement());
        }
        boolean all = context.ALL() != null;
        ShowLoadStmt res = new ShowLoadStmt(db, labelExpr, orderByElements, limitElement, createPos(context));
        res.setAll(all);
        return res;
    }

    // SHOW LOAD WARNINGS [ON 'url' | FROM db WHERE ... LIMIT ...].
    @Override
    public ParseNode visitShowLoadWarningsStatement(StarRocksParser.ShowLoadWarningsStatementContext context) {
        if (context.ON() != null) {
            String url = ((StringLiteral) visit(context.string())).getValue();
            // NOTE(review): unlike the branch below, this form passes no NodePosition
            // (4-arg ctor) — confirm whether the position was omitted intentionally.
            return new ShowLoadWarningsStmt(null, url, null, null);
        }
        String db = null;
        if (context.identifier() != null) {
            db = ((Identifier) visit(context.identifier())).getValue();
        }
        Expr labelExpr = null;
        if (context.expression() != null) {
            labelExpr = (Expr) visit(context.expression());
        }
        LimitElement limitElement = null;
        if (context.limitElement() != null) {
            limitElement = (LimitElement) visit(context.limitElement());
        }
        return new ShowLoadWarningsStmt(db, null, labelExpr, limitElement, createPos(context));
    }

    @Override
    public ParseNode visitCancelLoadStatement(StarRocksParser.CancelLoadStatementContext context) {
        String db = null;
        if (context.identifier() != null) {
            db = ((Identifier) visit(context.identifier())).getValue();
        }
        Expr labelExpr = null;
        if (context.expression() != null) {
            labelExpr = (Expr) visit(context.expression());
        }
        return new CancelLoadStmt(db, labelExpr, createPos(context));
    }

    @Override
    public ParseNode visitCancelCompactionStatement(StarRocksParser.CancelCompactionStatementContext context) {
        Expr txnIdExpr = null;
        if (context.expression() != null) {
            txnIdExpr = (Expr) visit(context.expression());
        }
        return new CancelCompactionStmt(txnIdExpr, createPos(context));
    }

    @Override
    public ParseNode visitShowAuthorStatement(StarRocksParser.ShowAuthorStatementContext context) {
        return new ShowAuthorStmt(createPos(context));
    }

    @Override
    public ParseNode visitShowBackendsStatement(StarRocksParser.ShowBackendsStatementContext context) {
        return new ShowBackendsStmt(createPos(context));
    }

    @Override
    public ParseNode visitShowBrokerStatement(StarRocksParser.ShowBrokerStatementContext context) {
        return new ShowBrokerStmt(createPos(context));
    }

    // SHOW CHARSET [LIKE 'pattern' | WHERE <expr>].
    @Override
    public ParseNode visitShowCharsetStatement(StarRocksParser.ShowCharsetStatementContext context) {
        String pattern = null;
        if (context.pattern != null) {
            StringLiteral stringLiteral = (StringLiteral) visit(context.pattern);
            pattern = stringLiteral.getValue();
        }
        Expr where = null;
        if (context.expression() != null) {
            where = (Expr) visit(context.expression());
        }
        return new ShowCharsetStmt(pattern, where, createPos(context));
    }

    @Override
    public ParseNode visitShowCollationStatement(StarRocksParser.ShowCollationStatementContext context) {
        String pattern = null;
        if (context.pattern != null) {
            StringLiteral stringLiteral = (StringLiteral) visit(context.pattern);
            pattern = stringLiteral.getValue();
        }
        Expr where = null;
        if (context.expression() != null) {
            where = (Expr) visit(context.expression());
        }
        return new ShowCollationStmt(pattern, where, createPos(context));
    }

    @Override
    public ParseNode visitShowDeleteStatement(StarRocksParser.ShowDeleteStatementContext context) {
        QualifiedName dbName = null;
        // NOTE(review): the guard tests context.qualifiedName() but reads context.db —
        // presumably `db` labels the same qualifiedName rule; verify against the grammar.
        if (context.qualifiedName() != null) {
            dbName = getQualifiedName(context.db);
        }
        return new ShowDeleteStmt(dbName == null ? null : dbName.toString(), createPos(context));
    }

    @Override
    public ParseNode visitShowDynamicPartitionStatement(StarRocksParser.ShowDynamicPartitionStatementContext context) {
        QualifiedName dbName = null;
        if (context.db != null) {
            dbName = getQualifiedName(context.db);
        }
        return new ShowDynamicPartitionStmt(dbName == null ? null : dbName.toString(), createPos(context));
    }

    @Override
    public ParseNode visitShowEventsStatement(StarRocksParser.ShowEventsStatementContext context) {
        return new ShowEventsStmt(createPos(context));
    }

    @Override
    public ParseNode visitShowEnginesStatement(StarRocksParser.ShowEnginesStatementContext context) {
        return new ShowEnginesStmt(createPos(context));
    }

    @Override
    public ParseNode visitShowFrontendsStatement(StarRocksParser.ShowFrontendsStatementContext context) {
        return new ShowFrontendsStmt(createPos(context));
    }

    @Override
    public ParseNode visitShowPluginsStatement(StarRocksParser.ShowPluginsStatementContext context) {
        return new ShowPluginsStmt(createPos(context));
    }

    @Override
    public ParseNode visitShowRepositoriesStatement(StarRocksParser.ShowRepositoriesStatementContext context) {
        return new ShowRepositoriesStmt(createPos(context));
    }

    @Override
    public ParseNode visitShowOpenTableStatement(StarRocksParser.ShowOpenTableStatementContext context) {
        return new ShowOpenTableStmt(createPos(context));
    }

    // SHOW PROCEDURE STATUS [LIKE 'pattern' | WHERE <expr>].
    @Override
    public ParseNode visitShowProcedureStatement(StarRocksParser.ShowProcedureStatementContext context) {
        NodePosition pos = createPos(context);
        if (context.pattern != null) {
            StringLiteral stringLiteral = (StringLiteral) visit(context.pattern);
            return new ShowProcedureStmt(stringLiteral.getValue(), null, pos);
        } else if (context.expression() != null) {
            return new ShowProcedureStmt(null, (Expr) visit(context.expression()), pos);
        } else {
            return new ShowProcedureStmt(null, null, pos);
        }
    }

    @Override
    public ParseNode visitShowProcStatement(StarRocksParser.ShowProcStatementContext context) {
        StringLiteral stringLiteral = (StringLiteral)
visit(context.path);
        return new ShowProcStmt(stringLiteral.getValue(), createPos(context));
    }

    @Override
    public ParseNode visitShowProcesslistStatement(StarRocksParser.ShowProcesslistStatementContext context) {
        boolean isShowFull = context.FULL() != null;
        return new ShowProcesslistStmt(isShowFull, createPos(context));
    }

    // SHOW PROFILELIST [LIMIT n]: -1 means "no limit".
    @Override
    public ParseNode visitShowProfilelistStatement(StarRocksParser.ShowProfilelistStatementContext context) {
        int limit = context.LIMIT() != null ? Integer.parseInt(context.limit.getText()) : -1;
        return new ShowProfilelistStmt(limit, createPos(context));
    }

    @Override
    public ParseNode visitShowRunningQueriesStatement(StarRocksParser.ShowRunningQueriesStatementContext context) {
        int limit = context.LIMIT() != null ? Integer.parseInt(context.limit.getText()) : -1;
        return new ShowRunningQueriesStmt(limit, createPos(context));
    }

    // SHOW USAGE RESOURCE GROUPS | SHOW USAGE RESOURCE GROUP <name>.
    @Override
    public ParseNode visitShowResourceGroupUsageStatement(
            StarRocksParser.ShowResourceGroupUsageStatementContext context) {
        if (context.GROUPS() != null) {
            return new ShowResourceGroupUsageStmt(null, createPos(context));
        }
        Identifier groupName = (Identifier) visit(context.identifier());
        return new ShowResourceGroupUsageStmt(groupName.getValue(), createPos(context));
    }

    @Override
    public ParseNode visitShowTransactionStatement(StarRocksParser.ShowTransactionStatementContext context) {
        String database = null;
        if (context.qualifiedName() != null) {
            database = getQualifiedName(context.qualifiedName()).toString();
        }
        Expr where = null;
        if (context.expression() != null) {
            where = (Expr) visit(context.expression());
        }
        return new ShowTransactionStmt(database, where, createPos(context));
    }

    // SHOW [GLOBAL|SESSION|...] STATUS [LIKE 'pattern' | WHERE <expr>].
    @Override
    public ParseNode visitShowStatusStatement(StarRocksParser.ShowStatusStatementContext context) {
        String pattern = null;
        if (context.pattern != null) {
            StringLiteral stringLiteral = (StringLiteral) visit(context.pattern);
            pattern = stringLiteral.getValue();
        }
        Expr where = null;
        if (context.expression() != null) {
            where = (Expr) visit(context.expression());
        }
        return new ShowStatusStmt(getVariableType(context.varType()), pattern, where, createPos(context));
    }

    @Override
    public ParseNode visitShowTriggersStatement(StarRocksParser.ShowTriggersStatementContext context) {
        return new ShowTriggersStmt(createPos(context));
    }

    // SHOW PROPERTY [FOR 'user'] [LIKE 'pattern']: string(0)/string(1) shift depending on FOR.
    @Override
    public ParseNode visitShowUserPropertyStatement(StarRocksParser.ShowUserPropertyStatementContext context) {
        String user;
        String pattern;
        if (context.FOR() == null) {
            user = null;
            pattern = context.LIKE() == null ? null : ((StringLiteral) visit(context.string(0))).getValue();
        } else {
            user = ((StringLiteral) visit(context.string(0))).getValue();
            pattern = context.LIKE() == null ? null : ((StringLiteral) visit(context.string(1))).getValue();
        }
        return new ShowUserPropertyStmt(user, pattern, createPos(context));
    }

    @Override
    public ParseNode visitShowVariablesStatement(StarRocksParser.ShowVariablesStatementContext context) {
        String pattern = null;
        if (context.pattern != null) {
            StringLiteral stringLiteral = (StringLiteral) visit(context.pattern);
            pattern = stringLiteral.getValue();
        }
        Expr where = null;
        if (context.expression() != null) {
            where = (Expr) visit(context.expression());
        }
        return new ShowVariablesStmt(getVariableType(context.varType()), pattern, where, createPos(context));
    }

    @Override
    public ParseNode visitShowWarningStatement(StarRocksParser.ShowWarningStatementContext context) {
        NodePosition pos = createPos(context);
        if (context.limitElement() != null) {
            return new ShowWarningStmt((LimitElement) visit(context.limitElement()), pos);
        }
        return new ShowWarningStmt(null, pos);
    }

    @Override
    public ParseNode visitHelpStatement(StarRocksParser.HelpStatementContext context) {
        String mask = ((Identifier) visit(context.identifierOrString())).getValue();
        return new HelpStmt(mask, createPos(context));
    }

    // BACKUP SNAPSHOT <db.label> TO <repo> ON (table descs) [PROPERTIES(...)].
    @Override
    public ParseNode visitBackupStatement(StarRocksParser.BackupStatementContext context) {
        QualifiedName qualifiedName = getQualifiedName(context.qualifiedName());
LabelName labelName = qualifiedNameToLabelName(qualifiedName);
        List<TableRef> tblRefs = new ArrayList<>();
        for (StarRocksParser.TableDescContext tableDescContext : context.tableDesc()) {
            StarRocksParser.QualifiedNameContext qualifiedNameContext = tableDescContext.qualifiedName();
            // `qualifiedName` is reused here for each table in the ON clause.
            qualifiedName = getQualifiedName(qualifiedNameContext);
            TableName tableName = qualifiedNameToTableName(qualifiedName);
            PartitionNames partitionNames = null;
            if (tableDescContext.partitionNames() != null) {
                partitionNames = (PartitionNames) visit(tableDescContext.partitionNames());
            }
            TableRef tableRef = new TableRef(tableName, null, partitionNames, createPos(tableDescContext));
            tblRefs.add(tableRef);
        }
        Map<String, String> properties = null;
        if (context.propertyList() != null) {
            properties = new HashMap<>();
            List<Property> propertyList = visit(context.propertyList().property(), Property.class);
            for (Property property : propertyList) {
                properties.put(property.getKey(), property.getValue());
            }
        }
        String repoName = ((Identifier) visit(context.identifier())).getValue();
        return new BackupStmt(labelName, repoName, tblRefs, properties, createPos(context));
    }

    // CANCEL BACKUP: second CancelBackupStmt arg (isRestore) is false here, true in CancelRestore below.
    @Override
    public ParseNode visitCancelBackupStatement(StarRocksParser.CancelBackupStatementContext context) {
        return new CancelBackupStmt(((Identifier) visit(context.identifier())).getValue(), false,
                createPos(context));
    }

    @Override
    public ParseNode visitShowBackupStatement(StarRocksParser.ShowBackupStatementContext context) {
        NodePosition pos = createPos(context);
        if (context.identifier() == null) {
            return new ShowBackupStmt(null, pos);
        }
        return new ShowBackupStmt(((Identifier) visit(context.identifier())).getValue(), pos);
    }

    // RESTORE SNAPSHOT <db.label> FROM <repo> ON (table descs [AS alias]) [PROPERTIES(...)].
    @Override
    public ParseNode visitRestoreStatement(StarRocksParser.RestoreStatementContext context) {
        QualifiedName qualifiedName = getQualifiedName(context.qualifiedName());
        LabelName labelName = qualifiedNameToLabelName(qualifiedName);
        List<TableRef> tblRefs = new ArrayList<>();
        for (StarRocksParser.RestoreTableDescContext tableDescContext : context.restoreTableDesc()) {
            StarRocksParser.QualifiedNameContext qualifiedNameContext = tableDescContext.qualifiedName();
            qualifiedName = getQualifiedName(qualifiedNameContext);
            TableName tableName = qualifiedNameToTableName(qualifiedName);
            PartitionNames partitionNames = null;
            if (tableDescContext.partitionNames() != null) {
                partitionNames = (PartitionNames) visit(tableDescContext.partitionNames());
            }
            // Optional AS alias renames the table on restore.
            String alias = null;
            if (tableDescContext.identifier() != null) {
                alias = ((Identifier) visit(tableDescContext.identifier())).getValue();
            }
            TableRef tableRef = new TableRef(tableName, alias, partitionNames, createPos(tableDescContext));
            tblRefs.add(tableRef);
        }
        Map<String, String> properties = null;
        if (context.propertyList() != null) {
            properties = new HashMap<>();
            List<Property> propertyList = visit(context.propertyList().property(), Property.class);
            for (Property property : propertyList) {
                properties.put(property.getKey(), property.getValue());
            }
        }
        String repoName = ((Identifier) visit(context.identifier())).getValue();
        return new RestoreStmt(labelName, repoName, tblRefs, properties, createPos(context));
    }

    @Override
    public ParseNode visitCancelRestoreStatement(StarRocksParser.CancelRestoreStatementContext context) {
        return new CancelBackupStmt(((Identifier) visit(context.identifier())).getValue(), true,
                createPos(context));
    }

    @Override
    public ParseNode visitShowRestoreStatement(StarRocksParser.ShowRestoreStatementContext context) {
        NodePosition pos = createPos(context);
        if (context.identifier() == null) {
            return new ShowRestoreStmt(null, null, pos);
        }
        if (context.expression() != null) {
            return new ShowRestoreStmt(((Identifier) visit(context.identifier())).getValue(),
                    (Expr) visit(context.expression()), pos);
        } else {
            return new ShowRestoreStmt(((Identifier) visit(context.identifier())).getValue(), null, pos);
        }
    }

    @Override
    public ParseNode visitShowSnapshotStatement(StarRocksParser.ShowSnapshotStatementContext context) {
        StarRocksParser.ExpressionContext expression = context.expression();
        Expr where = null;
        if (expression != null) {
            where = (Expr) visit(context.expression());
        }
        String repoName = ((Identifier) visit(context.identifier())).getValue();
        return new ShowSnapshotStmt(repoName, where, createPos(context));
    }

    // CREATE [READ ONLY] REPOSITORY <repo> WITH BROKER [<name>] ON LOCATION 'loc' [PROPERTIES(...)].
    @Override
    public ParseNode visitCreateRepositoryStatement(StarRocksParser.CreateRepositoryStatementContext context) {
        boolean isReadOnly = context.READ() != null && context.ONLY() != null;
        Map<String, String> properties = new HashMap<>();
        if (context.propertyList() != null) {
            List<Property> propertyList = visit(context.propertyList().property(), Property.class);
            for (Property property : propertyList) {
                properties.put(property.getKey(), property.getValue());
            }
        }
        String location = ((StringLiteral) visit(context.location)).getValue();
        String repoName = ((Identifier) visit(context.repoName)).getValue();
        String brokerName = null;
        if (context.brokerName != null) {
            brokerName = ((Identifier) visit(context.brokerName)).getValue();
        }
        return new CreateRepositoryStmt(isReadOnly, repoName, brokerName, location, properties,
                createPos(context));
    }

    @Override
    public ParseNode visitDropRepositoryStatement(StarRocksParser.DropRepositoryStatementContext context) {
        return new DropRepositoryStmt(((Identifier) visit(context.identifier())).getValue(), createPos(context));
    }

    // ADD SQLBLACKLIST 'sql': rejects empty patterns at parse time.
    @Override
    public ParseNode visitAddSqlBlackListStatement(StarRocksParser.AddSqlBlackListStatementContext context) {
        String sql = ((StringLiteral) visit(context.string())).getStringValue();
        if (sql == null || sql.isEmpty()) {
            throw new ParsingException(PARSER_ERROR_MSG.emptySql(), createPos(context.string()));
        }
        return new AddSqlBlackListStmt(sql);
    }

    @Override
    public ParseNode visitDelSqlBlackListStatement(StarRocksParser.DelSqlBlackListStatementContext context) {
        List<Long> indexes = context.INTEGER_VALUE().stream().map(ParseTree::getText)
                .map(Long::parseLong).collect(toList());
        return new DelSqlBlackListStmt(indexes,
createPos(context)); } // tail of the preceding visitor method, cut at the chunk boundary; unchanged

// SHOW SQLBLACKLIST — no arguments beyond source position.
@Override
public ParseNode visitShowSqlBlackListStatement(StarRocksParser.ShowSqlBlackListStatementContext context) {
    return new ShowSqlBlackListStmt(createPos(context));
}

// SHOW WHITELIST — stateless statement node.
@Override
public ParseNode visitShowWhiteListStatement(StarRocksParser.ShowWhiteListStatementContext context) {
    return new ShowWhiteListStmt();
}

// ADD BACKEND BLACKLIST <id> [, <id> ...] — collects the literal backend ids.
@Override
public ParseNode visitAddBackendBlackListStatement(StarRocksParser.AddBackendBlackListStatementContext ctx) {
    List<Long> ids = ctx.INTEGER_VALUE().stream().map(ParseTree::getText).map(Long::parseLong).collect(toList());
    return new AddBackendBlackListStmt(ids, createPos(ctx));
}

// DELETE BACKEND BLACKLIST <id> [, <id> ...].
// NOTE(review): argument order (pos, ids) is swapped relative to AddBackendBlackListStmt(ids, pos) —
// presumably the two constructors are declared that way; verify against the Stmt classes.
@Override
public ParseNode visitDelBackendBlackListStatement(StarRocksParser.DelBackendBlackListStatementContext ctx) {
    List<Long> ids = ctx.INTEGER_VALUE().stream().map(ParseTree::getText).map(Long::parseLong).collect(toList());
    return new DelBackendBlackListStmt(createPos(ctx), ids);
}

// SHOW BACKEND BLACKLIST.
@Override
public ParseNode visitShowBackendBlackListStatement(StarRocksParser.ShowBackendBlackListStatementContext ctx) {
    return new ShowBackendBlackListStmt(createPos(ctx));
}

// CREATE DATACACHE RULE <target> PRIORITY = [-]<n> [WHERE <pred>] [PROPERTIES (...)].
@Override
public ParseNode visitCreateDataCacheRuleStatement(StarRocksParser.CreateDataCacheRuleStatementContext ctx) {
    // Target path components (each may be an identifier, a string, or '*').
    List<StarRocksParser.IdentifierOrStringOrStarContext> partList =
            ctx.dataCacheTarget().identifierOrStringOrStar();
    List<String> parts = partList.stream().map(c -> ((Identifier) visit(c)).getValue()).collect(toList());
    QualifiedName qualifiedName = QualifiedName.of(parts);
    int priority = Integer.parseInt(ctx.INTEGER_VALUE().getText());
    if (ctx.MINUS_SYMBOL() != null) {
        // Leading '-' is a separate token; negate the parsed magnitude.
        priority *= -1;
    }
    Expr predicates = null;
    if (ctx.expression() != null) {
        predicates = (Expr) visit(ctx.expression());
    }
    // null (not empty map) when no PROPERTIES clause was given.
    Map<String, String> properties = null;
    if (ctx.properties() != null) {
        properties = new HashMap<>();
        List<Property> propertyList = visit(ctx.properties().property(), Property.class);
        for (Property property : propertyList) {
            properties.put(property.getKey(), property.getValue());
        }
    }
    return new CreateDataCacheRuleStmt(qualifiedName, predicates, priority, properties, createPos(ctx));
}

// SHOW DATACACHE RULES.
@Override
public ParseNode visitShowDataCacheRulesStatement(StarRocksParser.ShowDataCacheRulesStatementContext ctx) {
    return new ShowDataCacheRulesStmt(createPos(ctx));
}

// DROP DATACACHE RULE <id>.
@Override
public ParseNode visitDropDataCacheRuleStatement(StarRocksParser.DropDataCacheRuleStatementContext ctx) {
    long id = Long.parseLong(ctx.INTEGER_VALUE().getText());
    return new DropDataCacheRuleStmt(id, createPos(ctx));
}

// CLEAR DATACACHE RULES.
@Override
public ParseNode visitClearDataCacheRulesStatement(StarRocksParser.ClearDataCacheRulesStatementContext ctx) {
    return new ClearDataCacheRulesStmt(createPos(ctx));
}

// EXPORT TABLE <tbl> [PARTITION (...)] TO '<path>' [PROPERTIES (...)] [WITH BROKER ...] [SYNC].
@Override
public ParseNode visitExportStatement(StarRocksParser.ExportStatementContext context) {
    StarRocksParser.QualifiedNameContext qualifiedNameContext = context.tableDesc().qualifiedName();
    // Track start/stop tokens so the TableRef position spans the optional partition clause too.
    Token start = qualifiedNameContext.start;
    Token stop = qualifiedNameContext.stop;
    QualifiedName qualifiedName = getQualifiedName(qualifiedNameContext);
    TableName tableName = qualifiedNameToTableName(qualifiedName);
    PartitionNames partitionNames = null;
    if (context.tableDesc().partitionNames() != null) {
        stop = context.tableDesc().partitionNames().stop;
        partitionNames = (PartitionNames) visit(context.tableDesc().partitionNames());
    }
    TableRef tableRef = new TableRef(tableName, null, partitionNames, createPos(start, stop));
    // Export destination path.
    StringLiteral stringLiteral = (StringLiteral) visit(context.string());
    Map<String, String> properties = null;
    if (context.properties() != null) {
        properties = new HashMap<>();
        List<Property> propertyList = visit(context.properties().property(), Property.class);
        for (Property property : propertyList) {
            properties.put(property.getKey(), property.getValue());
        }
    }
    BrokerDesc brokerDesc = getBrokerDesc(context.brokerDesc());
    boolean sync = context.SYNC() != null;
    return new ExportStmt(tableRef, getColumnNames(context.columnAliases()),
            stringLiteral.getValue(), properties, brokerDesc, createPos(context),
sync); } // closes visitExportStatement — the call continues from the previous line

// CANCEL EXPORT [FROM <db>] [WHERE <pred>].
@Override
public ParseNode visitCancelExportStatement(StarRocksParser.CancelExportStatementContext context) {
    String catalog = null;
    if (context.catalog != null) {
        QualifiedName dbName = getQualifiedName(context.catalog);
        catalog = dbName.toString();
    }
    Expr where = null;
    if (context.expression() != null) {
        where = (Expr) visit(context.expression());
    }
    return new CancelExportStmt(catalog, where, createPos(context));
}

// SHOW EXPORT [FROM <db>] [WHERE ...] [ORDER BY ...] [LIMIT ...].
@Override
public ParseNode visitShowExportStatement(StarRocksParser.ShowExportStatementContext context) {
    String catalog = null;
    if (context.catalog != null) {
        QualifiedName dbName = getQualifiedName(context.catalog);
        catalog = dbName.toString();
    }
    LimitElement le = null;
    if (context.limitElement() != null) {
        le = (LimitElement) visit(context.limitElement());
    }
    // null when no ORDER BY clause is present (distinct from an empty list).
    List<OrderByElement> orderByElements = null;
    if (context.ORDER() != null) {
        orderByElements = new ArrayList<>();
        orderByElements.addAll(visit(context.sortItem(), OrderByElement.class));
    }
    Expr whereExpr = null;
    if (context.expression() != null) {
        whereExpr = (Expr) visit(context.expression());
    }
    return new ShowExportStmt(catalog, whereExpr, orderByElements, le, createPos(context));
}

// INSTALL PLUGIN FROM <path> [PROPERTIES (...)].
@Override
public ParseNode visitInstallPluginStatement(StarRocksParser.InstallPluginStatementContext context) {
    String pluginPath = ((Identifier) visit(context.identifierOrString())).getValue();
    Map<String, String> properties = getProperties(context.properties());
    return new InstallPluginStmt(pluginPath, properties, createPos(context));
}

// UNINSTALL PLUGIN <name>.
@Override
public ParseNode visitUninstallPluginStatement(StarRocksParser.UninstallPluginStatementContext context) {
    String pluginPath = ((Identifier) visit(context.identifierOrString())).getValue();
    return new UninstallPluginStmt(pluginPath, createPos(context));
}

// CREATE FILE '<name>' [IN <db>] PROPERTIES (...).
@Override
public ParseNode visitCreateFileStatement(StarRocksParser.CreateFileStatementContext context) {
    String fileName = ((StringLiteral) visit(context.string())).getStringValue();
    String catalog = null;
    if (context.catalog != null) {
        QualifiedName dbName = getQualifiedName(context.catalog);
        catalog = dbName.toString();
    }
    Map<String, String> properties = getProperties(context.properties());
    return new CreateFileStmt(fileName, catalog, properties, createPos(context));
}

// DROP FILE '<name>' [FROM <db>] PROPERTIES (...).
@Override
public ParseNode visitDropFileStatement(StarRocksParser.DropFileStatementContext context) {
    String fileName = ((StringLiteral) visit(context.string())).getStringValue();
    String catalog = null;
    if (context.catalog != null) {
        QualifiedName dbName = getQualifiedName(context.catalog);
        catalog = dbName.toString();
    }
    Map<String, String> properties = getProperties(context.properties());
    return new DropFileStmt(fileName, catalog, properties, createPos(context));
}

// SHOW FILE [FROM <db>].
@Override
public ParseNode visitShowSmallFilesStatement(StarRocksParser.ShowSmallFilesStatementContext context) {
    String catalog = null;
    if (context.catalog != null) {
        QualifiedName dbName = getQualifiedName(context.catalog);
        catalog = dbName.toString();
    }
    return new ShowSmallFilesStmt(catalog, createPos(context));
}

// SET <var> = <expr> [, ...] — each item visited via setVar.
@Override
public ParseNode visitSetStatement(StarRocksParser.SetStatementContext context) {
    List<SetListItem> propertyList = visit(context.setVar(), SetListItem.class);
    return new SetStmt(propertyList, createPos(context));
}

// SET NAMES / SET CHARSET — charset and optional collation.
@Override
public ParseNode visitSetNames(StarRocksParser.SetNamesContext context) {
    NodePosition pos = createPos(context);
    if (context.CHAR() != null || context.CHARSET() != null) {
        // SET CHARSET form: zero or one charset identifier, no collation.
        if (context.identifierOrString().isEmpty()) {
            return new SetNamesVar(null, null, pos);
        } else {
            return new SetNamesVar(
                    ((Identifier) visit(context.identifierOrString().get(0))).getValue(),
                    null,
                    pos);
        }
    } else {
        // SET NAMES form: both charset and collation are optional.
        String charset = null;
        if (context.charset != null) {
            charset = ((Identifier) visit(context.charset)).getValue();
        }
        String collate = null;
        if (context.collate != null) {
            collate = ((Identifier) visit(context.collate)).getValue();
        }
        return new SetNamesVar(charset, collate, pos);
    }
}

@Override
public ParseNode
visitSetPassword(StarRocksParser.SetPasswordContext context) {
    // SET PASSWORD [FOR <user>] = PASSWORD('<pw>') | '<hashed>'.
    NodePosition pos = createPos(context);
    String passwordText;
    StringLiteral stringLiteral = (StringLiteral) visit(context.string());
    if (context.PASSWORD().size() > 1) {
        // Two PASSWORD tokens means the PASSWORD('<plain>') form: scramble the plaintext.
        passwordText = new String(MysqlPassword.makeScrambledPassword(stringLiteral.getStringValue()));
    } else {
        // Single token: the literal is already a hashed password.
        passwordText = stringLiteral.getStringValue();
    }
    if (context.user() != null) {
        return new SetPassVar((UserIdentity) visit(context.user()), passwordText, pos);
    } else {
        // No FOR clause: applies to the current user.
        return new SetPassVar(null, passwordText, pos);
    }
}

// SET @uservar = <expr>.
@Override
public ParseNode visitSetUserVar(StarRocksParser.SetUserVarContext context) {
    VariableExpr variableDesc = (VariableExpr) visit(context.userVariable());
    Expr expr = (Expr) visit(context.expression());
    return new UserVariable(variableDesc.getName(), expr, createPos(context));
}

// SET [GLOBAL|SESSION] <sysvar> = <expr> | DEFAULT.
@Override
public ParseNode visitSetSystemVar(StarRocksParser.SetSystemVarContext context) {
    NodePosition pos = createPos(context);
    if (context.systemVariable() != null) {
        // @@[scope.]name form — scope comes from the variable expression itself.
        VariableExpr variableDesc = (VariableExpr) visit(context.systemVariable());
        Expr expr = (Expr) visit(context.setExprOrDefault());
        return new SystemVariable(variableDesc.getSetType(), variableDesc.getName(), expr, pos);
    } else {
        // Bare identifier form — scope from the optional GLOBAL/SESSION keyword, default SESSION.
        Expr expr = (Expr) visit(context.setExprOrDefault());
        String variable = ((Identifier) visit(context.identifier())).getValue();
        if (context.varType() != null) {
            return new SystemVariable(getVariableType(context.varType()), variable, expr, pos);
        } else {
            return new SystemVariable(SetType.SESSION, variable, expr, pos);
        }
    }
}

// SET TRANSACTION ... — accepted but carries no payload.
@Override
public ParseNode visitSetTransaction(StarRocksParser.SetTransactionContext context) {
    return new SetTransaction(createPos(context));
}

// SET PROPERTY [FOR '<user>'] 'key' = 'value' [, ...].
@Override
public ParseNode visitSetUserPropertyStatement(StarRocksParser.SetUserPropertyStatementContext context) {
    // NOTE(review): uses getValue() here while most literals use getStringValue() — confirm equivalent.
    String user = context.FOR() == null ? null : ((StringLiteral) visit(context.string())).getValue();
    List<SetUserPropertyVar> list = new ArrayList<>();
    if (context.userPropertyList() != null) {
        List<Property> propertyList = visit(context.userPropertyList().property(), Property.class);
        for (Property property : propertyList) {
            SetUserPropertyVar setVar = new SetUserPropertyVar(property.getKey(), property.getValue());
            list.add(setVar);
        }
    }
    return new SetUserPropertyStmt(user, list, createPos(context));
}

// RHS of a SET item: DEFAULT → null; ON/ALL keywords become string literals; else the expression.
@Override
public ParseNode visitSetExprOrDefault(StarRocksParser.SetExprOrDefaultContext context) {
    if (context.DEFAULT() != null) {
        return null;
    } else if (context.ON() != null) {
        return new StringLiteral("ON");
    } else if (context.ALL() != null) {
        return new StringLiteral("ALL");
    } else {
        return visit(context.expression());
    }
}

// ADMIN EXECUTE ON <be_id> '<script>' — beId of -1 means no backend id was given.
@Override
public ParseNode visitExecuteScriptStatement(StarRocksParser.ExecuteScriptStatementContext context) {
    long beId = -1;
    if (context.INTEGER_VALUE() != null) {
        beId = Long.parseLong(context.INTEGER_VALUE().getText());
    }
    StringLiteral stringLiteral = (StringLiteral) visit(context.string());
    String script = stringLiteral.getStringValue();
    return new ExecuteScriptStmt(beId, script, createPos(context));
}

// CREATE STORAGE VOLUME [IF NOT EXISTS] <name> TYPE = <t> LOCATIONS = (...) [COMMENT ...] PROPERTIES (...).
@Override
public ParseNode visitCreateStorageVolumeStatement(StarRocksParser.CreateStorageVolumeStatementContext context) {
    Identifier identifier = (Identifier) visit(context.identifierOrString());
    String svName = identifier.getValue();
    String storageType = ((Identifier) visit(context.typeDesc().identifier())).getValue();
    List<StarRocksParser.StringContext> locationList = context.locationsDesc().stringList().string();
    List<String> locations = new ArrayList<>();
    for (StarRocksParser.StringContext location : locationList) {
        locations.add(((StringLiteral) visit(location)).getValue());
    }
    return new CreateStorageVolumeStmt(context.IF() != null,
            svName, storageType, getProperties(context.properties()), locations,
            context.comment() == null ? null : ((StringLiteral) visit(context.comment().string())).getStringValue(),
            createPos(context));
}

// SHOW STORAGE VOLUMES [LIKE '<pattern>'].
@Override
public ParseNode visitShowStorageVolumesStatement(StarRocksParser.ShowStorageVolumesStatementContext context) {
    String pattern = null;
    if (context.pattern != null) {
        StringLiteral stringLiteral = (StringLiteral) visit(context.pattern);
        pattern = stringLiteral.getValue();
    }
    return new ShowStorageVolumesStmt(pattern, createPos(context));
}

// ALTER STORAGE VOLUME <name> ... — folds COMMENT and SET-properties clauses into one stmt.
@Override
public ParseNode visitAlterStorageVolumeStatement(StarRocksParser.AlterStorageVolumeStatementContext context) {
    Identifier identifier = (Identifier) visit(context.identifierOrString());
    String svName = identifier.getValue();
    NodePosition pos = createPos(context);
    List<AlterStorageVolumeClause> alterClauses =
            visit(context.alterStorageVolumeClause(), AlterStorageVolumeClause.class);
    Map<String, String> properties = new HashMap<>();
    String comment = null;
    // Later clauses of the same kind overwrite earlier ones.
    for (AlterStorageVolumeClause clause : alterClauses) {
        if (clause.getOpType().equals(AlterStorageVolumeClause.AlterOpType.ALTER_COMMENT)) {
            comment = ((AlterStorageVolumeCommentClause) clause).getNewComment();
        } else if (clause.getOpType().equals(AlterStorageVolumeClause.AlterOpType.MODIFY_PROPERTIES)) {
            properties = ((ModifyStorageVolumePropertiesClause) clause).getProperties();
        }
    }
    return new AlterStorageVolumeStmt(svName, properties, comment, pos);
}

// DROP STORAGE VOLUME [IF EXISTS] <name>.
@Override
public ParseNode visitDropStorageVolumeStatement(StarRocksParser.DropStorageVolumeStatementContext context) {
    Identifier identifier = (Identifier) visit(context.identifierOrString());
    String svName = identifier.getValue();
    return new DropStorageVolumeStmt(context.IF() != null, svName, createPos(context));
}

// DESC STORAGE VOLUME <name>.
@Override
public ParseNode visitDescStorageVolumeStatement(StarRocksParser.DescStorageVolumeStatementContext context) {
    Identifier identifier = (Identifier) visit(context.identifierOrString());
    String svName = identifier.getValue();
    return new DescStorageVolumeStmt(svName, createPos(context));
}
// SET <name> AS DEFAULT STORAGE VOLUME.
@Override
public ParseNode visitSetDefaultStorageVolumeStatement(
        StarRocksParser.SetDefaultStorageVolumeStatementContext context) {
    Identifier identifier = (Identifier) visit(context.identifierOrString());
    String svName = identifier.getValue();
    return new SetDefaultStorageVolumeStmt(svName, createPos(context));
}

// ALTER STORAGE VOLUME ... COMMENT = '<text>' clause.
@Override
public ParseNode visitModifyStorageVolumeCommentClause(
        StarRocksParser.ModifyStorageVolumeCommentClauseContext context) {
    String comment = ((StringLiteral) visit(context.string())).getStringValue();
    return new AlterStorageVolumeCommentClause(comment, createPos(context));
}

// ALTER STORAGE VOLUME ... SET ('k'='v', ...) clause.
@Override
public ParseNode visitModifyStorageVolumePropertiesClause(
        StarRocksParser.ModifyStorageVolumePropertiesClauseContext context) {
    Map<String, String> properties = new HashMap<>();
    List<Property> propertyList = visit(context.propertyList().property(), Property.class);
    for (Property property : propertyList) {
        properties.put(property.getKey(), property.getValue());
    }
    return new ModifyStorageVolumePropertiesClause(properties, createPos(context));
}

// ADMIN ENABLE/DISABLE FAILPOINT '<name>' [WITH n TIMES | WITH p PROBABILITY] [ON BACKEND '<list>'].
@Override
public ParseNode visitUpdateFailPointStatusStatement(
        StarRocksParser.UpdateFailPointStatusStatementContext ctx) {
    String failpointName = ((StringLiteral) visit(ctx.string(0))).getStringValue();
    // null backend list means "all backends"; otherwise a comma-separated host list.
    List<String> backendList = null;
    if (ctx.BACKEND() != null) {
        String tmp = ((StringLiteral) visit(ctx.string(1))).getStringValue();
        backendList = Lists.newArrayList(tmp.split(","));
    }
    if (ctx.ENABLE() != null) {
        if (ctx.TIMES() != null) {
            // Trigger a bounded number of times; must be positive.
            int nTimes = Integer.parseInt(ctx.INTEGER_VALUE().getText());
            if (nTimes <= 0) {
                throw new ParsingException(String.format(
                        "Invalid TIMES value %d, it should be a positive integer", nTimes));
            }
            return new UpdateFailPointStatusStatement(failpointName, nTimes, backendList, createPos(ctx));
        } else if (ctx.PROBABILITY() != null) {
            // Trigger probabilistically; must be within [0, 1].
            double probability = Double.parseDouble(ctx.DECIMAL_VALUE().getText());
            if (probability < 0 || probability > 1) {
                throw new ParsingException(String.format(
                        "Invalid PROBABILITY value %f, it should be in range [0, 1]", probability));
            }
            return new UpdateFailPointStatusStatement(failpointName, probability, backendList, createPos(ctx));
        }
        // Plain ENABLE with no trigger mode.
        return new UpdateFailPointStatusStatement(failpointName, true, backendList, createPos(ctx));
    }
    // DISABLE.
    return new UpdateFailPointStatusStatement(failpointName, false, backendList, createPos(ctx));
}

// SHOW FAILPOINTS [LIKE '<pattern>'] [ON BACKEND '<list>'] — idx tracks which string literal is which.
@Override
public ParseNode visitShowFailPointStatement(StarRocksParser.ShowFailPointStatementContext ctx) {
    String pattern = null;
    List<String> backendList = null;
    int idx = 0;
    if (ctx.LIKE() != null) {
        pattern = ((StringLiteral) visit(ctx.string(idx++))).getStringValue();
    }
    if (ctx.BACKEND() != null) {
        String tmp = ((StringLiteral) visit(ctx.string(idx++))).getStringValue();
        backendList = Lists.newArrayList(tmp.split(","));
    }
    return new ShowFailPointStatement(pattern, backendList, createPos(ctx));
}

// CREATE DICTIONARY <name> USING <table> (col KEY/VALUE, ...) [PROPERTIES (...)].
@Override
public ParseNode visitCreateDictionaryStatement(StarRocksParser.CreateDictionaryStatementContext context) {
    String dictionaryName = getQualifiedName(context.dictionaryName().qualifiedName()).toString();
    String queryableObject = getQualifiedName(context.qualifiedName()).toString();
    List<StarRocksParser.DictionaryColumnDescContext> dictionaryColumnDescs = context.dictionaryColumnDesc();
    List<String> dictionaryKeys = new ArrayList<>();
    List<String> dictionaryValues = new ArrayList<>();
    for (StarRocksParser.DictionaryColumnDescContext desc : dictionaryColumnDescs) {
        String columnName = getQualifiedName(desc.qualifiedName()).toString();
        if (desc.KEY() != null) {
            dictionaryKeys.add(columnName);
        }
        if (desc.VALUE() != null) {
            dictionaryValues.add(columnName);
        }
    }
    // null (not empty) when no PROPERTIES clause was given.
    Map<String, String> properties = null;
    if (context.properties() != null) {
        properties = new HashMap<>();
        List<Property> propertyList = visit(context.properties().property(), Property.class);
        for (Property property : propertyList) {
            properties.put(property.getKey(), property.getValue());
        }
    }
    return new CreateDictionaryStmt(dictionaryName, queryableObject, dictionaryKeys, dictionaryValues,
            properties, createPos(context));
}

// DROP DICTIONARY <name> [CACHE] — CACHE drops only the cached data.
@Override
public ParseNode visitDropDictionaryStatement(StarRocksParser.DropDictionaryStatementContext context) {
    String dictionaryName = getQualifiedName(context.qualifiedName()).toString();
    boolean cacheOnly = false;
    if (context.CACHE() != null) {
        cacheOnly = true;
    }
    return new DropDictionaryStmt(dictionaryName, cacheOnly, createPos(context));
}

// REFRESH DICTIONARY <name>.
@Override
public ParseNode visitRefreshDictionaryStatement(StarRocksParser.RefreshDictionaryStatementContext context) {
    String dictionaryName = getQualifiedName(context.qualifiedName()).toString();
    return new RefreshDictionaryStmt(dictionaryName, createPos(context));
}

// SHOW DICTIONARY [<name>].
@Override
public ParseNode visitShowDictionaryStatement(StarRocksParser.ShowDictionaryStatementContext context) {
    String dictionaryName = null;
    if (context.qualifiedName() != null) {
        dictionaryName = getQualifiedName(context.qualifiedName()).toString();
    }
    return new ShowDictionaryStmt(dictionaryName, createPos(context));
}

// CANCEL REFRESH DICTIONARY <name>.
@Override
public ParseNode visitCancelRefreshDictionaryStatement(
        StarRocksParser.CancelRefreshDictionaryStatementContext context) {
    String dictionaryName = getQualifiedName(context.qualifiedName()).toString();
    return new CancelRefreshDictionaryStmt(dictionaryName, createPos(context));
}

// Catch-all for grammar rules parsed but not supported.
@Override
public ParseNode visitUnsupportedStatement(StarRocksParser.UnsupportedStatementContext context) {
    return new UnsupportedStmt(createPos(context));
}

// ALTER SYSTEM ADD FOLLOWER/OBSERVER '<host:port>'.
@Override
public ParseNode visitAddFrontendClause(StarRocksParser.AddFrontendClauseContext context) {
    String cluster = ((StringLiteral) visit(context.string())).getStringValue();
    NodePosition pos = createPos(context);
    if (context.FOLLOWER() != null) {
        return new AddFollowerClause(cluster, pos);
    } else {
        return new AddObserverClause(cluster, pos);
    }
}

// ALTER SYSTEM DROP FOLLOWER/OBSERVER '<host:port>'.
@Override
public ParseNode visitDropFrontendClause(StarRocksParser.DropFrontendClauseContext context) {
    String cluster = ((StringLiteral) visit(context.string())).getStringValue();
    NodePosition pos = createPos(context);
    if (context.FOLLOWER() != null) {
        return new DropFollowerClause(cluster, pos);
    } else {
        return new DropObserverClause(cluster, pos);
    }
}

// ALTER SYSTEM MODIFY FRONTEND HOST '<old>' TO '<new>'.
@Override
public ParseNode visitModifyFrontendHostClause(StarRocksParser.ModifyFrontendHostClauseContext context) {
    List<String> clusters =
            context.string().stream().map(c -> ((StringLiteral) visit(c)).getStringValue()).collect(toList());
    return new ModifyFrontendAddressClause(clusters.get(0), clusters.get(1), createPos(context));
}

// ALTER SYSTEM ADD BACKEND '<host:port>' [, ...].
@Override
public ParseNode visitAddBackendClause(StarRocksParser.AddBackendClauseContext context) {
    List<String> backends =
            context.string().stream().map(c -> ((StringLiteral) visit(c)).getStringValue()).collect(toList());
    return new AddBackendClause(backends, createPos(context));
}

// ALTER SYSTEM DROP BACKEND '<host:port>' [, ...] [FORCE].
@Override
public ParseNode visitDropBackendClause(StarRocksParser.DropBackendClauseContext context) {
    List<String> clusters =
            context.string().stream().map(c -> ((StringLiteral) visit(c)).getStringValue()).collect(toList());
    return new DropBackendClause(clusters, context.FORCE() != null, createPos(context));
}

// ALTER SYSTEM DECOMMISSION BACKEND '<host:port>' [, ...].
@Override
public ParseNode visitDecommissionBackendClause(StarRocksParser.DecommissionBackendClauseContext context) {
    List<String> clusters =
            context.string().stream().map(c -> ((StringLiteral) visit(c)).getStringValue()).collect(toList());
    return new DecommissionBackendClause(clusters, createPos(context));
}

// ALTER SYSTEM MODIFY BACKEND — either HOST '<old>' TO '<new>' or '<host:port>' SET (props).
@Override
public ParseNode visitModifyBackendClause(StarRocksParser.ModifyBackendClauseContext context) {
    List<String> strings =
            context.string().stream().map(c -> ((StringLiteral) visit(c)).getStringValue()).collect(toList());
    if (context.HOST() != null) {
        return new ModifyBackendClause(strings.get(0), strings.get(1), createPos(context));
    } else {
        String backendHostPort = strings.get(0);
        Map<String, String> properties = new HashMap<>();
        List<Property> propertyList = visit(context.propertyList().property(), Property.class);
        for (Property property : propertyList) {
            properties.put(property.getKey(), property.getValue());
        }
        return new ModifyBackendClause(backendHostPort, properties, createPos(context));
    }
}

// ALTER SYSTEM ADD COMPUTE NODE '<host:port>' [, ...].
// NOTE(review): unlike every sibling clause, no createPos(context) is passed — confirm the
// AddComputeNodeClause constructor intentionally omits a position parameter.
@Override
public ParseNode visitAddComputeNodeClause(StarRocksParser.AddComputeNodeClauseContext context) {
    List<String> hostPorts =
            context.string().stream().map(c -> ((StringLiteral) visit(c)).getStringValue()).collect(toList());
    return new AddComputeNodeClause(hostPorts);
}

// ALTER SYSTEM DROP COMPUTE NODE '<host:port>' [, ...].
@Override
public ParseNode visitDropComputeNodeClause(StarRocksParser.DropComputeNodeClauseContext context) {
    List<String> hostPorts =
            context.string().stream().map(c -> ((StringLiteral) visit(c)).getStringValue()).collect(toList());
    return new DropComputeNodeClause(hostPorts, createPos(context));
}

// ALTER SYSTEM ADD/DROP BROKER <name> ['<host:port>' ...] | DROP ALL BROKER <name>.
@Override
public ParseNode visitModifyBrokerClause(StarRocksParser.ModifyBrokerClauseContext context) {
    String brokerName = ((Identifier) visit(context.identifierOrString())).getValue();
    NodePosition pos = createPos(context);
    if (context.ALL() != null) {
        return ModifyBrokerClause.createDropAllBrokerClause(brokerName, pos);
    }
    List<String> hostPorts =
            context.string().stream().map(c -> ((StringLiteral) visit(c)).getStringValue()).collect(toList());
    if (context.ADD() != null) {
        return ModifyBrokerClause.createAddBrokerClause(brokerName, hostPorts, pos);
    }
    return ModifyBrokerClause.createDropBrokerClause(brokerName, hostPorts, pos);
}

// ALTER LOAD ERROR URL clause.
@Override
public ParseNode visitAlterLoadErrorUrlClause(StarRocksParser.AlterLoadErrorUrlClauseContext context) {
    return new AlterLoadErrorUrlClause(getProperties(context.properties()), createPos(context));
}

// ADMIN CREATE IMAGE clause.
@Override
public ParseNode visitCreateImageClause(StarRocksParser.CreateImageClauseContext context) {
    return new CreateImageClause(createPos(context));
}

// ADMIN CLEAN TABLET SCHEDULER QUEUE clause.
@Override
public ParseNode visitCleanTabletSchedQClause(
        StarRocksParser.CleanTabletSchedQClauseContext context) {
    return new CleanTabletSchedQClause(createPos(context));
}

// ALTER TABLE ... ADD INDEX <name> (cols) [USING <type>] [COMMENT ...].
@Override
public ParseNode visitCreateIndexClause(StarRocksParser.CreateIndexClauseContext context) {
    // Span the IndexDef position from the index name to the last parsed element.
    Token start = context.identifier().start;
    String indexName = ((Identifier) visit(context.identifier())).getValue();
    List<Identifier> columnList = visit(context.identifierList().identifier(), Identifier.class);
    Token stop = context.identifierList().stop;
    String comment = null;
    if (context.comment() != null) {
        stop = context.comment().stop;
        comment = ((StringLiteral) visit(context.comment())).getStringValue();
    }
    IndexDef indexDef = new IndexDef(indexName,
            columnList.stream().map(Identifier::getValue).collect(toList()),
            getIndexType(context.indexType()),
            comment,
            getPropertyList(context.propertyList()),
            createPos(start, stop));
    return new CreateIndexClause(indexDef, createPos(context));
}

// ALTER TABLE ... DROP INDEX <name>.
@Override
public ParseNode visitDropIndexClause(StarRocksParser.DropIndexClauseContext context) {
    Identifier identifier = (Identifier) visit(context.identifier());
    return new DropIndexClause(identifier.getValue(), createPos(context));
}

// ALTER TABLE ... RENAME <newName>.
@Override
public ParseNode visitTableRenameClause(StarRocksParser.TableRenameClauseContext context) {
    Identifier identifier = (Identifier) visit(context.identifier());
    return new TableRenameClause(identifier.getValue(), createPos(context));
}

// ALTER TABLE ... COMMENT = '<text>'.
@Override
public ParseNode visitModifyCommentClause(StarRocksParser.ModifyCommentClauseContext context) {
    String comment = ((StringLiteral) visit(context.string())).getStringValue();
    return new AlterTableCommentClause(comment, createPos(context));
}

// ALTER TABLE ... SWAP WITH <table>.
@Override
public ParseNode visitSwapTableClause(StarRocksParser.SwapTableClauseContext context) {
    Identifier identifier = (Identifier) visit(context.identifier());
    return new SwapTableClause(identifier.getValue(), createPos(context));
}

// ALTER TABLE ... SET ('k'='v', ...).
@Override
public ParseNode visitModifyPropertiesClause(StarRocksParser.ModifyPropertiesClauseContext context) {
    Map<String, String> properties = new HashMap<>();
    List<Property> propertyList = visit(context.propertyList().property(), Property.class);
    for
(Property property : propertyList) { properties.put(property.getKey(), property.getValue()); } return new ModifyTablePropertiesClause(properties, createPos(context)); } @Override public ParseNode visitOptimizeClause(StarRocksParser.OptimizeClauseContext context) { return new OptimizeClause( context.keyDesc() == null ? null : getKeysDesc(context.keyDesc()), context.partitionDesc() == null ? null : getPartitionDesc(context.partitionDesc(), null), context.distributionDesc() == null ? null : (DistributionDesc) visit(context.distributionDesc()), context.orderByDesc() == null ? null : visit(context.orderByDesc().identifierList().identifier(), Identifier.class) .stream().map(Identifier::getValue).collect(toList()), context.partitionNames() == null ? null : (PartitionNames) visit(context.partitionNames()), createPos(context)); } @Override public ParseNode visitAddColumnClause(StarRocksParser.AddColumnClauseContext context) { ColumnDef columnDef = getColumnDef(context.columnDesc()); if (columnDef.isAutoIncrement()) { throw new ParsingException(PARSER_ERROR_MSG.autoIncrementForbid(columnDef.getName(), "ADD"), columnDef.getPos()); } ColumnPosition columnPosition = null; if (context.FIRST() != null) { columnPosition = ColumnPosition.FIRST; } else if (context.AFTER() != null) { StarRocksParser.IdentifierContext identifier = context.identifier(0); String afterColumnName = getIdentifierName(identifier); columnPosition = new ColumnPosition(afterColumnName, createPos(identifier)); } String rollupName = null; if (context.rollupName != null) { rollupName = getIdentifierName(context.rollupName); } Map<String, String> properties = new HashMap<>(); ; properties = getProperties(context.properties()); if (columnDef.isGeneratedColumn()) { if (rollupName != null) { throw new ParsingException( PARSER_ERROR_MSG.generatedColumnLimit("rollupName", "ADD GENERATED COLUMN"), columnDef.getPos()); } if (columnPosition != null) { throw new ParsingException( 
PARSER_ERROR_MSG.generatedColumnLimit("AFTER", "ADD GENERATED COLUMN"), columnDef.getPos()); } if (properties.size() != 0) { throw new ParsingException( PARSER_ERROR_MSG.generatedColumnLimit("properties", "ADD GENERATED COLUMN"), columnDef.getPos()); } } return new AddColumnClause(columnDef, columnPosition, rollupName, properties, createPos(context)); } @Override public ParseNode visitAddColumnsClause(StarRocksParser.AddColumnsClauseContext context) { List<ColumnDef> columnDefs = getColumnDefs(context.columnDesc()); Map<String, String> properties = new HashMap<>(); properties = getProperties(context.properties()); String rollupName = null; if (context.rollupName != null) { rollupName = getIdentifierName(context.rollupName); } for (ColumnDef columnDef : columnDefs) { if (columnDef.isAutoIncrement()) { throw new ParsingException(PARSER_ERROR_MSG.autoIncrementForbid(columnDef.getName(), "ADD"), columnDef.getPos()); } if (columnDef.isGeneratedColumn()) { if (rollupName != null) { throw new ParsingException( PARSER_ERROR_MSG.generatedColumnLimit("rollupName", "ADD GENERATED COLUMN"), columnDef.getPos()); } if (properties.size() != 0) { throw new ParsingException( PARSER_ERROR_MSG.generatedColumnLimit("properties", "ADD GENERATED COLUMN"), columnDef.getPos()); } } } return new AddColumnsClause(columnDefs, rollupName, getProperties(context.properties()), createPos(context)); } @Override public ParseNode visitDropColumnClause(StarRocksParser.DropColumnClauseContext context) { String columnName = getIdentifierName(context.identifier(0)); String rollupName = null; if (context.rollupName != null) { rollupName = getIdentifierName(context.rollupName); } return new DropColumnClause(columnName, rollupName, getProperties(context.properties()), createPos(context)); } @Override public ParseNode visitModifyColumnClause(StarRocksParser.ModifyColumnClauseContext context) { ColumnDef columnDef = getColumnDef(context.columnDesc()); if (columnDef.isAutoIncrement()) { throw new 
ParsingException(PARSER_ERROR_MSG.autoIncrementForbid(columnDef.getName(), "MODIFY"),
            columnDef.getPos()); // continues the 'throw new' cut at the chunk boundary above
    }
    ColumnPosition columnPosition = null;
    if (context.FIRST() != null) {
        columnPosition = ColumnPosition.FIRST;
    } else if (context.AFTER() != null) {
        StarRocksParser.IdentifierContext identifier = context.identifier(0);
        String afterColumnName = getIdentifierName(identifier);
        columnPosition = new ColumnPosition(afterColumnName, createPos(identifier));
    }
    String rollupName = null;
    if (context.rollupName != null) {
        rollupName = getIdentifierName(context.rollupName);
    }
    // Generated columns reject rollup targets and repositioning.
    if (columnDef.isGeneratedColumn()) {
        if (rollupName != null) {
            throw new ParsingException(PARSER_ERROR_MSG.generatedColumnLimit("rollupName",
                    "MODIFY GENERATED COLUMN"), columnDef.getPos());
        }
        if (columnPosition != null) {
            throw new ParsingException(PARSER_ERROR_MSG.generatedColumnLimit("columnPosition",
                    "MODIFY GENERATED COLUMN"), columnDef.getPos());
        }
    }
    return new ModifyColumnClause(columnDef, columnPosition, rollupName,
            getProperties(context.properties()), createPos(context));
}

// ALTER TABLE ... RENAME COLUMN <old> TO <new>.
@Override
public ParseNode visitColumnRenameClause(StarRocksParser.ColumnRenameClauseContext context) {
    String oldColumnName = getIdentifierName(context.oldColumn);
    String newColumnName = getIdentifierName(context.newColumn);
    return new ColumnRenameClause(oldColumnName, newColumnName, createPos(context));
}

// ALTER TABLE ... ORDER BY (<cols>) [IN <rollup>].
@Override
public ParseNode visitReorderColumnsClause(StarRocksParser.ReorderColumnsClauseContext context) {
    List<String> cols =
            context.identifierList().identifier().stream().map(this::getIdentifierName).collect(toList());
    String rollupName = null;
    if (context.rollupName != null) {
        rollupName = getIdentifierName(context.rollupName);
    }
    return new ReorderColumnsClause(cols, rollupName, getProperties(context.properties()), createPos(context));
}

// ALTER TABLE ... RENAME ROLLUP <old> <new>.
@Override
public ParseNode visitRollupRenameClause(StarRocksParser.RollupRenameClauseContext context) {
    String rollupName = ((Identifier) visit(context.rollupName)).getValue();
    String newRollupName = ((Identifier) visit(context.newRollupName)).getValue();
    return new RollupRenameClause(rollupName, newRollupName, createPos(context));
}

// ALTER TABLE ... [BASE|CUMULATIVE] COMPACT [<partition> | (<partitions>)].
@Override
public ParseNode visitCompactionClause(StarRocksParser.CompactionClauseContext ctx) {
    NodePosition pos = createPos(ctx);
    // Absent CUMULATIVE keyword means a base compaction.
    boolean baseCompaction = ctx.CUMULATIVE() == null;
    if (ctx.identifier() != null) {
        final String partitionName = ((Identifier) visit(ctx.identifier())).getValue();
        return new CompactionClause(Collections.singletonList(partitionName), baseCompaction, pos);
    } else if (ctx.identifierList() != null) {
        final List<Identifier> identifierList = visit(ctx.identifierList().identifier(), Identifier.class);
        return new CompactionClause(identifierList.stream().map(Identifier::getValue).collect(toList()),
                baseCompaction, pos);
    } else {
        // No partition spec: compact the whole table.
        return new CompactionClause(baseCompaction, pos);
    }
}

// ALTER TABLE ... ADD [TEMPORARY] PARTITION — dispatches on the four partition-desc forms.
@Override
public ParseNode visitAddPartitionClause(StarRocksParser.AddPartitionClauseContext context) {
    boolean temporary = context.TEMPORARY() != null;
    PartitionDesc partitionDesc = null;
    if (context.singleRangePartition() != null) {
        partitionDesc = (PartitionDesc) visitSingleRangePartition(context.singleRangePartition());
    } else if (context.multiRangePartition() != null) {
        partitionDesc = (PartitionDesc) visitMultiRangePartition(context.multiRangePartition());
    } else if (context.singleItemListPartitionDesc() != null) {
        partitionDesc = (PartitionDesc) visitSingleItemListPartitionDesc(context.singleItemListPartitionDesc());
    } else if (context.multiItemListPartitionDesc() != null) {
        partitionDesc = (PartitionDesc) visitMultiItemListPartitionDesc(context.multiItemListPartitionDesc());
    }
    DistributionDesc distributionDesc = null;
    if (context.distributionDesc() != null) {
        distributionDesc = (DistributionDesc) visitDistributionDesc(context.distributionDesc());
    }
    Map<String, String> properties = new HashMap<>();
    if (context.properties() != null) {
        List<Property> propertyList = visit(context.properties().property(), Property.class);
        for (Property property : propertyList) {
            properties.put(property.getKey(), property.getValue());
        }
    }
    return new AddPartitionClause(partitionDesc, distributionDesc, properties, temporary, createPos(context));
}

// ALTER TABLE ... DROP [TEMPORARY] PARTITION [IF EXISTS] <name> [FORCE].
@Override
public ParseNode visitDropPartitionClause(StarRocksParser.DropPartitionClauseContext context) {
    String partitionName = ((Identifier) visit(context.identifier())).getValue();
    boolean temp = context.TEMPORARY() != null;
    boolean force = context.FORCE() != null;
    boolean exists = context.EXISTS() != null;
    return new DropPartitionClause(exists, partitionName, temp, force, createPos(context));
}

// TRUNCATE ... [PARTITION (...)].
@Override
public ParseNode visitTruncatePartitionClause(StarRocksParser.TruncatePartitionClauseContext context) {
    PartitionNames partitionNames = null;
    if (context.partitionNames() != null) {
        partitionNames = (PartitionNames) visit(context.partitionNames());
    }
    return new TruncatePartitionClause(partitionNames, createPos(context));
}

// ALTER TABLE ... MODIFY PARTITION <name> | (<names>) | (*) SET (props).
@Override
public ParseNode visitModifyPartitionClause(StarRocksParser.ModifyPartitionClauseContext context) {
    Map<String, String> properties = null;
    NodePosition pos = createPos(context);
    if (context.propertyList() != null) {
        properties = new HashMap<>();
        List<Property> propertyList = visit(context.propertyList().property(), Property.class);
        for (Property property : propertyList) {
            properties.put(property.getKey(), property.getValue());
        }
    }
    if (context.identifier() != null) {
        final String partitionName = ((Identifier) visit(context.identifier())).getValue();
        return new ModifyPartitionClause(Collections.singletonList(partitionName), properties, pos);
    } else if (context.identifierList() != null) {
        final List<Identifier> identifierList = visit(context.identifierList().identifier(), Identifier.class);
        return new ModifyPartitionClause(identifierList.stream().map(Identifier::getValue).collect(toList()),
                properties, pos);
    } else {
        // '(*)' form: apply to all partitions.
        return ModifyPartitionClause.createStarClause(properties, pos);
    }
}

// ALTER TABLE ... REPLACE PARTITION <p> WITH TEMPORARY PARTITION <tp> [PROPERTIES (...)].
@Override
public ParseNode visitReplacePartitionClause(StarRocksParser.ReplacePartitionClauseContext context) {
    PartitionNames partitionNames = (PartitionNames) visit(context.parName);
    PartitionNames newPartitionNames = (PartitionNames) visit(context.tempParName);
    return new ReplacePartitionClause(partitionNames, newPartitionNames,
            getProperties(context.properties()), createPos(context));
}

// ALTER TABLE ... RENAME PARTITION <old> <new>.
@Override
public ParseNode visitPartitionRenameClause(StarRocksParser.PartitionRenameClauseContext context) {
    String partitionName = ((Identifier) visit(context.parName)).getValue();
    String newPartitionName = ((Identifier) visit(context.newParName)).getValue();
    return new PartitionRenameClause(partitionName, newPartitionName, createPos(context));
}

// Resolves a 1- or 2-part qualified name into a PipeName; anything else is a parse error.
private PipeName resolvePipeName(StarRocksParser.QualifiedNameContext context) {
    String dbName = null;
    String pipeName = null;
    QualifiedName qualifiedName = getQualifiedName(context);
    if (qualifiedName.getParts().size() == 2) {
        dbName = qualifiedName.getParts().get(0);
        pipeName = qualifiedName.getParts().get(1);
    } else if (qualifiedName.getParts().size() == 1) {
        pipeName = qualifiedName.getParts().get(0);
    } else {
        throw new ParsingException(PARSER_ERROR_MSG.invalidPipeName(qualifiedName.toString()));
    }
    if (dbName != null && pipeName != null) {
        return new PipeName(createPos(context), dbName, pipeName);
    } else if (pipeName != null) {
        return new PipeName(createPos(context), pipeName);
    } else {
        throw new ParsingException(PARSER_ERROR_MSG.invalidPipeName(qualifiedName.toString()));
    }
}

// CREATE [OR REPLACE] PIPE [IF NOT EXISTS] <name> ... — the two modifiers are mutually exclusive.
// Method continues past the end of this chunk.
@Override
public ParseNode visitCreatePipeStatement(StarRocksParser.CreatePipeStatementContext context) {
    PipeName pipeName = resolvePipeName(context.qualifiedName());
    boolean ifNotExists = context.ifNotExists() != null && context.ifNotExists().IF() != null;
    boolean replace = context.orReplace() != null && context.orReplace().OR() != null;
    if (ifNotExists && replace) {
        throw new ParsingException(PARSER_ERROR_MSG.conflictedOptions("OR REPLACE", "IF NOT EXISTS"));
    }
    ParseNode insertNode
= visit(context.insertStatement()); if (!(insertNode instanceof InsertStmt)) { throw new ParsingException(PARSER_ERROR_MSG.unsupportedStatement(insertNode.toSql()), context.insertStatement()); } Map<String, String> properties = new TreeMap<>(String.CASE_INSENSITIVE_ORDER); if (context.properties() != null) { List<Property> propertyList = visit(context.properties().property(), Property.class); for (Property property : propertyList) { properties.put(property.getKey(), property.getValue()); } } InsertStmt insertStmt = (InsertStmt) insertNode; int insertSqlIndex = context.insertStatement().start.getStartIndex(); return new CreatePipeStmt(ifNotExists, replace, pipeName, insertSqlIndex, insertStmt, properties, createPos(context)); } @Override public ParseNode visitDropPipeStatement(StarRocksParser.DropPipeStatementContext context) { PipeName pipeName = resolvePipeName(context.qualifiedName()); boolean ifExists = context.IF() != null; return new DropPipeStmt(ifExists, pipeName, createPos(context)); } @Override public ParseNode visitShowPipeStatement(StarRocksParser.ShowPipeStatementContext context) { String dbName = null; if (context.qualifiedName() != null) { dbName = getQualifiedName(context.qualifiedName()).toString(); } List<OrderByElement> orderBy = null; if (context.ORDER() != null) { orderBy = new ArrayList<>(); orderBy.addAll(visit(context.sortItem(), OrderByElement.class)); } LimitElement limit = null; if (context.limitElement() != null) { limit = (LimitElement) visit(context.limitElement()); } if (context.LIKE() != null) { StringLiteral stringLiteral = (StringLiteral) visit(context.pattern); return new ShowPipeStmt(dbName, stringLiteral.getValue(), null, orderBy, limit, createPos(context)); } else if (context.WHERE() != null) { return new ShowPipeStmt(dbName, null, (Expr) visit(context.expression()), orderBy, limit, createPos(context)); } else { return new ShowPipeStmt(dbName, null, null, orderBy, limit, createPos(context)); } } @Override public ParseNode 
visitDescPipeStatement(StarRocksParser.DescPipeStatementContext context) { PipeName pipeName = resolvePipeName(context.qualifiedName()); return new DescPipeStmt(createPos(context), pipeName); } @Override public ParseNode visitAlterPipeClause(StarRocksParser.AlterPipeClauseContext context) { if (context.SUSPEND() != null) { return new AlterPipePauseResume(createPos(context), true); } else if (context.RESUME() != null) { return new AlterPipePauseResume(createPos(context), false); } else if (context.RETRY() != null) { if (context.ALL() != null) { return new AlterPipeClauseRetry(createPos(context), true); } else { String fileName = ((StringLiteral) visitString(context.fileName)).getStringValue(); return new AlterPipeClauseRetry(createPos(context), false, fileName); } } else if (context.SET() != null) { Map<String, String> properties = getPropertyList(context.propertyList()); if (MapUtils.isEmpty(properties)) { throw new ParsingException("empty property"); } return new AlterPipeSetProperty(createPos(context), properties); } else { throw new ParsingException(PARSER_ERROR_MSG.unsupportedOpWithInfo(context.toString())); } } @Override public ParseNode visitAlterPipeStatement(StarRocksParser.AlterPipeStatementContext context) { PipeName pipeName = resolvePipeName(context.qualifiedName()); AlterPipeClause alterPipeClause = (AlterPipeClause) visit(context.alterPipeClause()); return new AlterPipeStmt(createPos(context), pipeName, alterPipeClause); } @Override public ParseNode visitQueryStatement(StarRocksParser.QueryStatementContext context) { QueryRelation queryRelation = (QueryRelation) visit(context.queryRelation()); QueryStatement queryStatement = new QueryStatement(queryRelation); if (context.outfile() != null) { queryStatement.setOutFileClause((OutFileClause) visit(context.outfile())); } if (context.explainDesc() != null) { queryStatement.setIsExplain(true, getExplainType(context.explainDesc())); } if (context.optimizerTrace() != null) { String module = "base"; if 
(context.optimizerTrace().identifier() != null) { module = ((Identifier) visit(context.optimizerTrace().identifier())).getValue(); } queryStatement.setIsTrace(getTraceMode(context.optimizerTrace()), module); } return queryStatement; } private Tracers.Mode getTraceMode(StarRocksParser.OptimizerTraceContext context) { if (context.LOGS() != null) { return Tracers.Mode.LOGS; } else if (context.VALUES() != null) { return Tracers.Mode.VARS; } else if (context.TIMES() != null) { return Tracers.Mode.TIMER; } else if (context.ALL() != null) { return Tracers.Mode.TIMING; } else { return Tracers.Mode.NONE; } } @Override public ParseNode visitQueryRelation(StarRocksParser.QueryRelationContext context) { QueryRelation queryRelation = (QueryRelation) visit(context.queryNoWith()); List<CTERelation> withQuery = new ArrayList<>(); if (context.withClause() != null) { withQuery = visit(context.withClause().commonTableExpression(), CTERelation.class); } withQuery.forEach(queryRelation::addCTERelation); return queryRelation; } @Override public ParseNode visitCommonTableExpression(StarRocksParser.CommonTableExpressionContext context) { QueryRelation queryRelation = (QueryRelation) visit(context.queryRelation()); return new CTERelation( RelationId.of(queryRelation).hashCode(), ((Identifier) visit(context.name)).getValue(), getColumnNames(context.columnAliases()), new QueryStatement(queryRelation), queryRelation.getPos()); } @Override public ParseNode visitQueryNoWith(StarRocksParser.QueryNoWithContext context) { List<OrderByElement> orderByElements = new ArrayList<>(); if (context.ORDER() != null) { orderByElements.addAll(visit(context.sortItem(), OrderByElement.class)); } LimitElement limitElement = null; if (context.limitElement() != null) { limitElement = (LimitElement) visit(context.limitElement()); } QueryRelation queryRelation = (QueryRelation) visit(context.queryPrimary()); queryRelation.setOrderBy(orderByElements); queryRelation.setLimit(limitElement); return queryRelation; } 
@Override
    public ParseNode visitSetOperation(StarRocksParser.SetOperationContext context) {
        NodePosition pos = createPos(context);
        QueryRelation left = (QueryRelation) visit(context.left);
        QueryRelation right = (QueryRelation) visit(context.right);
        // UNION/INTERSECT/EXCEPT default to DISTINCT unless ALL is written explicitly.
        boolean distinct = true;
        if (context.setQuantifier() != null) {
            if (context.setQuantifier().DISTINCT() != null) {
                distinct = true;
            } else if (context.setQuantifier().ALL() != null) {
                distinct = false;
            }
        }
        SetQualifier setQualifier = distinct ? SetQualifier.DISTINCT : SetQualifier.ALL;
        switch (context.operator.getType()) {
            case StarRocksLexer.UNION:
                // Flatten chains of the same operator/qualifier into one n-ary relation.
                if (left instanceof UnionRelation
                        && ((UnionRelation) left).getQualifier().equals(setQualifier)) {
                    ((UnionRelation) left).addRelation(right);
                    return left;
                }
                return new UnionRelation(Lists.newArrayList(left, right), setQualifier, pos);
            case StarRocksLexer.INTERSECT:
                if (left instanceof IntersectRelation
                        && ((IntersectRelation) left).getQualifier().equals(setQualifier)) {
                    ((IntersectRelation) left).addRelation(right);
                    return left;
                }
                return new IntersectRelation(Lists.newArrayList(left, right), setQualifier, pos);
            default:
                // Remaining operator token is EXCEPT/MINUS.
                if (left instanceof ExceptRelation
                        && ((ExceptRelation) left).getQualifier().equals(setQualifier)) {
                    ((ExceptRelation) left).addRelation(right);
                    return left;
                }
                return new ExceptRelation(Lists.newArrayList(left, right), setQualifier, pos);
        }
    }

    // Merges the key/value pairs of every SET_VAR hint; later hints override earlier ones.
    private Map<String, String> extractVarHintValues(List<HintNode> hints) {
        Map<String, String> selectHints = new HashMap<>();
        if (CollectionUtils.isEmpty(hints)) {
            return selectHints;
        }
        for (HintNode hint : hints) {
            if (hint instanceof SetVarHint) {
                selectHints.putAll(hint.getValue());
            }
        }
        return selectHints;
    }

    @Override
    public ParseNode visitQuerySpecification(StarRocksParser.QuerySpecificationContext context) {
        Relation from = null;
        List<SelectListItem> selectItems = visit(context.selectItem(), SelectListItem.class);
        if (context.fromClause() instanceof StarRocksParser.DualContext) { 
for (SelectListItem item : selectItems) { if (item.isStar()) { throw new ParsingException(PARSER_ERROR_MSG.noTableUsed(), item.getPos()); } } } else { StarRocksParser.FromContext fromContext = (StarRocksParser.FromContext) context.fromClause(); if (fromContext.relations() != null) { List<Relation> relations = visit(fromContext.relations().relation(), Relation.class); Iterator<Relation> iterator = relations.iterator(); Relation relation = iterator.next(); while (iterator.hasNext()) { Relation next = iterator.next(); relation = new JoinRelation(null, relation, next, null, false); } from = relation; } } /* from == null means a statement without from or from dual, add a single row of null values here, so that the semantics are the same, and the processing of subsequent query logic can be simplified, such as select sum(1) or select sum(1) from dual, will be converted to select sum(1) from (values(null)) t. This can share the same logic as select sum(1) from table */ if (from == null) { from = ValuesRelation.newDualRelation(); } boolean isDistinct = context.setQuantifier() != null && context.setQuantifier().DISTINCT() != null; SelectList selectList = new SelectList(selectItems, isDistinct); selectList.setHintNodes(hintMap.get(context)); SelectRelation resultSelectRelation = new SelectRelation( selectList, from, (Expr) visitIfPresent(context.where), (GroupByClause) visitIfPresent(context.groupingElement()), (Expr) visitIfPresent(context.having), createPos(context)); if (context.qualifyFunction != null) { resultSelectRelation.setOrderBy(new ArrayList<>()); SubqueryRelation subqueryRelation = new SubqueryRelation(new QueryStatement(resultSelectRelation)); TableName qualifyTableName = new TableName(null, "__QUALIFY__TABLE"); subqueryRelation.setAlias(qualifyTableName); SelectListItem windowFunction = selectItems.get(selectItems.size() - 1); windowFunction.setAlias("__QUALIFY__VALUE"); long selectValue = Long.parseLong(context.limit.getText()); List<SelectListItem> 
selectItemsVirtual = Lists.newArrayList(selectItems); selectItemsVirtual.remove(selectItemsVirtual.size() - 1); List<SelectListItem> selectItemsOuter = new ArrayList<>(); for (SelectListItem item : selectItemsVirtual) { if (item.getExpr() instanceof SlotRef) { SlotRef exprRef = (SlotRef) item.getExpr(); String columnName = item.getAlias() == null ? exprRef.getColumnName() : item.getAlias(); SlotRef resultSlotRef = new SlotRef(qualifyTableName, columnName); selectItemsOuter.add(new SelectListItem(resultSlotRef, null)); } else { throw new ParsingException("Can't support result other than column."); } } SelectList selectListOuter = new SelectList(selectItemsOuter, isDistinct); IntLiteral rightValue = new IntLiteral(selectValue); SlotRef leftSlotRef = new SlotRef(qualifyTableName, "__QUALIFY__VALUE"); BinaryType op = getComparisonOperator(((TerminalNode) context.comparisonOperator() .getChild(0)).getSymbol()); return new SelectRelation(selectListOuter, subqueryRelation, new BinaryPredicate(op, leftSlotRef, rightValue), null, null, createPos(context)); } else { return resultSelectRelation; } } @Override public ParseNode visitSelectSingle(StarRocksParser.SelectSingleContext context) { String alias = null; if (context.identifier() != null) { alias = ((Identifier) visit(context.identifier())).getValue(); } else if (context.string() != null) { alias = ((StringLiteral) visit(context.string())).getStringValue(); } return new SelectListItem((Expr) visit(context.expression()), alias, createPos(context)); } @Override public ParseNode visitSelectAll(StarRocksParser.SelectAllContext context) { NodePosition pos = createPos(context); if (context.qualifiedName() != null) { QualifiedName qualifiedName = getQualifiedName(context.qualifiedName()); return new SelectListItem(qualifiedNameToTableName(qualifiedName), pos); } return new SelectListItem(null, pos); } @Override public ParseNode visitSingleGroupingSet(StarRocksParser.SingleGroupingSetContext context) { return new 
GroupByClause(new ArrayList<>(visit(context.expressionList().expression(), Expr.class)), GroupByClause.GroupingType.GROUP_BY, createPos(context)); } @Override public ParseNode visitRollup(StarRocksParser.RollupContext context) { List<Expr> groupingExprs = visit(context.expressionList().expression(), Expr.class); return new GroupByClause(new ArrayList<>(groupingExprs), GroupByClause.GroupingType.ROLLUP, createPos(context)); } @Override public ParseNode visitCube(StarRocksParser.CubeContext context) { List<Expr> groupingExprs = visit(context.expressionList().expression(), Expr.class); return new GroupByClause(new ArrayList<>(groupingExprs), GroupByClause.GroupingType.CUBE, createPos(context)); } @Override public ParseNode visitMultipleGroupingSets(StarRocksParser.MultipleGroupingSetsContext context) { List<ArrayList<Expr>> groupingSets = new ArrayList<>(); for (StarRocksParser.GroupingSetContext groupingSetContext : context.groupingSet()) { List<Expr> l = visit(groupingSetContext.expression(), Expr.class); groupingSets.add(new ArrayList<>(l)); } return new GroupByClause(groupingSets, GroupByClause.GroupingType.GROUPING_SETS, createPos(context)); } @Override public ParseNode visitGroupingOperation(StarRocksParser.GroupingOperationContext context) { List<Expr> arguments = visit(context.expression(), Expr.class); return new GroupingFunctionCallExpr("grouping", arguments, createPos(context)); } @Override public ParseNode visitWindowFrame(StarRocksParser.WindowFrameContext context) { NodePosition pos = createPos(context); if (context.end != null) { return new AnalyticWindow( getFrameType(context.frameType), (AnalyticWindow.Boundary) visit(context.start), (AnalyticWindow.Boundary) visit(context.end), pos); } else { return new AnalyticWindow( getFrameType(context.frameType), (AnalyticWindow.Boundary) visit(context.start), pos); } } private static AnalyticWindow.Type getFrameType(Token type) { if (type.getType() == StarRocksLexer.RANGE) { return AnalyticWindow.Type.RANGE; } 
else {
        return AnalyticWindow.Type.ROWS;
    }
}

    @Override
    public ParseNode visitUnboundedFrame(StarRocksParser.UnboundedFrameContext context) {
        return new AnalyticWindow.Boundary(getUnboundedFrameBoundType(context.boundType), null);
    }

    @Override
    public ParseNode visitBoundedFrame(StarRocksParser.BoundedFrameContext context) {
        return new AnalyticWindow.Boundary(getBoundedFrameBoundType(context.boundType),
                (Expr) visit(context.expression()));
    }

    @Override
    public ParseNode visitCurrentRowBound(StarRocksParser.CurrentRowBoundContext context) {
        return new AnalyticWindow.Boundary(AnalyticWindow.BoundaryType.CURRENT_ROW, null);
    }

    // Maps a PRECEDING/FOLLOWING token of a bounded frame edge to its boundary type.
    private static AnalyticWindow.BoundaryType getBoundedFrameBoundType(Token token) {
        return token.getType() == StarRocksLexer.PRECEDING
                ? AnalyticWindow.BoundaryType.PRECEDING
                : AnalyticWindow.BoundaryType.FOLLOWING;
    }

    // Maps a PRECEDING/FOLLOWING token of an UNBOUNDED frame edge to its boundary type.
    private static AnalyticWindow.BoundaryType getUnboundedFrameBoundType(Token token) {
        return token.getType() == StarRocksLexer.PRECEDING
                ? AnalyticWindow.BoundaryType.UNBOUNDED_PRECEDING
                : AnalyticWindow.BoundaryType.UNBOUNDED_FOLLOWING;
    }

    @Override
    public ParseNode visitSortItem(StarRocksParser.SortItemContext context) {
        boolean isAsc = getOrderingType(context.ordering);
        return new OrderByElement(
                (Expr) visit(context.expression()),
                isAsc,
                getNullOrderingType(isAsc, context.nullOrdering),
                createPos(context));
    }

    // Resolves NULLS FIRST/LAST; when unspecified, the session sql_mode supplies the default.
    private boolean getNullOrderingType(boolean isAsc, Token token) {
        if (token == null) {
            return (!SqlModeHelper.check(sqlMode, SqlModeHelper.MODE_SORT_NULLS_LAST)) == isAsc;
        }
        return token.getType() == StarRocksLexer.FIRST;
    }

    // Ordering defaults to ASC when no ASC/DESC token is written.
    private static boolean getOrderingType(Token token) {
        return token == null || token.getType() == StarRocksLexer.ASC;
    }

    @Override
    public ParseNode visitLimitElement(StarRocksParser.LimitElementContext context) {
        // Prepared-statement placeholders are not allowed in LIMIT/OFFSET.
        if (context.limit.getText().equals("?")
                || (context.offset != null && context.offset.getText().equals("?"))) {
            throw new 
ParsingException("using parameter(?) as limit or offset not supported"); } long limit = Long.parseLong(context.limit.getText()); long offset = 0; if (context.offset != null) { offset = Long.parseLong(context.offset.getText()); } return new LimitElement(offset, limit, createPos(context)); } @Override public ParseNode visitRelation(StarRocksParser.RelationContext context) { Relation relation = (Relation) visit(context.relationPrimary()); List<JoinRelation> joinRelations = visit(context.joinRelation(), JoinRelation.class); Relation leftChildRelation = relation; for (JoinRelation joinRelation : joinRelations) { joinRelation.setLeft(leftChildRelation); leftChildRelation = joinRelation; } return leftChildRelation; } @Override public ParseNode visitParenthesizedRelation(StarRocksParser.ParenthesizedRelationContext context) { if (context.relations().relation().size() == 1) { return visit(context.relations().relation().get(0)); } else { List<Relation> relations = visit(context.relations().relation(), Relation.class); Iterator<Relation> iterator = relations.iterator(); Relation relation = iterator.next(); while (iterator.hasNext()) { relation = new JoinRelation(null, relation, iterator.next(), null, false); } return relation; } } @Override public ParseNode visitTableAtom(StarRocksParser.TableAtomContext context) { Token start = context.start; Token stop = context.stop; QualifiedName qualifiedName = getQualifiedName(context.qualifiedName()); TableName tableName = qualifiedNameToTableName(qualifiedName); PartitionNames partitionNames = null; if (context.partitionNames() != null) { stop = context.partitionNames().stop; partitionNames = (PartitionNames) visit(context.partitionNames()); } List<Long> tabletIds = Lists.newArrayList(); if (context.tabletList() != null) { stop = context.tabletList().stop; tabletIds = context.tabletList().INTEGER_VALUE().stream().map(ParseTree::getText) .map(Long::parseLong).collect(toList()); } List<Long> replicaLists = Lists.newArrayList(); if 
(context.replicaList() != null) { stop = context.replicaList().stop; replicaLists = context.replicaList().INTEGER_VALUE().stream().map(ParseTree::getText).map(Long::parseLong) .collect(toList()); } TableRelation tableRelation = new TableRelation(tableName, partitionNames, tabletIds, replicaLists, createPos(start, stop)); if (context.bracketHint() != null) { for (Identifier identifier : visit(context.bracketHint().identifier(), Identifier.class)) { tableRelation.addTableHint(identifier.getValue()); } } if (context.alias != null) { Identifier identifier = (Identifier) visit(context.alias); tableRelation.setAlias(new TableName(null, identifier.getValue())); } if (context.temporalClause() != null) { StringBuilder sb = new StringBuilder(); for (ParseTree child : context.temporalClause().children) { sb.append(child.getText()); sb.append(" "); } tableRelation.setTemporalClause(sb.toString()); } return tableRelation; } @Override public ParseNode visitJoinRelation(StarRocksParser.JoinRelationContext context) { Relation left = null; Relation right = (Relation) visit(context.rightRelation); JoinOperator joinType = JoinOperator.INNER_JOIN; if (context.crossOrInnerJoinType() != null) { if (context.crossOrInnerJoinType().CROSS() != null) { joinType = JoinOperator.CROSS_JOIN; } else { joinType = JoinOperator.INNER_JOIN; } } else if (context.outerAndSemiJoinType().LEFT() != null) { if (context.outerAndSemiJoinType().OUTER() != null) { joinType = JoinOperator.LEFT_OUTER_JOIN; } else if (context.outerAndSemiJoinType().SEMI() != null) { joinType = JoinOperator.LEFT_SEMI_JOIN; } else if (context.outerAndSemiJoinType().ANTI() != null) { joinType = JoinOperator.LEFT_ANTI_JOIN; } else { joinType = JoinOperator.LEFT_OUTER_JOIN; } } else if (context.outerAndSemiJoinType().RIGHT() != null) { if (context.outerAndSemiJoinType().OUTER() != null) { joinType = JoinOperator.RIGHT_OUTER_JOIN; } else if (context.outerAndSemiJoinType().SEMI() != null) { joinType = JoinOperator.RIGHT_SEMI_JOIN; } 
else if (context.outerAndSemiJoinType().ANTI() != null) { joinType = JoinOperator.RIGHT_ANTI_JOIN; } else { joinType = JoinOperator.RIGHT_OUTER_JOIN; } } else if (context.outerAndSemiJoinType().FULL() != null) { joinType = JoinOperator.FULL_OUTER_JOIN; } Expr predicate = null; List<String> usingColNames = null; if (context.joinCriteria() != null) { if (context.joinCriteria().ON() != null) { predicate = (Expr) visit(context.joinCriteria().expression()); } else { List<Identifier> criteria = visit(context.joinCriteria().identifier(), Identifier.class); usingColNames = criteria.stream().map(Identifier::getValue).collect(Collectors.toList()); } } JoinRelation joinRelation = new JoinRelation(joinType, left, right, predicate, context.LATERAL() != null, createPos(context)); joinRelation.setUsingColNames(usingColNames); if (context.bracketHint() != null) { joinRelation.setJoinHint(((Identifier) visit(context.bracketHint().identifier(0))).getValue()); if (context.bracketHint().primaryExpression() != null) { joinRelation.setSkewColumn((Expr) visit(context.bracketHint().primaryExpression())); } if (context.bracketHint().literalExpressionList() != null) { joinRelation.setSkewValues(visit(context.bracketHint().literalExpressionList().literalExpression(), Expr.class)); } } return joinRelation; } @Override public ParseNode visitInlineTable(StarRocksParser.InlineTableContext context) { List<ValueList> rowValues = visit(context.rowConstructor(), ValueList.class); List<List<Expr>> rows = rowValues.stream().map(ValueList::getRow).collect(toList()); List<String> colNames = getColumnNames(context.columnAliases()); if (colNames == null) { colNames = new ArrayList<>(); for (int i = 0; i < rows.get(0).size(); ++i) { colNames.add("column_" + i); } } ValuesRelation valuesRelation = new ValuesRelation(rows, colNames, createPos(context)); if (context.alias != null) { Identifier identifier = (Identifier) visit(context.alias); valuesRelation.setAlias(new TableName(null, identifier.getValue())); 
} return valuesRelation; } @Override public ParseNode visitNamedArguments(StarRocksParser.NamedArgumentsContext context) { String name = ((Identifier) visit(context.identifier())).getValue(); if (name == null || name.isEmpty() || name.equals(" ")) { throw new ParsingException(PARSER_ERROR_MSG.unsupportedExpr(" The left of => shouldn't be empty")); } Expr node = (Expr) visit(context.expression()); if (node == null) { throw new ParsingException(PARSER_ERROR_MSG.unsupportedExpr(" The right of => shouldn't be null")); } return new NamedArgument(name, node); } @Override public ParseNode visitTableFunction(StarRocksParser.TableFunctionContext context) { QualifiedName functionName = getQualifiedName(context.qualifiedName()); List<Expr> parameters = visit(context.expressionList().expression(), Expr.class); FunctionCallExpr functionCallExpr = new FunctionCallExpr(FunctionName.createFnName(functionName.toString()), parameters); TableFunctionRelation tableFunctionRelation = new TableFunctionRelation(functionCallExpr); if (context.alias != null) { Identifier identifier = (Identifier) visit(context.alias); tableFunctionRelation.setAlias(new TableName(null, identifier.getValue())); } tableFunctionRelation.setColumnOutputNames(getColumnNames(context.columnAliases())); return tableFunctionRelation; } @Override public ParseNode visitNormalizedTableFunction(StarRocksParser.NormalizedTableFunctionContext context) { QualifiedName functionName = getQualifiedName(context.qualifiedName()); List<Expr> parameters = null; if (context.argumentList().expressionList() != null) { parameters = visit(context.argumentList().expressionList().expression(), Expr.class); } else { parameters = visit(context.argumentList().namedArgumentList().namedArgument(), Expr.class); } int namedArgNum = parameters.stream().filter(f -> f instanceof NamedArgument).collect(toList()).size(); if (namedArgNum > 0 && namedArgNum < parameters.size()) { throw new SemanticException("All arguments must be passed by name or 
all must be passed positionally"); } FunctionCallExpr functionCallExpr = new FunctionCallExpr(FunctionName.createFnName(functionName.toString()), parameters, createPos(context)); TableFunctionRelation relation = new TableFunctionRelation(functionCallExpr); if (context.alias != null) { Identifier identifier = (Identifier) visit(context.alias); relation.setAlias(new TableName(null, identifier.getValue())); } relation.setColumnOutputNames(getColumnNames(context.columnAliases())); return new NormalizedTableFunctionRelation(relation); } @Override public ParseNode visitFileTableFunction(StarRocksParser.FileTableFunctionContext context) { Map<String, String> properties = getPropertyList(context.propertyList()); return new FileTableFunctionRelation(properties, NodePosition.ZERO); } @Override public ParseNode visitRowConstructor(StarRocksParser.RowConstructorContext context) { ArrayList<Expr> row = new ArrayList<>(visit(context.expressionList().expression(), Expr.class)); return new ValueList(row, createPos(context)); } @Override public ParseNode visitPartitionNames(StarRocksParser.PartitionNamesContext context) { if (context.keyPartitions() != null) { return visit(context.keyPartitions()); } List<Identifier> identifierList = visit(context.identifierOrString(), Identifier.class); return new PartitionNames(context.TEMPORARY() != null, identifierList.stream().map(Identifier::getValue).collect(toList()), createPos(context)); } @Override public ParseNode visitKeyPartitionList(StarRocksParser.KeyPartitionListContext context) { List<String> partitionColNames = Lists.newArrayList(); List<Expr> partitionColValues = Lists.newArrayList(); for (StarRocksParser.KeyPartitionContext pair : context.keyPartition()) { Identifier partitionName = (Identifier) visit(pair.partitionColName); Expr partitionValue = (Expr) visit(pair.partitionColValue); partitionColNames.add(partitionName.getValue()); partitionColValues.add(partitionValue); } return new PartitionNames(false, new ArrayList<>(), 
partitionColNames, partitionColValues, NodePosition.ZERO); } @Override public ParseNode visitSubquery(StarRocksParser.SubqueryContext context) { return visit(context.queryRelation()); } @Override public ParseNode visitQueryWithParentheses(StarRocksParser.QueryWithParenthesesContext context) { QueryRelation relation = (QueryRelation) visit(context.subquery()); return new SubqueryRelation(new QueryStatement(relation)); } @Override public ParseNode visitSubqueryWithAlias(StarRocksParser.SubqueryWithAliasContext context) { QueryRelation queryRelation = (QueryRelation) visit(context.subquery()); SubqueryRelation subqueryRelation = new SubqueryRelation(new QueryStatement(queryRelation)); if (context.alias != null) { Identifier identifier = (Identifier) visit(context.alias); subqueryRelation.setAlias(new TableName(null, identifier.getValue())); } else { subqueryRelation.setAlias(new TableName(null, null)); } subqueryRelation.setColumnOutputNames(getColumnNames(context.columnAliases())); return subqueryRelation; } @Override public ParseNode visitSubqueryExpression(StarRocksParser.SubqueryExpressionContext context) { QueryRelation queryRelation = (QueryRelation) visit(context.subquery()); return new Subquery(new QueryStatement(queryRelation)); } @Override public ParseNode visitInSubquery(StarRocksParser.InSubqueryContext context) { boolean isNotIn = context.NOT() != null; QueryRelation query = (QueryRelation) visit(context.queryRelation()); return new InPredicate((Expr) visit(context.value), new Subquery(new QueryStatement(query)), isNotIn, createPos(context)); } @Override public ParseNode visitTupleInSubquery(StarRocksParser.TupleInSubqueryContext context) { boolean isNotIn = context.NOT() != null; QueryRelation query = (QueryRelation) visit(context.queryRelation()); List<Expr> tupleExpressions = visit(context.expression(), Expr.class); return new MultiInPredicate(tupleExpressions, new Subquery(new QueryStatement(query)), isNotIn, createPos(context)); } @Override public 
ParseNode visitExists(StarRocksParser.ExistsContext context) {
        QueryRelation query = (QueryRelation) visit(context.queryRelation());
        return new ExistsPredicate(new Subquery(new QueryStatement(query)), false, createPos(context));
    }

    @Override
    public ParseNode visitScalarSubquery(StarRocksParser.ScalarSubqueryContext context) {
        BinaryType op = getComparisonOperator(((TerminalNode) context.comparisonOperator().getChild(0))
                .getSymbol());
        Subquery subquery = new Subquery(new QueryStatement((QueryRelation) visit(context.queryRelation())));
        return new BinaryPredicate(op, (Expr) visit(context.booleanExpression()), subquery, createPos(context));
    }

    @Override
    public ParseNode visitShowFunctionsStatement(StarRocksParser.ShowFunctionsStatementContext context) {
        boolean isBuiltIn = context.BUILTIN() != null;
        boolean isGlobal = context.GLOBAL() != null;
        boolean isVerbose = context.FULL() != null;
        // Optional qualifiers: database, LIKE pattern, WHERE expression.
        String dbName = context.db == null ? null : getQualifiedName(context.db).toString();
        String pattern = context.pattern == null ? null : ((StringLiteral) visit(context.pattern)).getValue();
        Expr where = context.expression() == null ? null : (Expr) visit(context.expression());
        return new ShowFunctionsStmt(dbName, isBuiltIn, isGlobal, isVerbose, pattern, where, createPos(context));
    }

    @Override
    public ParseNode visitShowPrivilegesStatement(StarRocksParser.ShowPrivilegesStatementContext ctx) {
        return new ShowPrivilegesStmt();
    }

    @Override
    public ParseNode visitDropFunctionStatement(StarRocksParser.DropFunctionStatementContext context) {
        QualifiedName qualifiedName = getQualifiedName(context.qualifiedName());
        String functionName = qualifiedName.toString();
        boolean isGlobal = context.GLOBAL() != null;
        FunctionName fnName = FunctionName.createFnName(functionName);
        if (isGlobal) {
            // A GLOBAL UDF name must not carry a database qualifier.
            if (!Strings.isNullOrEmpty(fnName.getDb())) {
                throw new ParsingException(PARSER_ERROR_MSG.invalidUDFName(functionName), qualifiedName.getPos());
            }
            fnName.setAsGlobalFunction(); 
} return new DropFunctionStmt(fnName, getFunctionArgsDef(context.typeList()), createPos(context)); } @Override public ParseNode visitCreateFunctionStatement(StarRocksParser.CreateFunctionStatementContext context) { String functionType = "SCALAR"; boolean isGlobal = context.GLOBAL() != null; if (context.functionType != null) { functionType = context.functionType.getText(); } QualifiedName qualifiedName = getQualifiedName(context.qualifiedName()); String functionName = qualifiedName.toString(); TypeDef returnTypeDef = new TypeDef(getType(context.returnType), createPos(context.returnType)); TypeDef intermediateType = null; if (context.intermediateType != null) { intermediateType = new TypeDef(getType(context.intermediateType), createPos(context.intermediateType)); } Map<String, String> properties = null; if (context.properties() != null) { properties = new HashMap<>(); List<Property> propertyList = visit(context.properties().property(), Property.class); for (Property property : propertyList) { properties.put(property.getKey(), property.getValue()); } } FunctionName fnName = FunctionName.createFnName(functionName); if (isGlobal) { if (!Strings.isNullOrEmpty(fnName.getDb())) { throw new ParsingException(PARSER_ERROR_MSG.invalidUDFName(functionName), qualifiedName.getPos()); } fnName.setAsGlobalFunction(); } return new CreateFunctionStmt(functionType, fnName, getFunctionArgsDef(context.typeList()), returnTypeDef, intermediateType, properties); } @Override public ParseNode visitCreateUserStatement(StarRocksParser.CreateUserStatementContext context) { UserDesc userDesc; Token start = context.user().start; Token stop; UserIdentity user = (UserIdentity) visit(context.user()); UserAuthOption authOption = context.authOption() == null ? 
null : (UserAuthOption) visit(context.authOption()); if (authOption == null) { userDesc = new UserDesc(user, "", false, user.getPos()); } else if (authOption.getAuthPlugin() == null) { stop = context.authOption().stop; userDesc = new UserDesc(user, authOption.getPassword(), authOption.isPasswordPlain(), createPos(start, stop)); } else { stop = context.authOption().stop; userDesc = new UserDesc(user, authOption.getAuthPlugin(), authOption.getAuthString(), authOption.isPasswordPlain(), createPos(start, stop)); } boolean ifNotExists = context.IF() != null; List<String> roles = new ArrayList<>(); if (context.roleList() != null) { roles.addAll(context.roleList().identifierOrString().stream().map(this::visit).map( s -> ((Identifier) s).getValue()).collect(toList())); } return new CreateUserStmt(ifNotExists, userDesc, roles, createPos(context)); } @Override public ParseNode visitDropUserStatement(StarRocksParser.DropUserStatementContext context) { UserIdentity user = (UserIdentity) visit(context.user()); return new DropUserStmt(user, context.EXISTS() != null, createPos(context)); } @Override public ParseNode visitAlterUserStatement(StarRocksParser.AlterUserStatementContext context) { UserDesc userDesc; UserIdentity user = (UserIdentity) visit(context.user()); Token start = context.user().start; Token stop; if (context.ROLE() != null) { List<String> roles = new ArrayList<>(); if (context.roleList() != null) { roles.addAll(context.roleList().identifierOrString().stream().map(this::visit).map( s -> ((Identifier) s).getValue()).collect(toList())); } SetRoleType setRoleType; if (context.ALL() != null) { setRoleType = SetRoleType.ALL; } else if (context.NONE() != null) { setRoleType = SetRoleType.NONE; } else { setRoleType = SetRoleType.ROLE; } return new SetDefaultRoleStmt(user, setRoleType, roles, createPos(context)); } stop = context.authOption().stop; UserAuthOption authOption = (UserAuthOption) visit(context.authOption()); if (authOption.getAuthPlugin() == null) { userDesc 
= new UserDesc(user, authOption.getPassword(), authOption.isPasswordPlain(), createPos(start, stop)); } else { userDesc = new UserDesc(user, authOption.getAuthPlugin(), authOption.getAuthString(), authOption.isPasswordPlain(), createPos(start, stop)); } return new AlterUserStmt(userDesc, context.EXISTS() != null, createPos(context)); } @Override public ParseNode visitShowUserStatement(StarRocksParser.ShowUserStatementContext context) { NodePosition pos = createPos(context); if (context.USERS() != null) { return new ShowUserStmt(true, pos); } else { return new ShowUserStmt(false, pos); } } @Override public ParseNode visitShowAllAuthentication(StarRocksParser.ShowAllAuthenticationContext context) { return new ShowAuthenticationStmt(null, true, createPos(context)); } @Override public ParseNode visitShowAuthenticationForUser(StarRocksParser.ShowAuthenticationForUserContext context) { NodePosition pos = createPos(context); if (context.user() != null) { return new ShowAuthenticationStmt((UserIdentity) visit(context.user()), false, pos); } else { return new ShowAuthenticationStmt(null, false, pos); } } @Override public ParseNode visitExecuteAsStatement(StarRocksParser.ExecuteAsStatementContext context) { boolean allowRevert = context.WITH() == null; return new ExecuteAsStmt((UserIdentity) visit(context.user()), allowRevert, createPos(context)); } @Override public ParseNode visitCreateRoleStatement(StarRocksParser.CreateRoleStatementContext context) { List<String> roles = context.roleList().identifierOrString().stream().map(this::visit).map( s -> ((Identifier) s).getValue()).collect(Collectors.toList()); String comment = context.comment() == null ? 
"" : ((StringLiteral) visit(context.comment())).getStringValue(); return new CreateRoleStmt(roles, context.NOT() != null, comment, createPos(context)); } @Override public ParseNode visitAlterRoleStatement(StarRocksParser.AlterRoleStatementContext context) { List<String> roles = context.roleList().identifierOrString().stream().map(this::visit).map( s -> ((Identifier) s).getValue()).collect(Collectors.toList()); StringLiteral stringLiteral = (StringLiteral) visit(context.string()); String comment = stringLiteral.getStringValue(); return new AlterRoleStmt(roles, context.IF() != null, comment); } @Override public ParseNode visitDropRoleStatement(StarRocksParser.DropRoleStatementContext context) { List<String> roles = new ArrayList<>(); roles.addAll(context.roleList().identifierOrString().stream().map(this::visit).map( s -> ((Identifier) s).getValue()).collect(toList())); return new DropRoleStmt(roles, context.EXISTS() != null, createPos(context)); } @Override public ParseNode visitShowRolesStatement(StarRocksParser.ShowRolesStatementContext context) { return new ShowRolesStmt(); } @Override public ParseNode visitGrantRoleToUser(StarRocksParser.GrantRoleToUserContext context) { List<String> roleNameList = new ArrayList<>(); for (StarRocksParser.IdentifierOrStringContext oneContext : context.identifierOrStringList() .identifierOrString()) { roleNameList.add(((Identifier) visit(oneContext)).getValue()); } return new GrantRoleStmt(roleNameList, (UserIdentity) visit(context.user()), createPos(context)); } @Override public ParseNode visitGrantRoleToRole(StarRocksParser.GrantRoleToRoleContext context) { List<String> roleNameList = new ArrayList<>(); for (StarRocksParser.IdentifierOrStringContext oneContext : context.identifierOrStringList() .identifierOrString()) { roleNameList.add(((Identifier) visit(oneContext)).getValue()); } return new GrantRoleStmt(roleNameList, ((Identifier) visit(context.identifierOrString())).getValue(), createPos(context)); } @Override public 
ParseNode visitRevokeRoleFromUser(StarRocksParser.RevokeRoleFromUserContext context) { List<String> roleNameList = new ArrayList<>(); for (StarRocksParser.IdentifierOrStringContext oneContext : context.identifierOrStringList() .identifierOrString()) { roleNameList.add(((Identifier) visit(oneContext)).getValue()); } return new RevokeRoleStmt(roleNameList, (UserIdentity) visit(context.user()), createPos(context)); } @Override public ParseNode visitRevokeRoleFromRole(StarRocksParser.RevokeRoleFromRoleContext context) { List<String> roleNameList = new ArrayList<>(); for (StarRocksParser.IdentifierOrStringContext oneContext : context.identifierOrStringList() .identifierOrString()) { roleNameList.add(((Identifier) visit(oneContext)).getValue()); } return new RevokeRoleStmt(roleNameList, ((Identifier) visit(context.identifierOrString())).getValue(), createPos(context)); } @Override public ParseNode visitSetRoleStatement(StarRocksParser.SetRoleStatementContext context) { List<String> roles = new ArrayList<>(); if (context.roleList() != null) { roles.addAll(context.roleList().identifierOrString().stream().map(this::visit).map( s -> ((Identifier) s).getValue()).collect(toList())); } SetRoleType setRoleType; if (context.ALL() != null) { setRoleType = SetRoleType.ALL; } else if (context.DEFAULT() != null) { setRoleType = SetRoleType.DEFAULT; } else if (context.NONE() != null) { setRoleType = SetRoleType.NONE; } else { setRoleType = SetRoleType.ROLE; } return new SetRoleStmt(setRoleType, roles, createPos(context)); } @Override public ParseNode visitSetDefaultRoleStatement(StarRocksParser.SetDefaultRoleStatementContext context) { List<String> roles = new ArrayList<>(); if (context.roleList() != null) { roles.addAll(context.roleList().identifierOrString().stream().map(this::visit).map( s -> ((Identifier) s).getValue()).collect(toList())); } SetRoleType setRoleType; if (context.ALL() != null) { setRoleType = SetRoleType.ALL; } else if (context.NONE() != null) { setRoleType = 
SetRoleType.NONE; } else { setRoleType = SetRoleType.ROLE; } return new SetDefaultRoleStmt((UserIdentity) visit(context.user()), setRoleType, roles, createPos(context)); } @Override public ParseNode visitShowGrantsStatement(StarRocksParser.ShowGrantsStatementContext context) { NodePosition pos = createPos(context); if (context.ROLE() != null) { Identifier role = (Identifier) visit(context.identifierOrString()); return new ShowGrantsStmt(null, role.getValue(), pos); } else { UserIdentity userId = context.user() == null ? null : (UserIdentity) visit(context.user()); return new ShowGrantsStmt(userId, null, pos); } } @Override public ParseNode visitAuthWithoutPlugin(StarRocksParser.AuthWithoutPluginContext context) { String password = ((StringLiteral) visit(context.string())).getStringValue(); boolean isPasswordPlain = context.PASSWORD() == null; return new UserAuthOption(password, null, null, isPasswordPlain, createPos(context)); } @Override public ParseNode visitAuthWithPlugin(StarRocksParser.AuthWithPluginContext context) { Identifier authPlugin = (Identifier) visit(context.identifierOrString()); String authString = context.string() == null ? 
null : ((StringLiteral) visit(context.string())).getStringValue(); boolean isPasswordPlain = context.AS() == null; return new UserAuthOption(null, authPlugin.getValue().toUpperCase(), authString, isPasswordPlain, createPos(context)); } @Override public ParseNode visitGrantRevokeClause(StarRocksParser.GrantRevokeClauseContext context) { NodePosition pos = createPos(context); if (context.user() != null) { UserIdentity user = (UserIdentity) visit(context.user()); return new GrantRevokeClause(user, null, pos); } else { String roleName = ((Identifier) visit(context.identifierOrString())).getValue(); return new GrantRevokeClause(null, roleName, pos); } } @Override public ParseNode visitGrantOnUser(StarRocksParser.GrantOnUserContext context) { List<String> privList = Collections.singletonList("IMPERSONATE"); GrantRevokeClause clause = (GrantRevokeClause) visit(context.grantRevokeClause()); List<UserIdentity> users = context.user().stream() .map(user -> (UserIdentity) visit(user)).collect(toList()); GrantRevokePrivilegeObjects objects = new GrantRevokePrivilegeObjects(); objects.setUserPrivilegeObjectList(users); return new GrantPrivilegeStmt(privList, "USER", clause, objects, context.WITH() != null, createPos(context)); } @Override public ParseNode visitRevokeOnUser(StarRocksParser.RevokeOnUserContext context) { List<String> privList = Collections.singletonList("IMPERSONATE"); GrantRevokeClause clause = (GrantRevokeClause) visit(context.grantRevokeClause()); List<UserIdentity> users = context.user().stream() .map(user -> (UserIdentity) visit(user)).collect(toList()); GrantRevokePrivilegeObjects objects = new GrantRevokePrivilegeObjects(); objects.setUserPrivilegeObjectList(users); return new RevokePrivilegeStmt(privList, "USER", clause, objects, createPos(context)); } @Override public ParseNode visitGrantOnTableBrief(StarRocksParser.GrantOnTableBriefContext context) { List<String> privilegeList = context.privilegeTypeList().privilegeType().stream().map( c -> ((Identifier) 
visit(c)).getValue().toUpperCase()).collect(toList()); return new GrantPrivilegeStmt(privilegeList, "TABLE", (GrantRevokeClause) visit(context.grantRevokeClause()), parsePrivilegeObjectNameList(context.privObjectNameList()), context.WITH() != null, createPos(context)); } @Override public ParseNode visitRevokeOnTableBrief(StarRocksParser.RevokeOnTableBriefContext context) { List<String> privilegeList = context.privilegeTypeList().privilegeType().stream().map( c -> ((Identifier) visit(c)).getValue().toUpperCase()).collect(toList()); return new RevokePrivilegeStmt(privilegeList, "TABLE", (GrantRevokeClause) visit(context.grantRevokeClause()), parsePrivilegeObjectNameList(context.privObjectNameList()), createPos(context)); } @Override public ParseNode visitGrantOnSystem(StarRocksParser.GrantOnSystemContext context) { List<String> privilegeList = context.privilegeTypeList().privilegeType().stream().map( c -> ((Identifier) visit(c)).getValue().toUpperCase()).collect(toList()); return new GrantPrivilegeStmt(privilegeList, "SYSTEM", (GrantRevokeClause) visit(context.grantRevokeClause()), null, context.WITH() != null, createPos(context)); } @Override public ParseNode visitRevokeOnSystem(StarRocksParser.RevokeOnSystemContext context) { List<String> privilegeList = context.privilegeTypeList().privilegeType().stream().map( c -> ((Identifier) visit(c)).getValue().toUpperCase()).collect(toList()); return new RevokePrivilegeStmt(privilegeList, "SYSTEM", (GrantRevokeClause) visit(context.grantRevokeClause()), null, createPos(context)); } @Override public ParseNode visitGrantOnPrimaryObj(StarRocksParser.GrantOnPrimaryObjContext context) { List<String> privilegeList = context.privilegeTypeList().privilegeType().stream().map( c -> ((Identifier) visit(c)).getValue().toUpperCase()).collect(toList()); String objectTypeUnResolved = ((Identifier) visit(context.privObjectType())).getValue().toUpperCase(); return new GrantPrivilegeStmt(privilegeList, objectTypeUnResolved, 
(GrantRevokeClause) visit(context.grantRevokeClause()), parsePrivilegeObjectNameList(context.privObjectNameList()), context.WITH() != null, createPos(context)); } @Override public ParseNode visitRevokeOnPrimaryObj(StarRocksParser.RevokeOnPrimaryObjContext context) { List<String> privilegeList = context.privilegeTypeList().privilegeType().stream().map( c -> ((Identifier) visit(c)).getValue().toUpperCase()).collect(toList()); String objectTypeUnResolved = ((Identifier) visit(context.privObjectType())).getValue().toUpperCase(); return new RevokePrivilegeStmt(privilegeList, objectTypeUnResolved, (GrantRevokeClause) visit(context.grantRevokeClause()), parsePrivilegeObjectNameList(context.privObjectNameList()), createPos(context)); } @Override public ParseNode visitGrantOnFunc(StarRocksParser.GrantOnFuncContext context) { List<String> privilegeList = context.privilegeTypeList().privilegeType().stream().map( c -> ((Identifier) visit(c)).getValue().toUpperCase()).collect(toList()); GrantRevokePrivilegeObjects objects = buildGrantRevokePrivWithFunction(context.privFunctionObjectNameList(), context.GLOBAL() != null); return new GrantPrivilegeStmt(privilegeList, extendPrivilegeType(context.GLOBAL() != null, "FUNCTION"), (GrantRevokeClause) visit(context.grantRevokeClause()), objects, context.WITH() != null, createPos(context)); } @Override public ParseNode visitRevokeOnFunc(StarRocksParser.RevokeOnFuncContext context) { List<String> privilegeList = context.privilegeTypeList().privilegeType().stream().map( c -> ((Identifier) visit(c)).getValue().toUpperCase()).collect(toList()); GrantRevokePrivilegeObjects objects = buildGrantRevokePrivWithFunction(context.privFunctionObjectNameList(), context.GLOBAL() != null); return new RevokePrivilegeStmt(privilegeList, extendPrivilegeType(context.GLOBAL() != null, "FUNCTION"), (GrantRevokeClause) visit(context.grantRevokeClause()), objects, createPos(context)); } private GrantRevokePrivilegeObjects buildGrantRevokePrivWithFunction( 
StarRocksParser.PrivFunctionObjectNameListContext context, boolean isGlobal) { List<Pair<FunctionName, FunctionArgsDef>> functions = new ArrayList<>(); int functionSize = context.qualifiedName().size(); List<StarRocksParser.TypeListContext> typeListContexts = context.typeList(); for (int i = 0; i < functionSize; ++i) { StarRocksParser.QualifiedNameContext qualifiedNameContext = context.qualifiedName(i); QualifiedName qualifiedName = getQualifiedName(qualifiedNameContext); FunctionName functionName; if (qualifiedName.getParts().size() == 1) { functionName = new FunctionName(qualifiedName.getParts().get(0)); } else if (qualifiedName.getParts().size() == 2) { functionName = new FunctionName(qualifiedName.getParts().get(0), qualifiedName.getParts().get(1)); } else { throw new SemanticException("Error function format " + qualifiedName); } if (isGlobal) { functionName.setAsGlobalFunction(); } FunctionArgsDef argsDef = getFunctionArgsDef(typeListContexts.get(i)); functions.add(Pair.create(functionName, argsDef)); } GrantRevokePrivilegeObjects objects = new GrantRevokePrivilegeObjects(); objects.setFunctions(functions); return objects; } public String extendPrivilegeType(boolean isGlobal, String type) { if (isGlobal) { if (type.equals("FUNCTIONS") || type.equals("FUNCTION")) { return "GLOBAL " + type; } } return type; } @Override public ParseNode visitGrantOnAll(StarRocksParser.GrantOnAllContext context) { List<String> privilegeList = context.privilegeTypeList().privilegeType().stream().map( c -> ((Identifier) visit(c)).getValue().toUpperCase()).collect(toList()); String objectTypeUnResolved = ((Identifier) visit(context.privObjectTypePlural())).getValue().toUpperCase(); GrantRevokePrivilegeObjects objects = new GrantRevokePrivilegeObjects(); ArrayList<String> tokenList; if (context.isAll != null) { tokenList = Lists.newArrayList("*", "*"); } else if (context.IN() != null) { String dbName = ((Identifier) visit(context.identifierOrString())).getValue(); tokenList = 
Lists.newArrayList(dbName, "*"); } else { tokenList = Lists.newArrayList("*"); } objects.setPrivilegeObjectNameTokensList(Collections.singletonList(tokenList)); GrantPrivilegeStmt grantPrivilegeStmt = new GrantPrivilegeStmt(privilegeList, objectTypeUnResolved, (GrantRevokeClause) visit(context.grantRevokeClause()), objects, context.WITH() != null, createPos(context)); grantPrivilegeStmt.setGrantOnAll(); return grantPrivilegeStmt; } @Override public ParseNode visitRevokeOnAll(StarRocksParser.RevokeOnAllContext context) { List<String> privilegeList = context.privilegeTypeList().privilegeType().stream().map( c -> ((Identifier) visit(c)).getValue().toUpperCase()).collect(toList()); String objectTypeUnResolved = ((Identifier) visit(context.privObjectTypePlural())).getValue().toUpperCase(); GrantRevokePrivilegeObjects objects = new GrantRevokePrivilegeObjects(); ArrayList<String> tokenList; if (context.isAll != null) { tokenList = Lists.newArrayList("*", "*"); } else if (context.IN() != null) { String dbName = ((Identifier) visit(context.identifierOrString())).getValue(); tokenList = Lists.newArrayList(dbName, "*"); } else { tokenList = Lists.newArrayList("*"); } objects.setPrivilegeObjectNameTokensList(Collections.singletonList(tokenList)); RevokePrivilegeStmt revokePrivilegeStmt = new RevokePrivilegeStmt(privilegeList, objectTypeUnResolved, (GrantRevokeClause) visit(context.grantRevokeClause()), objects, createPos(context)); revokePrivilegeStmt.setGrantOnAll(); return revokePrivilegeStmt; } @Override public ParseNode visitPrivilegeType(StarRocksParser.PrivilegeTypeContext context) { NodePosition pos = createPos(context); List<String> ps = new ArrayList<>(); for (int i = 0; i < context.getChildCount(); ++i) { ps.add(context.getChild(i).getText()); } return new Identifier(Joiner.on(" ").join(ps), pos); } @Override public ParseNode visitPrivObjectType(StarRocksParser.PrivObjectTypeContext context) { NodePosition pos = createPos(context); List<String> ps = new 
ArrayList<>(); for (int i = 0; i < context.getChildCount(); ++i) { ps.add(context.getChild(i).getText()); } return new Identifier(Joiner.on(" ").join(ps), pos); } @Override public ParseNode visitPrivObjectTypePlural(StarRocksParser.PrivObjectTypePluralContext context) { NodePosition pos = createPos(context); List<String> ps = new ArrayList<>(); for (int i = 0; i < context.getChildCount(); ++i) { ps.add(context.getChild(i).getText()); } return new Identifier(Joiner.on(" ").join(ps), pos); } private GrantRevokePrivilegeObjects parsePrivilegeObjectNameList( StarRocksParser.PrivObjectNameListContext context) { if (context == null) { return null; } GrantRevokePrivilegeObjects grantRevokePrivilegeObjects = new GrantRevokePrivilegeObjects(createPos(context)); List<List<String>> objectNameList = new ArrayList<>(); for (StarRocksParser.PrivObjectNameContext privObjectNameContext : context.privObjectName()) { objectNameList.add(privObjectNameContext.identifierOrStringOrStar().stream() .map(c -> ((Identifier) visit(c)).getValue()).collect(toList())); } grantRevokePrivilegeObjects.setPrivilegeObjectNameTokensList(objectNameList); return grantRevokePrivilegeObjects; } @Override public ParseNode visitCreateSecurityIntegrationStatement( StarRocksParser.CreateSecurityIntegrationStatementContext context) { String name = ((Identifier) visit(context.identifier())).getValue(); Map<String, String> propertyMap = new HashMap<>(); if (context.properties() != null) { List<Property> propertyList = visit(context.properties().property(), Property.class); for (Property property : propertyList) { propertyMap.put(property.getKey(), property.getValue()); } } return new CreateSecurityIntegrationStatement(name, propertyMap, createPos(context)); } @Override public ParseNode visitExpressionOrDefault(StarRocksParser.ExpressionOrDefaultContext context) { if (context.DEFAULT() != null) { return new DefaultValueExpr(createPos(context)); } else { return visit(context.expression()); } } @Override public 
ParseNode visitExpressionsWithDefault(StarRocksParser.ExpressionsWithDefaultContext context) { ArrayList<Expr> row = Lists.newArrayList(); for (int i = 0; i < context.expressionOrDefault().size(); ++i) { row.add((Expr) visit(context.expressionOrDefault(i))); } return new ValueList(row, createPos(context)); } @Override public ParseNode visitExpressionSingleton(StarRocksParser.ExpressionSingletonContext context) { return visit(context.expression()); } @Override public ParseNode visitLogicalNot(StarRocksParser.LogicalNotContext context) { return new CompoundPredicate(CompoundPredicate.Operator.NOT, (Expr) visit(context.expression()), null, createPos(context)); } @Override public ParseNode visitLogicalBinary(StarRocksParser.LogicalBinaryContext context) { Expr left = (Expr) visit(context.left); Expr right = (Expr) visit(context.right); return new CompoundPredicate(getLogicalBinaryOperator(context.operator), left, right, createPos(context)); } private static CompoundPredicate.Operator getLogicalBinaryOperator(Token token) { switch (token.getType()) { case StarRocksLexer.AND: case StarRocksLexer.LOGICAL_AND: return CompoundPredicate.Operator.AND; default: return CompoundPredicate.Operator.OR; } } @Override public ParseNode visitPredicate(StarRocksParser.PredicateContext context) { if (context.predicateOperations() != null) { return visit(context.predicateOperations()); } else if (context.tupleInSubquery() != null) { return visit(context.tupleInSubquery()); } else { return visit(context.valueExpression()); } } @Override public ParseNode visitIsNull(StarRocksParser.IsNullContext context) { Expr child = (Expr) visit(context.booleanExpression()); NodePosition pos = createPos(context); if (context.NOT() == null) { return new IsNullPredicate(child, false, pos); } else { return new IsNullPredicate(child, true, pos); } } @Override public ParseNode visitComparison(StarRocksParser.ComparisonContext context) { BinaryType op = getComparisonOperator(((TerminalNode) 
context.comparisonOperator().getChild(0)) .getSymbol()); return new BinaryPredicate(op, (Expr) visit(context.left), (Expr) visit(context.right), createPos(context)); } private static BinaryType getComparisonOperator(Token symbol) { switch (symbol.getType()) { case StarRocksParser.EQ: return BinaryType.EQ; case StarRocksParser.NEQ: return BinaryType.NE; case StarRocksParser.LT: return BinaryType.LT; case StarRocksParser.LTE: return BinaryType.LE; case StarRocksParser.GT: return BinaryType.GT; case StarRocksParser.GTE: return BinaryType.GE; default: return BinaryType.EQ_FOR_NULL; } } @Override public ParseNode visitInList(StarRocksParser.InListContext context) { boolean isNotIn = context.NOT() != null; return new InPredicate( (Expr) visit(context.value), visit(context.expressionList().expression(), Expr.class), isNotIn, createPos(context)); } @Override public ParseNode visitBetween(StarRocksParser.BetweenContext context) { boolean isNotBetween = context.NOT() != null; return new BetweenPredicate( (Expr) visit(context.value), (Expr) visit(context.lower), (Expr) visit(context.upper), isNotBetween, createPos(context)); } @Override public ParseNode visitLike(StarRocksParser.LikeContext context) { LikePredicate likePredicate; NodePosition pos = createPos(context); if (context.REGEXP() != null || context.RLIKE() != null) { likePredicate = new LikePredicate(LikePredicate.Operator.REGEXP, (Expr) visit(context.value), (Expr) visit(context.pattern), pos); } else { likePredicate = new LikePredicate( LikePredicate.Operator.LIKE, (Expr) visit(context.value), (Expr) visit(context.pattern), pos); } if (context.NOT() != null) { return new CompoundPredicate(CompoundPredicate.Operator.NOT, likePredicate, null, pos); } else { return likePredicate; } } @Override public ParseNode visitSimpleCase(StarRocksParser.SimpleCaseContext context) { return new CaseExpr( (Expr) visit(context.caseExpr), visit(context.whenClause(), CaseWhenClause.class), (Expr) visitIfPresent(context.elseExpression), 
createPos(context)); } @Override public ParseNode visitSearchedCase(StarRocksParser.SearchedCaseContext context) { return new CaseExpr( null, visit(context.whenClause(), CaseWhenClause.class), (Expr) visitIfPresent(context.elseExpression), createPos(context)); } @Override public ParseNode visitWhenClause(StarRocksParser.WhenClauseContext context) { return new CaseWhenClause((Expr) visit(context.condition), (Expr) visit(context.result), createPos(context)); } @Override public ParseNode visitArithmeticUnary(StarRocksParser.ArithmeticUnaryContext context) { Expr child = (Expr) visit(context.primaryExpression()); NodePosition pos = createPos(context); switch (context.operator.getType()) { case StarRocksLexer.MINUS_SYMBOL: if (child.isLiteral() && child.getType().isNumericType()) { try { ((LiteralExpr) child).swapSign(); } catch (NotImplementedException e) { throw new ParsingException(PARSER_ERROR_MSG.unsupportedExpr(child.toSql()), child.getPos()); } return child; } else { return new ArithmeticExpr(ArithmeticExpr.Operator.MULTIPLY, new IntLiteral(-1), child, pos); } case StarRocksLexer.PLUS_SYMBOL: return child; case StarRocksLexer.BITNOT: return new ArithmeticExpr(ArithmeticExpr.Operator.BITNOT, child, null, pos); default: return new CompoundPredicate(CompoundPredicate.Operator.NOT, child, null, pos); } } @Override public ParseNode visitArithmeticBinary(StarRocksParser.ArithmeticBinaryContext context) { Expr left = (Expr) visit(context.left); Expr right = (Expr) visit(context.right); NodePosition pos = createPos(context); if (left instanceof IntervalLiteral) { return new TimestampArithmeticExpr(getArithmeticBinaryOperator(context.operator), right, ((IntervalLiteral) left).getValue(), ((IntervalLiteral) left).getUnitIdentifier().getDescription(), true, pos); } if (right instanceof IntervalLiteral) { return new TimestampArithmeticExpr(getArithmeticBinaryOperator(context.operator), left, ((IntervalLiteral) right).getValue(), ((IntervalLiteral) 
right).getUnitIdentifier().getDescription(), false, pos); } return new ArithmeticExpr(getArithmeticBinaryOperator(context.operator), left, right, pos); } private static ArithmeticExpr.Operator getArithmeticBinaryOperator(Token operator) { switch (operator.getType()) { case StarRocksLexer.PLUS_SYMBOL: return ArithmeticExpr.Operator.ADD; case StarRocksLexer.MINUS_SYMBOL: return ArithmeticExpr.Operator.SUBTRACT; case StarRocksLexer.ASTERISK_SYMBOL: return ArithmeticExpr.Operator.MULTIPLY; case StarRocksLexer.SLASH_SYMBOL: return ArithmeticExpr.Operator.DIVIDE; case StarRocksLexer.PERCENT_SYMBOL: case StarRocksLexer.MOD: return ArithmeticExpr.Operator.MOD; case StarRocksLexer.INT_DIV: return ArithmeticExpr.Operator.INT_DIVIDE; case StarRocksLexer.BITAND: return ArithmeticExpr.Operator.BITAND; case StarRocksLexer.BITOR: return ArithmeticExpr.Operator.BITOR; case StarRocksLexer.BITXOR: return ArithmeticExpr.Operator.BITXOR; case StarRocksLexer.BIT_SHIFT_LEFT: return ArithmeticExpr.Operator.BIT_SHIFT_LEFT; case StarRocksLexer.BIT_SHIFT_RIGHT: return ArithmeticExpr.Operator.BIT_SHIFT_RIGHT; case StarRocksLexer.BIT_SHIFT_RIGHT_LOGICAL: return ArithmeticExpr.Operator.BIT_SHIFT_RIGHT_LOGICAL; default: throw new ParsingException(PARSER_ERROR_MSG.wrongTypeOfArgs(operator.getText()), new NodePosition(operator)); } } @Override public ParseNode visitOdbcFunctionCallExpression(StarRocksParser.OdbcFunctionCallExpressionContext context) { FunctionCallExpr functionCallExpr = (FunctionCallExpr) visit(context.functionCall()); OdbcScalarFunctionCall odbcScalarFunctionCall = new OdbcScalarFunctionCall(functionCallExpr); return odbcScalarFunctionCall.mappingFunction(); } private static List<Expr> getArgumentsForTimeSlice(Expr time, Expr value, String ident, String boundary) { List<Expr> exprs = Lists.newLinkedList(); exprs.add(time); addArgumentUseTypeInt(value, exprs); exprs.add(new StringLiteral(ident)); exprs.add(new StringLiteral(boundary)); return exprs; } private static void 
addArgumentUseTypeInt(Expr value, List<Expr> exprs) { try { if (value instanceof IntLiteral) { exprs.add(new IntLiteral(((IntLiteral) value).getValue(), Type.INT)); } else { exprs.add(value); } } catch (Exception e) { throw new IllegalArgumentException(String.format("Cast argument %s to int type failed.", value.toSql())); } } @Override @Override public ParseNode visitAggregationFunctionCall(StarRocksParser.AggregationFunctionCallContext context) { NodePosition pos = createPos(context); String functionName; boolean isGroupConcat = false; boolean isLegacyGroupConcat = false; boolean isDistinct = false; if (context.aggregationFunction().COUNT() != null) { functionName = FunctionSet.COUNT; } else if (context.aggregationFunction().AVG() != null) { functionName = FunctionSet.AVG; } else if (context.aggregationFunction().SUM() != null) { functionName = FunctionSet.SUM; } else if (context.aggregationFunction().MIN() != null) { functionName = FunctionSet.MIN; } else if (context.aggregationFunction().ARRAY_AGG() != null) { functionName = FunctionSet.ARRAY_AGG; } else if (context.aggregationFunction().ARRAY_AGG_DISTINCT() != null) { functionName = FunctionSet.ARRAY_AGG; isDistinct = true; } else if (context.aggregationFunction().GROUP_CONCAT() != null) { functionName = FunctionSet.GROUP_CONCAT; isGroupConcat = true; isLegacyGroupConcat = SqlModeHelper.check(sqlMode, SqlModeHelper.MODE_GROUP_CONCAT_LEGACY); } else { functionName = FunctionSet.MAX; } List<OrderByElement> orderByElements = new ArrayList<>(); if (context.aggregationFunction().ORDER() != null) { orderByElements = visit(context.aggregationFunction().sortItem(), OrderByElement.class); } List<String> hints = Lists.newArrayList(); if (context.aggregationFunction().bracketHint() != null) { hints = context.aggregationFunction().bracketHint().identifier().stream().map( RuleContext::getText).collect(Collectors.toList()); } if (context.aggregationFunction().setQuantifier() != null) { isDistinct = 
context.aggregationFunction().setQuantifier().DISTINCT() != null; } if (isDistinct && CollectionUtils.isEmpty(context.aggregationFunction().expression())) { throw new ParsingException(PARSER_ERROR_MSG.wrongNumOfArgs(functionName), pos); } List<Expr> exprs = visit(context.aggregationFunction().expression(), Expr.class); if (isGroupConcat && !exprs.isEmpty() && context.aggregationFunction().SEPARATOR() == null) { if (isLegacyGroupConcat) { if (exprs.size() == 1) { Expr sepExpr; String sep = ", "; sepExpr = new StringLiteral(sep, pos); exprs.add(sepExpr); } } else { Expr sepExpr; String sep = ","; sepExpr = new StringLiteral(sep, pos); exprs.add(sepExpr); } } if (!orderByElements.isEmpty()) { int exprSize = exprs.size(); if (isGroupConcat) { exprSize--; } for (OrderByElement orderByElement : orderByElements) { Expr by = orderByElement.getExpr(); if (by instanceof IntLiteral) { long ordinal = ((IntLiteral) by).getLongValue(); if (ordinal < 1 || ordinal > exprSize) { throw new ParsingException(format("ORDER BY position %s is not in %s output list", ordinal, functionName), pos); } by = exprs.get((int) ordinal - 1); orderByElement.setExpr(by); } } orderByElements = orderByElements.stream().filter(x -> !x.getExpr().isConstant()).collect(toList()); } if (CollectionUtils.isNotEmpty(orderByElements)) { orderByElements.stream().forEach(e -> exprs.add(e.getExpr())); } FunctionCallExpr functionCallExpr = new FunctionCallExpr(functionName, context.aggregationFunction().ASTERISK_SYMBOL() == null ? 
new FunctionParams(isDistinct, exprs, orderByElements) : FunctionParams.createStarParam(), pos); functionCallExpr = SyntaxSugars.parse(functionCallExpr); functionCallExpr.setHints(hints); if (context.over() != null) { return buildOverClause(functionCallExpr, context.over(), pos); } return functionCallExpr; } @Override public ParseNode visitWindowFunctionCall(StarRocksParser.WindowFunctionCallContext context) { FunctionCallExpr functionCallExpr = (FunctionCallExpr) visit(context.windowFunction()); return buildOverClause(functionCallExpr, context.over(), createPos(context)); } @Override public ParseNode visitWindowFunction(StarRocksParser.WindowFunctionContext context) { FunctionCallExpr functionCallExpr = new FunctionCallExpr(context.name.getText().toLowerCase(), new FunctionParams(false, visit(context.expression(), Expr.class)), createPos(context)); functionCallExpr = SyntaxSugars.parse(functionCallExpr); boolean ignoreNull = CollectionUtils.isNotEmpty(context.ignoreNulls()) && context.ignoreNulls().stream().anyMatch(Objects::nonNull); functionCallExpr.setIgnoreNulls(ignoreNull); return functionCallExpr; } private AnalyticExpr buildOverClause(FunctionCallExpr functionCallExpr, StarRocksParser.OverContext context, NodePosition pos) { functionCallExpr.setIsAnalyticFnCall(true); List<OrderByElement> orderByElements = new ArrayList<>(); if (context.ORDER() != null) { orderByElements = visit(context.sortItem(), OrderByElement.class); } List<Expr> partitionExprs = visit(context.partition, Expr.class); return new AnalyticExpr(functionCallExpr, partitionExprs, orderByElements, (AnalyticWindow) visitIfPresent(context.windowFrame()), context.bracketHint() == null ? 
null : context.bracketHint().identifier().stream() .map(RuleContext::getText).collect(toList()), pos); } @Override public ParseNode visitExtract(StarRocksParser.ExtractContext context) { String fieldString = context.identifier().getText(); return new FunctionCallExpr(fieldString, new FunctionParams(Lists.newArrayList((Expr) visit(context.valueExpression()))), createPos(context)); } @Override public ParseNode visitCast(StarRocksParser.CastContext context) { return new CastExpr(new TypeDef(getType(context.type())), (Expr) visit(context.expression()), createPos(context)); } @Override public ParseNode visitConvert(StarRocksParser.ConvertContext context) { return new CastExpr(new TypeDef(getType(context.type())), (Expr) visit(context.expression()), createPos(context)); } @Override public ParseNode visitInformationFunctionExpression(StarRocksParser.InformationFunctionExpressionContext context) { return new InformationFunction(context.name.getText().toUpperCase(), createPos(context)); } @Override public ParseNode visitSpecialDateTimeExpression(StarRocksParser.SpecialDateTimeExpressionContext context) { return new FunctionCallExpr(context.name.getText().toUpperCase(), Lists.newArrayList()); } @Override public ParseNode visitSpecialFunctionExpression(StarRocksParser.SpecialFunctionExpressionContext context) { NodePosition pos = createPos(context); if (context.CHAR() != null) { return new FunctionCallExpr("char", visit(context.expression(), Expr.class), pos); } else if (context.DAY() != null) { return new FunctionCallExpr("day", visit(context.expression(), Expr.class), pos); } else if (context.HOUR() != null) { return new FunctionCallExpr("hour", visit(context.expression(), Expr.class), pos); } else if (context.IF() != null) { return new FunctionCallExpr("if", visit(context.expression(), Expr.class), pos); } else if (context.LEFT() != null) { return new FunctionCallExpr("left", visit(context.expression(), Expr.class), pos); } else if (context.LIKE() != null) { return new 
FunctionCallExpr("like", visit(context.expression(), Expr.class), pos); } else if (context.MINUTE() != null) { return new FunctionCallExpr("minute", visit(context.expression(), Expr.class), pos); } else if (context.MOD() != null) { return new FunctionCallExpr("mod", visit(context.expression(), Expr.class), pos); } else if (context.MONTH() != null) { return new FunctionCallExpr("month", visit(context.expression(), Expr.class), pos); } else if (context.QUARTER() != null) { return new FunctionCallExpr("quarter", visit(context.expression(), Expr.class), pos); } else if (context.REGEXP() != null) { return new FunctionCallExpr("regexp", visit(context.expression(), Expr.class), pos); } else if (context.REPLACE() != null) { return new FunctionCallExpr("replace", visit(context.expression(), Expr.class), pos); } else if (context.RIGHT() != null) { return new FunctionCallExpr("right", visit(context.expression(), Expr.class), pos); } else if (context.RLIKE() != null) { return new FunctionCallExpr("regexp", visit(context.expression(), Expr.class), pos); } else if (context.SECOND() != null) { return new FunctionCallExpr("second", visit(context.expression(), Expr.class), pos); } else if (context.YEAR() != null) { return new FunctionCallExpr("year", visit(context.expression(), Expr.class), pos); } else if (context.PASSWORD() != null) { StringLiteral stringLiteral = (StringLiteral) visit(context.string()); return new StringLiteral(new String(MysqlPassword.makeScrambledPassword(stringLiteral.getValue())), pos); } else if (context.FLOOR() != null) { return new FunctionCallExpr("floor", visit(context.expression(), Expr.class), pos); } else if (context.CEIL() != null) { return new FunctionCallExpr("ceil", visit(context.expression(), Expr.class), pos); } String functionName = context.TIMESTAMPADD() != null ? 
"TIMESTAMPADD" : "TIMESTAMPDIFF"; UnitIdentifier e1 = (UnitIdentifier) visit(context.unitIdentifier()); Expr e2 = (Expr) visit(context.expression(0)); Expr e3 = (Expr) visit(context.expression(1)); return new TimestampArithmeticExpr(functionName, e3, e2, e1.getDescription(), pos); } @Override public ParseNode visitConcat(StarRocksParser.ConcatContext context) { Expr left = (Expr) visit(context.left); Expr right = (Expr) visit(context.right); return new FunctionCallExpr("concat", new FunctionParams(Lists.newArrayList(left, right)), createPos(context)); } @Override public ParseNode visitNullLiteral(StarRocksParser.NullLiteralContext context) { return new NullLiteral(createPos(context)); } @Override public ParseNode visitBooleanLiteral(StarRocksParser.BooleanLiteralContext context) { NodePosition pos = createPos(context); String value = context.getText(); return new BoolLiteral("TRUE".equalsIgnoreCase(value), pos); } @Override public ParseNode visitNumericLiteral(StarRocksParser.NumericLiteralContext context) { return visit(context.number()); } @Override public ParseNode visitIntegerValue(StarRocksParser.IntegerValueContext context) { NodePosition pos = createPos(context); try { BigInteger intLiteral = new BigInteger(context.getText()); if (intLiteral.compareTo(LONG_MAX) <= 0) { return new IntLiteral(intLiteral.longValue(), pos); } else if (intLiteral.compareTo(LARGEINT_MAX_ABS) <= 0) { return new LargeIntLiteral(intLiteral.toString(), pos); } else { throw new ParsingException(PARSER_ERROR_MSG.numOverflow(context.getText()), pos); } } catch (NumberFormatException | AnalysisException e) { throw new ParsingException(PARSER_ERROR_MSG.invalidNumFormat(context.getText()), pos); } } @Override public ParseNode visitDoubleValue(StarRocksParser.DoubleValueContext context) { NodePosition pos = createPos(context); try { if (SqlModeHelper.check(sqlMode, SqlModeHelper.MODE_DOUBLE_LITERAL)) { return new FloatLiteral(context.getText(), pos); } else { BigDecimal decimal = new 
BigDecimal(context.getText()); int precision = DecimalLiteral.getRealPrecision(decimal); int scale = DecimalLiteral.getRealScale(decimal); int integerPartWidth = precision - scale; if (integerPartWidth > 38) { return new FloatLiteral(context.getText(), pos); } return new DecimalLiteral(decimal, pos); } } catch (AnalysisException | NumberFormatException e) { throw new ParsingException(PARSER_ERROR_MSG.invalidNumFormat(context.getText()), pos); } } @Override public ParseNode visitDecimalValue(StarRocksParser.DecimalValueContext context) { NodePosition pos = createPos(context); try { if (SqlModeHelper.check(sqlMode, SqlModeHelper.MODE_DOUBLE_LITERAL)) { return new FloatLiteral(context.getText(), pos); } else { return new DecimalLiteral(context.getText(), pos); } } catch (AnalysisException e) { throw new ParsingException(PARSER_ERROR_MSG.invalidNumFormat(context.getText()), pos); } } @Override public ParseNode visitDateLiteral(StarRocksParser.DateLiteralContext context) { NodePosition pos = createPos(context); String value = ((StringLiteral) visit(context.string())).getValue(); try { if (context.DATE() != null) { return new DateLiteral(value, Type.DATE); } else { return new DateLiteral(value, Type.DATETIME); } } catch (AnalysisException e) { throw new ParsingException(PARSER_ERROR_MSG.invalidDateFormat(value), pos); } } @Override public ParseNode visitString(StarRocksParser.StringContext context) { String quotedString; NodePosition pos = createPos(context); if (context.SINGLE_QUOTED_TEXT() != null) { quotedString = context.SINGLE_QUOTED_TEXT().getText(); quotedString = quotedString.substring(1, quotedString.length() - 1).replace("''", "'"); } else { quotedString = context.DOUBLE_QUOTED_TEXT().getText(); quotedString = quotedString.substring(1, quotedString.length() - 1).replace("\"\"", "\""); } return new StringLiteral(escapeBackSlash(quotedString), pos); } @Override public ParseNode visitBinary(StarRocksParser.BinaryContext context) { String quotedText; if 
(context.BINARY_SINGLE_QUOTED_TEXT() != null) {
    quotedText = context.BINARY_SINGLE_QUOTED_TEXT().getText();
} else {
    quotedText = context.BINARY_DOUBLE_QUOTED_TEXT().getText();
}
// Strip the two-character prefix (e.g. x') and the trailing quote from the binary literal text.
return new VarBinaryLiteral(quotedText.substring(2, quotedText.length() - 1), createPos(context));
}

// Resolves MySQL-style backslash escape sequences in a string literal:
// \n, \t, \r, \b, \0 and \Z (0x1A) become their control characters.
// For \_ and \% the backslash itself is kept and then the following character is
// appended via the deliberate switch fall-through, so LIKE wildcards stay escaped.
// Any other escaped character is emitted verbatim (the backslash is dropped).
private static String escapeBackSlash(String str) {
    StringWriter writer = new StringWriter();
    int strLen = str.length();
    for (int i = 0; i < strLen; ++i) {
        char c = str.charAt(i);
        // A trailing lone backslash (last character) is copied through unchanged.
        if (c == '\\' && (i + 1) < strLen) {
            switch (str.charAt(i + 1)) {
                case 'n':
                    writer.append('\n');
                    break;
                case 't':
                    writer.append('\t');
                    break;
                case 'r':
                    writer.append('\r');
                    break;
                case 'b':
                    writer.append('\b');
                    break;
                case '0':
                    writer.append('\0');
                    break;
                case 'Z':
                    writer.append('\032');
                    break;
                case '_':
                case '%':
                    writer.append('\\');
                    /* Fall through */
                default:
                    writer.append(str.charAt(i + 1));
                    break;
            }
            // Skip the escaped character; it has been consumed by the switch above.
            i++;
        } else {
            writer.append(c);
        }
    }
    return writer.toString();
}

// Builds an ARRAY[...] literal; element type is taken from an explicit array type
// if present, otherwise left null for later inference.
@Override
public ParseNode visitArrayConstructor(StarRocksParser.ArrayConstructorContext context) {
    NodePosition pos = createPos(context);
    Type type = null;
    if (context.arrayType() != null) {
        type = new ArrayType(getType(context.arrayType().type()));
    }
    List<Expr> exprs;
    if (context.expressionList() != null) {
        exprs = visit(context.expressionList().expression(), Expr.class);
    } else {
        // ARRAY[] with no elements.
        exprs = Collections.emptyList();
    }
    return new ArrayExpr(type, exprs, pos);
}

// A single key:value pair inside a map constructor, carried as a two-element ValueList.
@Override
public ParseNode visitMapExpression(StarRocksParser.MapExpressionContext context) {
    ArrayList<Expr> row = Lists.newArrayList();
    Expr key = (Expr) visit(context.key);
    Expr value = (Expr) visit(context.value);
    row.add(key);
    row.add(value);
    return new ValueList(row, createPos(context));
}

// Builds a MAP{...} literal; type defaults to ANY_MAP unless an explicit map type is given.
@Override
public ParseNode visitMapConstructor(StarRocksParser.MapConstructorContext context) {
    NodePosition pos = createPos(context);
    Type type = Type.ANY_MAP;
    if (context.mapType() != null) {
        type = getMapType(context.mapType());
    }
    List<Expr> exprs;
    if (context.mapExpressionList() != null) {
List<ValueList> rowValues = visit(context.mapExpressionList().mapExpression(), ValueList.class); List<List<Expr>> rows = rowValues.stream().map(ValueList::getRow).collect(toList()); exprs = rows.stream().flatMap(Collection::stream).collect(Collectors.toList()); int num = exprs.size(); if (num % 2 == 1) { throw new ParsingException(PARSER_ERROR_MSG.wrongNumOfArgs(num, "map()", "Arguments must be in key/value pairs"), pos); } } else { exprs = Collections.emptyList(); } return new MapExpr(type, exprs, pos); } @Override public ParseNode visitCollectionSubscript(StarRocksParser.CollectionSubscriptContext context) { Expr value = (Expr) visit(context.value); Expr index = (Expr) visit(context.index); return new CollectionElementExpr(value, index, false); } @Override public ParseNode visitArraySlice(StarRocksParser.ArraySliceContext context) { throw new ParsingException(PARSER_ERROR_MSG.unsupportedExpr("array slice"), createPos(context)); /* Expr expr = (Expr) visit(context.primaryExpression()); IntLiteral lowerBound; if (context.start != null) { lowerBound = new IntLiteral(Long.parseLong(context.start.getText())); } else { lowerBound = new IntLiteral(0); } IntLiteral upperBound; if (context.end != null) { upperBound = new IntLiteral(Long.parseLong(context.end.getText())); } else { upperBound = new IntLiteral(-1); } return new ArraySliceExpr(expr, lowerBound, upperBound); */ } @Override public ParseNode visitInterval(StarRocksParser.IntervalContext context) { return new IntervalLiteral((Expr) visit(context.value), (UnitIdentifier) visit(context.from), createPos(context)); } @Override public ParseNode visitUnitIdentifier(StarRocksParser.UnitIdentifierContext context) { return new UnitIdentifier(context.getText(), createPos(context)); } @Override public ParseNode visitUnitBoundary(StarRocksParser.UnitBoundaryContext context) { return new UnitBoundary(context.getText(), createPos(context)); } @Override public ParseNode visitDereference(StarRocksParser.DereferenceContext ctx) { 
Expr base = (Expr) visit(ctx.base); NodePosition pos = createPos(ctx); String fieldName; if (ctx.DOT_IDENTIFIER() != null) { fieldName = ctx.DOT_IDENTIFIER().getText().substring(1); } else { fieldName = ((Identifier) visit(ctx.fieldName)).getValue(); } if (base instanceof SlotRef) { SlotRef tmp = (SlotRef) base; List<String> parts = new ArrayList<>(tmp.getQualifiedName().getParts()); parts.add(fieldName); return new SlotRef(QualifiedName.of(parts, pos)); } else if (base instanceof SubfieldExpr) { SubfieldExpr subfieldExpr = (SubfieldExpr) base; ImmutableList.Builder<String> builder = new ImmutableList.Builder<>(); for (String tmpFieldName : subfieldExpr.getFieldNames()) { builder.add(tmpFieldName); } builder.add(fieldName); return new SubfieldExpr(subfieldExpr.getChild(0), builder.build(), pos); } else { return new SubfieldExpr(base, ImmutableList.of(fieldName), pos); } } @Override public ParseNode visitColumnReference(StarRocksParser.ColumnReferenceContext context) { Identifier identifier = (Identifier) visit(context.identifier()); List<String> parts = new ArrayList<>(); parts.add(identifier.getValue()); QualifiedName qualifiedName = QualifiedName.of(parts, createPos(context)); return new SlotRef(qualifiedName); } @Override public ParseNode visitArrowExpression(StarRocksParser.ArrowExpressionContext context) { Expr expr = (Expr) visit(context.primaryExpression()); StringLiteral stringLiteral = (StringLiteral) visit(context.string()); return new ArrowExpr(expr, stringLiteral, createPos(context)); } @Override public ParseNode visitLambdaFunctionExpr(StarRocksParser.LambdaFunctionExprContext context) { List<String> names = Lists.newLinkedList(); if (context.identifierList() != null) { final List<Identifier> identifierList = visit(context.identifierList().identifier(), Identifier.class); names = identifierList.stream().map(Identifier::getValue).collect(toList()); } else { names.add(((Identifier) visit(context.identifier())).getValue()); } List<Expr> arguments = 
Lists.newLinkedList(); Expr expr = null; if (context.expression() != null) { expr = (Expr) visit(context.expression()); } else if (context.expressionList() != null) { List<Expr> exprs = visit(context.expressionList().expression(), Expr.class); if (exprs.size() != 2) { throw new IllegalArgumentException("The right part of map lambda functions can accept at most 2 " + "expressions, but there are " + exprs.size()); } expr = new MapExpr(Type.ANY_MAP, exprs); } arguments.add(expr); for (int i = 0; i < names.size(); ++i) { arguments.add(new LambdaArgument(names.get(i))); } return new LambdaFunctionExpr(arguments); } @Override public ParseNode visitUserVariable(StarRocksParser.UserVariableContext context) { String variable = ((Identifier) visit(context.identifierOrString())).getValue(); return new VariableExpr(variable, SetType.USER, createPos(context)); } @Override public ParseNode visitSystemVariable(StarRocksParser.SystemVariableContext context) { SetType setType = getVariableType(context.varType()); return new VariableExpr(((Identifier) visit(context.identifier())).getValue(), setType, createPos(context)); } @Override public ParseNode visitCollate(StarRocksParser.CollateContext context) { return visit(context.primaryExpression()); } @Override public ParseNode visitParenthesizedExpression(StarRocksParser.ParenthesizedExpressionContext context) { return visit(context.expression()); } @Override public ParseNode visitUnquotedIdentifier(StarRocksParser.UnquotedIdentifierContext context) { return new Identifier(context.getText(), createPos(context)); } @Override public ParseNode visitBackQuotedIdentifier(StarRocksParser.BackQuotedIdentifierContext context) { return new Identifier(context.getText().replace("`", ""), createPos(context)); } @Override public ParseNode visitDigitIdentifier(StarRocksParser.DigitIdentifierContext context) { return new Identifier(context.getText(), createPos(context)); } @Override public ParseNode 
visitDictionaryGetExpr(StarRocksParser.DictionaryGetExprContext context) { List<Expr> params = visit(context.expressionList().expression(), Expr.class); return new DictionaryGetExpr(params); } private static StatementBase.ExplainLevel getExplainType(StarRocksParser.ExplainDescContext context) { StatementBase.ExplainLevel explainLevel = StatementBase.ExplainLevel.NORMAL; if (context.LOGICAL() != null) { explainLevel = StatementBase.ExplainLevel.LOGICAL; } else if (context.ANALYZE() != null) { explainLevel = StatementBase.ExplainLevel.ANALYZE; } else if (context.VERBOSE() != null) { explainLevel = StatementBase.ExplainLevel.VERBOSE; } else if (context.COSTS() != null) { explainLevel = StatementBase.ExplainLevel.COST; } else if (context.SCHEDULER() != null) { explainLevel = StatementBase.ExplainLevel.SCHEDULER; } return explainLevel; } public static SetType getVariableType(StarRocksParser.VarTypeContext context) { if (context == null) { return null; } if (context.GLOBAL() != null) { return SetType.GLOBAL; } else if (context.VERBOSE() != null) { return SetType.VERBOSE; } else { return SetType.SESSION; } } @Override public ParseNode visitAssignment(StarRocksParser.AssignmentContext context) { String column = ((Identifier) visit(context.identifier())).getValue(); Expr expr = (Expr) visit(context.expressionOrDefault()); return new ColumnAssignment(column, expr, createPos(context)); } @Override public ParseNode visitPartitionDesc(StarRocksParser.PartitionDescContext context) { List<PartitionDesc> partitionDescList = new ArrayList<>(); StarRocksParser.IdentifierListContext identifierListContext = context.identifierList(); if (context.functionCall() != null) { for (StarRocksParser.RangePartitionDescContext rangePartitionDescContext : context.rangePartitionDesc()) { final PartitionDesc rangePartitionDesc = (PartitionDesc) visit(rangePartitionDescContext); partitionDescList.add(rangePartitionDesc); } FunctionCallExpr functionCallExpr = (FunctionCallExpr) 
visit(context.functionCall()); List<String> columnList = AnalyzerUtils.checkAndExtractPartitionCol(functionCallExpr, null); RangePartitionDesc rangePartitionDesc = new RangePartitionDesc(columnList, partitionDescList); return new ExpressionPartitionDesc(rangePartitionDesc, functionCallExpr); } List<Identifier> identifierList = visit(identifierListContext.identifier(), Identifier.class); if (context.LIST() == null && context.RANGE() == null) { List<String> columnList = identifierList.stream().map(Identifier::getValue).collect(toList()); return new ListPartitionDesc(columnList, new ArrayList<>()); } else { List<PartitionDesc> partitionDesc = visit(context.rangePartitionDesc(), PartitionDesc.class); return new RangePartitionDesc( identifierList.stream().map(Identifier::getValue).collect(toList()), partitionDesc, createPos(context)); } } @Override public ParseNode visitSingleRangePartition(StarRocksParser.SingleRangePartitionContext context) { PartitionKeyDesc partitionKeyDesc = (PartitionKeyDesc) visit(context.partitionKeyDesc()); boolean ifNotExists = context.IF() != null; Map<String, String> properties = null; if (context.propertyList() != null) { properties = new HashMap<>(); List<Property> propertyList = visit(context.propertyList().property(), Property.class); for (Property property : propertyList) { properties.put(property.getKey(), property.getValue()); } } return new SingleRangePartitionDesc(ifNotExists, ((Identifier) visit(context.identifier())).getValue(), partitionKeyDesc, properties, createPos(context)); } @Override public ParseNode visitMultiRangePartition(StarRocksParser.MultiRangePartitionContext context) { NodePosition pos = createPos(context); if (context.interval() != null) { IntervalLiteral intervalLiteral = (IntervalLiteral) visit(context.interval()); Expr expr = intervalLiteral.getValue(); long intervalVal; if (expr instanceof IntLiteral) { intervalVal = ((IntLiteral) expr).getLongValue(); } else { throw new 
ParsingException(PARSER_ERROR_MSG.unsupportedExprWithInfo(expr.toSql(), "RANGE DESC"), expr.getPos()); } return new MultiRangePartitionDesc( ((StringLiteral) visit(context.string(0))).getStringValue(), ((StringLiteral) visit(context.string(1))).getStringValue(), intervalVal, intervalLiteral.getUnitIdentifier().getDescription(), pos); } else { return new MultiRangePartitionDesc( ((StringLiteral) visit(context.string(0))).getStringValue(), ((StringLiteral) visit(context.string(1))).getStringValue(), Long.parseLong(context.INTEGER_VALUE().getText()), null, pos); } } @Override public ParseNode visitPartitionRangeDesc(StarRocksParser.PartitionRangeDescContext context) { return new PartitionRangeDesc( ((StringLiteral) visit(context.string(0))).getStringValue(), ((StringLiteral) visit(context.string(1))).getStringValue(), createPos(context)); } @Override public ParseNode visitSingleItemListPartitionDesc(StarRocksParser.SingleItemListPartitionDescContext context) { List<String> values = context.stringList().string().stream().map(c -> ((StringLiteral) visit(c)).getStringValue()) .collect(toList()); boolean ifNotExists = context.IF() != null; Map<String, String> properties = null; if (context.propertyList() != null) { properties = new HashMap<>(); List<Property> propertyList = visit(context.propertyList().property(), Property.class); for (Property property : propertyList) { properties.put(property.getKey(), property.getValue()); } } return new SingleItemListPartitionDesc(ifNotExists, ((Identifier) visit(context.identifier())).getValue(), values, properties, createPos(context)); } @Override public ParseNode visitMultiItemListPartitionDesc(StarRocksParser.MultiItemListPartitionDescContext context) { boolean ifNotExists = context.IF() != null; List<List<String>> multiValues = new ArrayList<>(); for (StarRocksParser.StringListContext stringListContext : context.stringList()) { List<String> values = stringListContext.string().stream().map(c -> ((StringLiteral) 
visit(c)).getStringValue()) .collect(toList()); multiValues.add(values); } Map<String, String> properties = null; if (context.propertyList() != null) { properties = new HashMap<>(); List<Property> propertyList = visit(context.propertyList().property(), Property.class); for (Property property : propertyList) { properties.put(property.getKey(), property.getValue()); } } return new MultiItemListPartitionDesc(ifNotExists, ((Identifier) visit(context.identifier())).getValue(), multiValues, properties, createPos(context)); } @Override public ParseNode visitPartitionKeyDesc(StarRocksParser.PartitionKeyDescContext context) { PartitionKeyDesc partitionKeyDesc; NodePosition pos = createPos(context); if (context.LESS() != null) { if (context.MAXVALUE() != null) { return PartitionKeyDesc.createMaxKeyDesc(); } List<PartitionValue> partitionValueList = visit(context.partitionValueList().get(0).partitionValue(), PartitionValue.class); partitionKeyDesc = new PartitionKeyDesc(partitionValueList, pos); } else { List<PartitionValue> lowerPartitionValueList = visit(context.partitionValueList().get(0).partitionValue(), PartitionValue.class); List<PartitionValue> upperPartitionValueList = visit(context.partitionValueList().get(1).partitionValue(), PartitionValue.class); partitionKeyDesc = new PartitionKeyDesc(lowerPartitionValueList, upperPartitionValueList, pos); } return partitionKeyDesc; } @Override public ParseNode visitPartitionValue(StarRocksParser.PartitionValueContext context) { NodePosition pos = createPos(context); if (context.MAXVALUE() != null) { return PartitionValue.MAX_VALUE; } else { return new PartitionValue(((StringLiteral) visit(context.string())).getStringValue(), pos); } } @Override public ParseNode visitDistributionDesc(StarRocksParser.DistributionDescContext context) { int buckets = 0; NodePosition pos = createPos(context); if (context.INTEGER_VALUE() != null) { buckets = Integer.parseInt(context.INTEGER_VALUE().getText()); } if (context.HASH() != null) { 
List<Identifier> identifierList = visit(context.identifierList().identifier(), Identifier.class); return new HashDistributionDesc(buckets, identifierList.stream().map(Identifier::getValue).collect(toList()), pos); } else { return new RandomDistributionDesc(buckets, pos); } } @Override public ParseNode visitRefreshSchemeDesc(StarRocksParser.RefreshSchemeDescContext context) { LocalDateTime startTime = LocalDateTime.now(); IntervalLiteral intervalLiteral = null; NodePosition pos = createPos(context); MaterializedView.RefreshMoment refreshMoment = Config.default_mv_refresh_immediate ? MaterializedView.RefreshMoment.IMMEDIATE : MaterializedView.RefreshMoment.DEFERRED; if (context.DEFERRED() != null) { refreshMoment = MaterializedView.RefreshMoment.DEFERRED; } else if (context.IMMEDIATE() != null) { refreshMoment = MaterializedView.RefreshMoment.IMMEDIATE; } if (context.ASYNC() != null) { boolean defineStartTime = false; if (context.START() != null) { NodePosition timePos = createPos(context.string()); StringLiteral stringLiteral = (StringLiteral) visit(context.string()); DateTimeFormatter dateTimeFormatter = null; try { dateTimeFormatter = DateUtils.probeFormat(stringLiteral.getStringValue()); LocalDateTime tempStartTime = DateUtils. 
parseStringWithDefaultHSM(stringLiteral.getStringValue(), dateTimeFormatter); startTime = tempStartTime; defineStartTime = true; } catch (AnalysisException e) { throw new ParsingException(PARSER_ERROR_MSG.invalidDateFormat(stringLiteral.getStringValue()), timePos); } } if (context.interval() != null) { intervalLiteral = (IntervalLiteral) visit(context.interval()); if (!(intervalLiteral.getValue() instanceof IntLiteral)) { String exprSql = intervalLiteral.getValue().toSql(); throw new ParsingException(PARSER_ERROR_MSG.unsupportedExprWithInfo(exprSql, "INTERVAL"), createPos(context.interval())); } } return new AsyncRefreshSchemeDesc(defineStartTime, startTime, intervalLiteral, refreshMoment, pos); } else if (context.MANUAL() != null) { return new ManualRefreshSchemeDesc(refreshMoment, pos); } else if (context.INCREMENTAL() != null) { return new IncrementalRefreshSchemeDesc(refreshMoment, pos); } return null; } @Override public ParseNode visitProperty(StarRocksParser.PropertyContext context) { return new Property( ((StringLiteral) visit(context.key)).getStringValue(), ((StringLiteral) visit(context.value)).getStringValue(), createPos(context)); } @Override public ParseNode visitOutfile(StarRocksParser.OutfileContext context) { Map<String, String> properties = new HashMap<>(); if (context.properties() != null) { List<Property> propertyList = visit(context.properties().property(), Property.class); for (Property property : propertyList) { properties.put(property.getKey(), property.getValue()); } } String format = null; if (context.fileFormat() != null) { if (context.fileFormat().identifier() != null) { format = ((Identifier) visit(context.fileFormat().identifier())).getValue(); } else if (context.fileFormat().string() != null) { format = ((StringLiteral) visit(context.fileFormat().string())).getStringValue(); } } return new OutFileClause( ((StringLiteral) visit(context.file)).getStringValue(), format, properties, createPos(context)); } @Override public ParseNode 
visitColumnNameWithComment(StarRocksParser.ColumnNameWithCommentContext context) { String comment = null; if (context.comment() != null) { comment = ((StringLiteral) visit(context.comment())).getStringValue(); } return new ColWithComment(((Identifier) visit(context.identifier())).getValue(), comment, createPos(context)); } @Override public ParseNode visitIdentifierOrStringOrStar(StarRocksParser.IdentifierOrStringOrStarContext context) { String s = null; if (context.identifier() != null) { return visit(context.identifier()); } else if (context.string() != null) { s = ((StringLiteral) visit(context.string())).getStringValue(); } else if (context.ASTERISK_SYMBOL() != null) { s = "*"; } return new Identifier(s, createPos(context)); } @Override public ParseNode visitIdentifierOrString(StarRocksParser.IdentifierOrStringContext context) { String s = null; if (context.identifier() != null) { return visit(context.identifier()); } else if (context.string() != null) { s = ((StringLiteral) visit(context.string())).getStringValue(); } return new Identifier(s, createPos(context)); } @Override public ParseNode visitUserWithHostAndBlanket(StarRocksParser.UserWithHostAndBlanketContext context) { Identifier user = (Identifier) visit(context.identifierOrString(0)); Identifier host = (Identifier) visit(context.identifierOrString(1)); return new UserIdentity(user.getValue(), host.getValue(), true, createPos(context), false); } @Override public ParseNode visitUserWithHost(StarRocksParser.UserWithHostContext context) { Identifier user = (Identifier) visit(context.identifierOrString(0)); Identifier host = (Identifier) visit(context.identifierOrString(1)); return new UserIdentity(user.getValue(), host.getValue(), false, createPos(context), false); } @Override public ParseNode visitUserWithoutHost(StarRocksParser.UserWithoutHostContext context) { Identifier user = (Identifier) visit(context.identifierOrString()); return new UserIdentity(user.getValue(), "%", false, createPos(context), 
false); }

/**
 * Builds a PREPARE statement. The prepared body is either an inline statement
 * (parameters were collected into {@code parameters} while visiting it) or a
 * single-quoted SQL string that is parsed here; if that string itself parses to
 * a PrepareStmt, its inner statement and parameters are re-wrapped under the
 * new statement name.
 */
@Override
public ParseNode visitPrepareStatement(StarRocksParser.PrepareStatementContext context) {
    String stmtName = context.identifier().getText();
    StatementBase statement = null;
    if (context.prepareSql().statement() != null) {
        statement = (StatementBase) visitStatement(context.prepareSql().statement());
        return new PrepareStmt(stmtName, statement, parameters);
    } else if (context.prepareSql().SINGLE_QUOTED_TEXT() != null) {
        // Drop the surrounding quotes before re-parsing the embedded SQL.
        String sql = context.prepareSql().SINGLE_QUOTED_TEXT().getText();
        statement = SqlParser.parseSingleStatement(sql.substring(1, sql.length() - 1), sqlMode);
        // FIX: the original tested "null != statement && statement instanceof PrepareStmt";
        // instanceof is already null-safe, so the explicit null check was redundant.
        if (statement instanceof PrepareStmt) {
            PrepareStmt prepareStmt = (PrepareStmt) statement;
            return new PrepareStmt(stmtName, prepareStmt.getInnerStmt(), prepareStmt.getParameters());
        } else {
            return new PrepareStmt(stmtName, statement, ImmutableList.of());
        }
    }
    throw new ParsingException("error prepare sql");
}

@Override
public ParseNode visitDeallocateStatement(StarRocksParser.DeallocateStatementContext ctx) {
    return new DeallocateStmt(ctx.identifier().getText());
}

// EXECUTE stmt [USING @var, ...]: each identifier names a user variable supplying a value.
@Override
public ParseNode visitExecuteStatement(StarRocksParser.ExecuteStatementContext context) {
    String stmtName = context.identifier().getText();
    List<StarRocksParser.IdentifierOrStringContext> queryStatementContext = context.identifierOrString();
    List<Expr> variableExprs = new ArrayList<>();
    if (context.identifierOrString() != null) {
        queryStatementContext.forEach(varNameContext -> {
            Identifier identifier = (Identifier) visit(varNameContext);
            variableExprs.add(new VariableExpr(identifier.getValue(), SetType.USER));
        });
    }
    return new ExecuteStmt(stmtName, variableExprs);
}

// A '?' placeholder: lazily creates the parameter list and assigns slot ids in
// left-to-right order of appearance.
@Override
public ParseNode visitParameter(StarRocksParser.ParameterContext ctx) {
    if (parameters == null) {
        parameters = new ArrayList<>();
    }
    Parameter parameter = new Parameter(placeHolderSlotId++);
    parameters.add(parameter);
    return parameter;
}

@Override
public ParseNode
visitDecommissionDiskClause(StarRocksParser.DecommissionDiskClauseContext context) {
    // Disk decommission is not supported by this build.
    throw new SemanticException("not support");
}

@Override
public ParseNode visitCancelDecommissionDiskClause(StarRocksParser.CancelDecommissionDiskClauseContext context) {
    throw new SemanticException("not support");
}

@Override
public ParseNode visitDisableDiskClause(StarRocksParser.DisableDiskClauseContext context) {
    throw new SemanticException("not support");
}

@Override
public ParseNode visitCancelDisableDiskClause(StarRocksParser.CancelDisableDiskClauseContext context) {
    throw new SemanticException("not support");
}

// Visits each parser context in order and casts every resulting AST node to the requested type.
private <T> List<T> visit(List<? extends ParserRuleContext> contexts, Class<T> clazz) {
    return contexts.stream()
            .map(this::visit)
            .map(clazz::cast)
            .collect(toList());
}

// Same as visit(List, Class) but tolerates an absent grammar element: returns null
// when the context list is null or empty instead of an empty list.
private <T> List<T> visitIfPresent(List<? extends ParserRuleContext> contexts, Class<T> clazz) {
    if (contexts != null && contexts.size() != 0) {
        return contexts.stream()
                .map(this::visit)
                .map(clazz::cast)
                .collect(toList());
    } else {
        return null;
    }
}

// Visits the context when present; returns null for an absent optional grammar element.
private ParseNode visitIfPresent(ParserRuleContext context) {
    if (context != null) {
        return visit(context);
    } else {
        return null;
    }
}

// Builds a function-argument definition from a type list; a trailing DOTDOTDOT ("...")
// token marks the signature as variadic.
private FunctionArgsDef getFunctionArgsDef(StarRocksParser.TypeListContext typeList) {
    List<TypeDef> typeDefList = new ArrayList<>();
    for (StarRocksParser.TypeContext typeContext : typeList.type()) {
        typeDefList.add(new TypeDef(getType(typeContext)));
    }
    boolean isVariadic = typeList.DOTDOTDOT() != null;
    return new FunctionArgsDef(typeDefList, isVariadic);
}

// Convenience accessor: visits an identifier context and unwraps its string value.
private String getIdentifierName(StarRocksParser.IdentifierContext context) {
    return ((Identifier) visit(context)).getValue();
}

// Collects the dotted parts of a qualified name (e.g. db.table or catalog.db.table),
// preserving source position for error reporting.
private QualifiedName getQualifiedName(StarRocksParser.QualifiedNameContext context) {
    List<String> parts = new ArrayList<>();
    NodePosition pos = createPos(context);
    for (ParseTree c : context.children) {
        if (c instanceof TerminalNode) {
            TerminalNode t = (TerminalNode) c;
            if (t.getSymbol().getType() == StarRocksParser.DOT_IDENTIFIER) {
                // A DOT_IDENTIFIER token includes the leading '.'; strip it.
                parts.add(t.getText().substring(1));
            }
        } else if (c instanceof StarRocksParser.IdentifierContext) {
            StarRocksParser.IdentifierContext identifierContext = (StarRocksParser.IdentifierContext) c;
            Identifier identifier = (Identifier) visit(identifierContext);
            parts.add(identifier.getValue());
        }
    }
    return QualifiedName.of(parts, pos);
}

// Interprets a qualified name as [db.]task; more than two parts is a parse error.
private TaskName qualifiedNameToTaskName(QualifiedName qualifiedName) {
    List<String> parts = qualifiedName.getParts();
    if (parts.size() == 2) {
        return new TaskName(parts.get(0), parts.get(1), qualifiedName.getPos());
    } else if (parts.size() == 1) {
        return new TaskName(null, parts.get(0), qualifiedName.getPos());
    } else {
        throw new ParsingException(PARSER_ERROR_MSG.invalidTaskFormat(qualifiedName.toString()),
                qualifiedName.getPos());
    }
}

// Interprets a qualified name as [catalog.][db.]table; more than three parts is a parse error.
private TableName qualifiedNameToTableName(QualifiedName qualifiedName) {
    List<String> parts = qualifiedName.getParts();
    if (parts.size() == 3) {
        return new TableName(parts.get(0), parts.get(1), parts.get(2), qualifiedName.getPos());
    } else if (parts.size() == 2) {
        return new TableName(null, qualifiedName.getParts().get(0), qualifiedName.getParts().get(1),
                qualifiedName.getPos());
    } else if (parts.size() == 1) {
        return new TableName(null, null, qualifiedName.getParts().get(0), qualifiedName.getPos());
    } else {
        throw new ParsingException(PARSER_ERROR_MSG.invalidTableFormat(qualifiedName.toString()));
    }
}

// Resolves a parsed type context to a concrete catalog Type (base, decimal, array,
// struct, or map — the grammar guarantees exactly one alternative is present).
public Type getType(StarRocksParser.TypeContext context) {
    if (context.baseType() != null) {
        return getBaseType(context.baseType());
    } else if (context.decimalType() != null) {
        return getDecimalType(context.decimalType());
    } else if (context.arrayType() != null) {
        return getArrayType(context.arrayType());
    } else if (context.structType() != null) {
        return getStructType(context.structType());
    } else {
        return getMapType(context.mapType());
    }
}

// Maps a scalar base-type context to a ScalarType; length -1 means "no explicit length".
private Type getBaseType(StarRocksParser.BaseTypeContext context) {
    int length = -1;
    if (context.typeParameter() != null) {
        length =
Integer.parseInt(context.typeParameter().INTEGER_VALUE().toString()); } if (context.STRING() != null || context.TEXT() != null) { ScalarType type = ScalarType.createVarcharType(ScalarType.DEFAULT_STRING_LENGTH); return type; } else if (context.VARCHAR() != null) { ScalarType type = ScalarType.createVarcharType(length); return type; } else if (context.CHAR() != null) { ScalarType type = ScalarType.createCharType(length); return type; } else if (context.SIGNED() != null) { return Type.INT; } else if (context.HLL() != null) { ScalarType type = ScalarType.createHllType(); return type; } else if (context.BINARY() != null || context.VARBINARY() != null) { ScalarType type = ScalarType.createVarbinary(length); return type; } else { return ScalarType.createType(context.getChild(0).getText()); } } public ScalarType getDecimalType(StarRocksParser.DecimalTypeContext context) { Integer precision = null; Integer scale = null; if (context.precision != null) { precision = Integer.parseInt(context.precision.getText()); if (context.scale != null) { scale = Integer.parseInt(context.scale.getText()); } } if (context.DECIMAL() != null || context.NUMBER() != null || context.NUMERIC() != null) { if (precision != null) { if (scale != null) { return ScalarType.createUnifiedDecimalType(precision, scale); } return ScalarType.createUnifiedDecimalType(precision); } return ScalarType.createUnifiedDecimalType(10, 0); } else if (context.DECIMAL32() != null || context.DECIMAL64() != null || context.DECIMAL128() != null) { try { ScalarType.checkEnableDecimalV3(); } catch (AnalysisException e) { throw new SemanticException(e.getMessage()); } final PrimitiveType primitiveType = PrimitiveType.valueOf(context.children.get(0).getText().toUpperCase()); if (precision != null) { if (scale != null) { return ScalarType.createDecimalV3Type(primitiveType, precision, scale); } return ScalarType.createDecimalV3Type(primitiveType, precision); } return ScalarType.createDecimalV3Type(primitiveType); } else if 
(context.DECIMALV2() != null) { if (precision != null) { if (scale != null) { return ScalarType.createDecimalV2Type(precision, scale); } return ScalarType.createDecimalV2Type(precision); } return ScalarType.createDecimalV2Type(); } else { throw new IllegalArgumentException("Unsupported type " + context.getText()); } } public ArrayType getArrayType(StarRocksParser.ArrayTypeContext context) { return new ArrayType(getType(context.type())); } public StructType getStructType(StarRocksParser.StructTypeContext context) { ArrayList<StructField> fields = new ArrayList<>(); List<StarRocksParser.SubfieldDescContext> subfields = context.subfieldDescs().subfieldDesc(); for (StarRocksParser.SubfieldDescContext type : subfields) { Identifier fieldIdentifier = (Identifier) visit(type.identifier()); String fieldName = fieldIdentifier.getValue(); fields.add(new StructField(fieldName, getType(type.type()), null)); } return new StructType(fields); } public MapType getMapType(StarRocksParser.MapTypeContext context) { Type keyType = getType(context.type(0)); if (!keyType.isValidMapKeyType()) { throw new ParsingException(PARSER_ERROR_MSG.unsupportedType(keyType.toString(), "for map's key, which should be base types"), createPos(context.type(0))); } Type valueType = getType(context.type(1)); return new MapType(keyType, valueType); } private LabelName qualifiedNameToLabelName(QualifiedName qualifiedName) { List<String> parts = qualifiedName.getParts(); if (parts.size() == 2) { return new LabelName(parts.get(0), parts.get(1), qualifiedName.getPos()); } else if (parts.size() == 1) { return new LabelName(null, parts.get(0), qualifiedName.getPos()); } else { throw new ParsingException(PARSER_ERROR_MSG.invalidTableFormat(qualifiedName.toString()), qualifiedName.getPos()); } } private Map<String, String> getProperties(StarRocksParser.PropertiesContext context) { Map<String, String> properties = new HashMap<>(); if (context != null && context.property() != null) { List<Property> propertyList = 
visit(context.property(), Property.class); for (Property property : propertyList) { properties.put(property.getKey(), property.getValue()); } } return properties; } private Map<String, String> getPropertyList(StarRocksParser.PropertyListContext context) { Map<String, String> properties = new TreeMap<>(String.CASE_INSENSITIVE_ORDER); if (context != null && context.property() != null) { List<Property> propertyList = visit(context.property(), Property.class); for (Property property : propertyList) { properties.put(property.getKey(), property.getValue()); } } return properties; } private List<ParseNode> getLoadPropertyList(List<StarRocksParser.LoadPropertiesContext> loadPropertiesContexts) { List<ParseNode> loadPropertyList = new ArrayList<>(); Preconditions.checkNotNull(loadPropertiesContexts, "load properties is null"); for (StarRocksParser.LoadPropertiesContext loadPropertiesContext : loadPropertiesContexts) { if (loadPropertiesContext.colSeparatorProperty() != null) { StringLiteral literal = (StringLiteral) visit(loadPropertiesContext.colSeparatorProperty().string()); loadPropertyList.add(new ColumnSeparator(literal.getValue(), literal.getPos())); } if (loadPropertiesContext.rowDelimiterProperty() != null) { StringLiteral literal = (StringLiteral) visit(loadPropertiesContext.rowDelimiterProperty().string()); loadPropertyList.add(new RowDelimiter(literal.getValue(), literal.getPos())); } if (loadPropertiesContext.importColumns() != null) { ImportColumnsStmt importColumnsStmt = (ImportColumnsStmt) visit(loadPropertiesContext.importColumns()); loadPropertyList.add(importColumnsStmt); } if (loadPropertiesContext.expression() != null) { Expr where = (Expr) visit(loadPropertiesContext.expression()); loadPropertyList.add(new ImportWhereStmt(where, where.getPos())); } if (loadPropertiesContext.partitionNames() != null) { loadPropertyList.add(visit(loadPropertiesContext.partitionNames())); } } return loadPropertyList; } @Override public ParseNode 
visitImportColumns(StarRocksParser.ImportColumnsContext importColumnsContext) { List<ImportColumnDesc> columns = new ArrayList<>(); for (StarRocksParser.QualifiedNameContext qualifiedNameContext : importColumnsContext.columnProperties().qualifiedName()) { String column = ((Identifier) (visit(qualifiedNameContext))).getValue(); ImportColumnDesc columnDesc = new ImportColumnDesc(column, null, createPos(qualifiedNameContext)); columns.add(columnDesc); } for (StarRocksParser.AssignmentContext assignmentContext : importColumnsContext.columnProperties().assignment()) { ColumnAssignment columnAssignment = (ColumnAssignment) (visit(assignmentContext)); Expr expr = columnAssignment.getExpr(); ImportColumnDesc columnDesc = new ImportColumnDesc(columnAssignment.getColumn(), expr, createPos(assignmentContext)); columns.add(columnDesc); } return new ImportColumnsStmt(columns, createPos(importColumnsContext)); } private Map<String, String> getJobProperties(StarRocksParser.JobPropertiesContext jobPropertiesContext) { Map<String, String> jobProperties = new HashMap<>(); if (jobPropertiesContext != null) { List<Property> propertyList = visit(jobPropertiesContext.properties().property(), Property.class); for (Property property : propertyList) { jobProperties.put(property.getKey(), property.getValue()); } } return jobProperties; } private Map<String, String> getDataSourceProperties( StarRocksParser.DataSourcePropertiesContext dataSourcePropertiesContext) { Map<String, String> dataSourceProperties = new HashMap<>(); if (dataSourcePropertiesContext != null) { List<Property> propertyList = visit(dataSourcePropertiesContext.propertyList().property(), Property.class); for (Property property : propertyList) { dataSourceProperties.put(property.getKey(), property.getValue()); } } return dataSourceProperties; } public List<String> getColumnNames(StarRocksParser.ColumnAliasesContext context) { if (context == null) { return null; } List<Identifier> targetColumnNamesIdentifiers = 
visitIfPresent(context.identifier(), Identifier.class); if (targetColumnNamesIdentifiers != null) { return targetColumnNamesIdentifiers.stream() .map(Identifier::getValue).map(String::toLowerCase).collect(toList()); } else { return null; } } private NodePosition createPos(ParserRuleContext context) { return createPos(context.start, context.stop); } private NodePosition createPos(Token start, Token stop) { if (start == null) { return NodePosition.ZERO; } if (stop == null) { return new NodePosition(start.getLine(), start.getCharPositionInLine()); } return new NodePosition(start, stop); } private LabelName createLabelName(StarRocksParser.QualifiedNameContext dbCtx, StarRocksParser.IdentifierContext nameCtx) { Token start = null; Token stop = null; String name = null; if (nameCtx != null) { name = getIdentifierName(nameCtx); start = nameCtx.start; stop = nameCtx.stop; } String dbName = null; if (dbCtx != null) { dbName = getQualifiedName(dbCtx).toString(); start = dbCtx.start; } return new LabelName(dbName, name, createPos(start, stop)); } private List<HintNode> extractQueryScopeHintNode() { List<HintNode> res = Lists.newArrayList(); for (Map.Entry<ParserRuleContext, List<HintNode>> entry : hintMap.entrySet()) { for (HintNode hintNode : entry.getValue()) { if (hintNode.getScope() == HintNode.Scope.QUERY) { res.add(hintNode); } } } Collections.sort(res); return res; } }
class AstBuilder extends StarRocksBaseVisitor<ParseNode> { private final long sqlMode; private final IdentityHashMap<ParserRuleContext, List<HintNode>> hintMap; private int placeHolderSlotId = 0; private List<Parameter> parameters; private static final BigInteger LONG_MAX = new BigInteger("9223372036854775807"); private static final BigInteger LARGEINT_MAX_ABS = new BigInteger("170141183460469231731687303715884105728"); private static final List<String> DATE_FUNCTIONS = Lists.newArrayList(FunctionSet.DATE_ADD, FunctionSet.ADDDATE, FunctionSet.DATE_ADD, FunctionSet.DATE_SUB, FunctionSet.SUBDATE, FunctionSet.DAYS_SUB); private static final List<String> PARTITION_FUNCTIONS = Lists.newArrayList(FunctionSet.SUBSTR, FunctionSet.SUBSTRING, FunctionSet.FROM_UNIXTIME, FunctionSet.FROM_UNIXTIME_MS, FunctionSet.STR2DATE); public AstBuilder(long sqlMode) { this(sqlMode, new IdentityHashMap<>()); } public AstBuilder(long sqlMode, IdentityHashMap<ParserRuleContext, List<HintNode>> hintMap) { this.hintMap = hintMap; long hintSqlMode = 0L; for (Map.Entry<ParserRuleContext, List<HintNode>> entry : hintMap.entrySet()) { for (HintNode hint : entry.getValue()) { if (hint instanceof SetVarHint) { SetVarHint setVarHint = (SetVarHint) hint; hintSqlMode = setVarHint.getSqlModeHintValue(); } } } this.sqlMode = sqlMode | hintSqlMode; } public List<Parameter> getParameters() { return parameters; } @Override public ParseNode visitSingleStatement(StarRocksParser.SingleStatementContext context) { if (context.statement() != null) { StatementBase stmt = (StatementBase) visit(context.statement()); if (MapUtils.isNotEmpty(hintMap)) { stmt.setAllQueryScopeHints(extractQueryScopeHintNode()); hintMap.clear(); } return stmt; } else { return visit(context.emptyStatement()); } } @Override public ParseNode visitEmptyStatement(StarRocksParser.EmptyStatementContext context) { return new EmptyStmt(); } @Override public ParseNode visitUseDatabaseStatement(StarRocksParser.UseDatabaseStatementContext context) { 
NodePosition pos = createPos(context); QualifiedName qualifiedName = getQualifiedName(context.qualifiedName()); List<String> parts = qualifiedName.getParts(); if (parts.size() == 1) { return new UseDbStmt(null, parts.get(0), pos); } else if (parts.size() == 2) { return new UseDbStmt(parts.get(0), parts.get(1), pos); } else { throw new ParsingException(PARSER_ERROR_MSG.invalidDbFormat(qualifiedName.toString()), qualifiedName.getPos()); } } @Override public ParseNode visitUseCatalogStatement(StarRocksParser.UseCatalogStatementContext context) { StringLiteral literal = (StringLiteral) visit(context.string()); return new UseCatalogStmt(literal.getValue(), createPos(context)); } @Override public ParseNode visitSetCatalogStatement(StarRocksParser.SetCatalogStatementContext context) { Identifier identifier = (Identifier) visit(context.identifierOrString()); String catalogName = identifier.getValue(); return new SetCatalogStmt(catalogName, createPos(context)); } @Override public ParseNode visitShowDatabasesStatement(StarRocksParser.ShowDatabasesStatementContext context) { String catalog = null; NodePosition pos = createPos(context); if (context.catalog != null) { QualifiedName dbName = getQualifiedName(context.catalog); catalog = dbName.toString(); } if (context.pattern != null) { StringLiteral stringLiteral = (StringLiteral) visit(context.pattern); return new ShowDbStmt(stringLiteral.getValue(), null, catalog, pos); } else if (context.expression() != null) { return new ShowDbStmt(null, (Expr) visit(context.expression()), catalog, pos); } else { return new ShowDbStmt(null, null, catalog, pos); } } @Override public ParseNode visitAlterDbQuotaStatement(StarRocksParser.AlterDbQuotaStatementContext context) { String dbName = ((Identifier) visit(context.identifier(0))).getValue(); NodePosition pos = createPos(context); if (context.DATA() != null) { String quotaValue = ((Identifier) visit(context.identifier(1))).getValue(); return new AlterDatabaseQuotaStmt(dbName, 
AlterDatabaseQuotaStmt.QuotaType.DATA, quotaValue, pos); } else { String quotaValue = context.INTEGER_VALUE().getText(); return new AlterDatabaseQuotaStmt(dbName, AlterDatabaseQuotaStmt.QuotaType.REPLICA, quotaValue, pos); } } @Override public ParseNode visitCreateDbStatement(StarRocksParser.CreateDbStatementContext context) { String catalogName = ""; if (context.catalog != null) { catalogName = getIdentifierName(context.catalog); } String dbName = getIdentifierName(context.database); Map<String, String> properties = new HashMap<>(); if (context.properties() != null) { List<Property> propertyList = visit(context.properties().property(), Property.class); for (Property property : propertyList) { properties.put(property.getKey(), property.getValue()); } } return new CreateDbStmt(context.IF() != null, catalogName, dbName, properties, createPos(context)); } @Override public ParseNode visitDropDbStatement(StarRocksParser.DropDbStatementContext context) { String catalogName = ""; if (context.catalog != null) { catalogName = getIdentifierName(context.catalog); } String dbName = getIdentifierName(context.database); return new DropDbStmt(context.IF() != null, catalogName, dbName, context.FORCE() != null, createPos(context)); } @Override public ParseNode visitShowCreateDbStatement(StarRocksParser.ShowCreateDbStatementContext context) { String dbName = ((Identifier) visit(context.identifier())).getValue(); return new ShowCreateDbStmt(dbName, createPos(context)); } @Override public ParseNode visitAlterDatabaseRenameStatement(StarRocksParser.AlterDatabaseRenameStatementContext context) { String dbName = ((Identifier) visit(context.identifier(0))).getValue(); String newName = ((Identifier) visit(context.identifier(1))).getValue(); return new AlterDatabaseRenameStatement(dbName, newName, createPos(context)); } @Override public ParseNode visitRecoverDbStmt(StarRocksParser.RecoverDbStmtContext context) { String dbName = ((Identifier) visit(context.identifier())).getValue(); return 
new RecoverDbStmt(dbName, createPos(context)); } @Override public ParseNode visitShowDataStmt(StarRocksParser.ShowDataStmtContext context) { NodePosition pos = createPos(context); if (context.FROM() != null) { QualifiedName qualifiedName = getQualifiedName(context.qualifiedName()); TableName targetTableName = qualifiedNameToTableName(qualifiedName); return new ShowDataStmt(targetTableName.getDb(), targetTableName.getTbl(), pos); } else { return new ShowDataStmt(null, null, pos); } } @Override public ParseNode visitCreateTableStatement(StarRocksParser.CreateTableStatementContext context) { Map<String, String> properties = null; if (context.properties() != null) { properties = new HashMap<>(); List<Property> propertyList = visit(context.properties().property(), Property.class); for (Property property : propertyList) { properties.put(property.getKey(), property.getValue()); } } Map<String, String> extProperties = null; if (context.extProperties() != null) { extProperties = new HashMap<>(); List<Property> propertyList = visit(context.extProperties().properties().property(), Property.class); for (Property property : propertyList) { extProperties.put(property.getKey(), property.getValue()); } } TableName tableName = qualifiedNameToTableName(getQualifiedName(context.qualifiedName())); List<ColumnDef> columnDefs = null; if (context.columnDesc() != null) { columnDefs = getColumnDefs(context.columnDesc()); } return new CreateTableStmt( context.IF() != null, context.EXTERNAL() != null, tableName, columnDefs, context.indexDesc() == null ? null : getIndexDefs(context.indexDesc()), context.engineDesc() == null ? "" : ((Identifier) visit(context.engineDesc().identifier())).getValue(), context.charsetDesc() == null ? null : ((Identifier) visit(context.charsetDesc().identifierOrString())).getValue(), context.keyDesc() == null ? null : getKeysDesc(context.keyDesc()), context.partitionDesc() == null ? 
null : getPartitionDesc(context.partitionDesc(), columnDefs), context.distributionDesc() == null ? null : (DistributionDesc) visit(context.distributionDesc()), properties, extProperties, context.comment() == null ? null : ((StringLiteral) visit(context.comment().string())).getStringValue(), context.rollupDesc() == null ? null : context.rollupDesc().rollupItem().stream().map(this::getRollup).collect(toList()), context.orderByDesc() == null ? null : visit(context.orderByDesc().identifierList().identifier(), Identifier.class) .stream().map(Identifier::getValue).collect(toList())); } private PartitionDesc getPartitionDesc(StarRocksParser.PartitionDescContext context, List<ColumnDef> columnDefs) { List<PartitionDesc> partitionDescList = new ArrayList<>(); if (context.functionCall() != null) { String currentGranularity = null; for (StarRocksParser.RangePartitionDescContext rangePartitionDescContext : context.rangePartitionDesc()) { final PartitionDesc rangePartitionDesc = (PartitionDesc) visit(rangePartitionDescContext); if (!(rangePartitionDesc instanceof MultiRangePartitionDesc)) { throw new ParsingException("Automatic partition table creation only supports " + "batch create partition syntax", rangePartitionDesc.getPos()); } MultiRangePartitionDesc multiRangePartitionDesc = (MultiRangePartitionDesc) rangePartitionDesc; String descGranularity = multiRangePartitionDesc.getTimeUnit().toLowerCase(); if (currentGranularity == null) { currentGranularity = descGranularity; } else if (!currentGranularity.equals(descGranularity)) { throw new ParsingException("The partition granularity of automatic partition table " + "batch creation in advance should be consistent", rangePartitionDesc.getPos()); } partitionDescList.add(rangePartitionDesc); } FunctionCallExpr functionCallExpr = (FunctionCallExpr) visit(context.functionCall()); List<String> columnList = AnalyzerUtils.checkAndExtractPartitionCol(functionCallExpr, columnDefs); 
AnalyzerUtils.checkAutoPartitionTableLimit(functionCallExpr, currentGranularity); RangePartitionDesc rangePartitionDesc = new RangePartitionDesc(columnList, partitionDescList); rangePartitionDesc.setAutoPartitionTable(true); return new ExpressionPartitionDesc(rangePartitionDesc, functionCallExpr); } StarRocksParser.PrimaryExpressionContext primaryExpressionContext = context.primaryExpression(); if (primaryExpressionContext != null) { Expr primaryExpression = (Expr) visit(primaryExpressionContext); if (context.RANGE() != null) { for (StarRocksParser.RangePartitionDescContext rangePartitionDescContext : context.rangePartitionDesc()) { final PartitionDesc rangePartitionDesc = (PartitionDesc) visit(rangePartitionDescContext); partitionDescList.add(rangePartitionDesc); } } List<String> columnList = checkAndExtractPartitionColForRange(primaryExpression, false); RangePartitionDesc rangePartitionDesc = new RangePartitionDesc(columnList, partitionDescList); if (primaryExpression instanceof FunctionCallExpr) { FunctionCallExpr functionCallExpr = (FunctionCallExpr) primaryExpression; String functionName = functionCallExpr.getFnName().getFunction(); if (FunctionSet.FROM_UNIXTIME.equals(functionName) || FunctionSet.FROM_UNIXTIME_MS.equals(functionName)) { primaryExpression = new CastExpr(TypeDef.create(PrimitiveType.DATETIME), primaryExpression); } } return new ExpressionPartitionDesc(rangePartitionDesc, primaryExpression); } List<Identifier> identifierList = visit(context.identifierList().identifier(), Identifier.class); List<String> columnList = identifierList.stream().map(Identifier::getValue).collect(toList()); if (context.RANGE() != null) { for (StarRocksParser.RangePartitionDescContext rangePartitionDescContext : context.rangePartitionDesc()) { final PartitionDesc rangePartitionDesc = (PartitionDesc) visit(rangePartitionDescContext); partitionDescList.add(rangePartitionDesc); } return new RangePartitionDesc(columnList, partitionDescList); } else if (context.LIST() != 
null) { for (StarRocksParser.ListPartitionDescContext listPartitionDescContext : context.listPartitionDesc()) { final PartitionDesc listPartitionDesc = (PartitionDesc) visit(listPartitionDescContext); partitionDescList.add(listPartitionDesc); } return new ListPartitionDesc(columnList, partitionDescList); } else { if (context.listPartitionDesc().size() > 0) { throw new ParsingException("Does not support creating partitions in advance"); } ListPartitionDesc listPartitionDesc = new ListPartitionDesc(columnList, partitionDescList); listPartitionDesc.setAutoPartitionTable(true); return listPartitionDesc; } } private List<String> checkAndExtractPartitionColForRange(Expr expr, boolean hasCast) { if (expr instanceof CastExpr) { CastExpr castExpr = (CastExpr) expr; return checkAndExtractPartitionColForRange(castExpr.getChild(0), true); } NodePosition pos = expr.getPos(); List<String> columnList = new ArrayList<>(); if (expr instanceof FunctionCallExpr) { FunctionCallExpr functionCallExpr = (FunctionCallExpr) expr; String functionName = functionCallExpr.getFnName().getFunction().toLowerCase(); List<Expr> paramsExpr = functionCallExpr.getParams().exprs(); if (PARTITION_FUNCTIONS.contains(functionName)) { Expr firstExpr = paramsExpr.get(0); if (firstExpr instanceof SlotRef) { columnList.add(((SlotRef) firstExpr).getColumnName()); } else { throw new ParsingException(PARSER_ERROR_MSG.unsupportedExprWithInfo(expr.toSql(), "PARTITION BY"), pos); } } else { throw new ParsingException(PARSER_ERROR_MSG.unsupportedExprWithInfo(expr.toSql(), "PARTITION BY"), pos); } if (functionName.equals(FunctionSet.FROM_UNIXTIME) || functionName.equals(FunctionSet.FROM_UNIXTIME_MS)) { if (hasCast || paramsExpr.size() > 1) { throw new ParsingException(PARSER_ERROR_MSG.unsupportedExprWithInfo(expr.toSql(), "PARTITION BY"), pos); } } } return columnList; } private AlterClause getRollup(StarRocksParser.RollupItemContext rollupItemContext) { String rollupName = ((Identifier) 
visit(rollupItemContext.identifier())).getValue(); List<Identifier> columnList = visit(rollupItemContext.identifierList().identifier(), Identifier.class); List<String> dupKeys = null; if (rollupItemContext.dupKeys() != null) { final List<Identifier> identifierList = visit(rollupItemContext.dupKeys().identifierList().identifier(), Identifier.class); dupKeys = identifierList.stream().map(Identifier::getValue).collect(toList()); } String baseRollupName = rollupItemContext.fromRollup() != null ? ((Identifier) visit(rollupItemContext.fromRollup().identifier())).getValue() : null; Map<String, String> properties = null; if (rollupItemContext.properties() != null) { properties = new HashMap<>(); List<Property> propertyList = visit(rollupItemContext.properties().property(), Property.class); for (Property property : propertyList) { properties.put(property.getKey(), property.getValue()); } } return new AddRollupClause(rollupName, columnList.stream().map(Identifier::getValue).collect(toList()), dupKeys, baseRollupName, properties, createPos(rollupItemContext)); } private KeysDesc getKeysDesc(StarRocksParser.KeyDescContext context) { KeysType keysType = null; if (null != context.PRIMARY()) { keysType = KeysType.PRIMARY_KEYS; } else if (null != context.DUPLICATE()) { keysType = KeysType.DUP_KEYS; } else if (null != context.AGGREGATE()) { keysType = KeysType.AGG_KEYS; } else if (null != context.UNIQUE()) { keysType = KeysType.UNIQUE_KEYS; } List<Identifier> columnList = visit(context.identifierList().identifier(), Identifier.class); return new KeysDesc(keysType, columnList.stream().map(Identifier::getValue).collect(toList()), createPos(context)); } private List<IndexDef> getIndexDefs(List<StarRocksParser.IndexDescContext> indexDesc) { List<IndexDef> indexDefList = new ArrayList<>(); for (StarRocksParser.IndexDescContext context : indexDesc) { String indexName = ((Identifier) visit(context.identifier())).getValue(); List<Identifier> columnList = 
visit(context.identifierList().identifier(), Identifier.class);  // (continuation) tail of the index-def loop: columnList initializer
        // Optional COMMENT on the index; null when absent.
        String comment = context.comment() != null ?
                ((StringLiteral) visit(context.comment())).getStringValue() : null;
        final IndexDef indexDef = new IndexDef(indexName,
                columnList.stream().map(Identifier::getValue).collect(toList()),
                getIndexType(context.indexType()), comment,
                getPropertyList(context.propertyList()), createPos(context));
        indexDefList.add(indexDef);
    }
    return indexDefList;
}

// Lowers every columnDesc parse context into a ColumnDef (delegates per-column to getColumnDef).
private List<ColumnDef> getColumnDefs(List<StarRocksParser.ColumnDescContext> columnDesc) {
    return columnDesc.stream().map(context -> getColumnDef(context)).collect(toList());
}

// Translates one column description into a ColumnDef: name, type, optional charset,
// KEY flag, aggregate type, nullability, AUTO_INCREMENT, DEFAULT clause, generated-column
// expression and COMMENT. Throws ParsingException on invalid combinations.
private ColumnDef getColumnDef(StarRocksParser.ColumnDescContext context) {
    Identifier colIdentifier = (Identifier) visit(context.identifier());
    String columnName = colIdentifier.getValue();
    TypeDef typeDef = new TypeDef(getType(context.type()), createPos(context.type()));
    String charsetName = context.charsetName() != null ?
            ((Identifier) visit(context.charsetName().identifier())).getValue() : null;
    boolean isKey = context.KEY() != null;
    AggregateType aggregateType = context.aggDesc() != null ?
            AggregateType.valueOf(context.aggDesc().getText().toUpperCase()) : null;
    // Three-state nullability: null = not specified, true = NULL, false = NOT NULL.
    Boolean isAllowNull = null;
    if (context.NOT() != null && context.NULL() != null) {
        isAllowNull = false;
    } else if (context.NULL() != null) {
        isAllowNull = true;
    }
    Boolean isAutoIncrement = null;
    if (context.AUTO_INCREMENT() != null) {
        isAutoIncrement = true;
    }
    // AUTO_INCREMENT combined with an explicit NULL is rejected.
    if (isAutoIncrement != null && isAllowNull != null && isAllowNull) {
        throw new ParsingException(PARSER_ERROR_MSG.nullColFoundInPK(columnName), colIdentifier.getPos());
    }
    // AUTO_INCREMENT columns are implicitly NOT NULL.
    if (isAutoIncrement != null) {
        isAllowNull = false;
    }
    // DEFAULT clause: string literal, NULL, CURRENT_TIMESTAMP, or a zero-arg function call.
    ColumnDef.DefaultValueDef defaultValueDef = ColumnDef.DefaultValueDef.NOT_SET;
    final StarRocksParser.DefaultDescContext defaultDescContext = context.defaultDesc();
    if (defaultDescContext != null) {
        if (defaultDescContext.string() != null) {
            String value = ((StringLiteral) visit(defaultDescContext.string())).getStringValue();
            defaultValueDef = new ColumnDef.DefaultValueDef(true, new StringLiteral(value));
        } else if (defaultDescContext.NULL() != null) {
            defaultValueDef = ColumnDef.DefaultValueDef.NULL_DEFAULT_VALUE;
        } else if (defaultDescContext.CURRENT_TIMESTAMP() != null) {
            defaultValueDef = ColumnDef.DefaultValueDef.CURRENT_TIMESTAMP_VALUE;
        } else if (defaultDescContext.qualifiedName() != null) {
            String functionName = defaultDescContext.qualifiedName().getText().toLowerCase();
            defaultValueDef = new ColumnDef.DefaultValueDef(true,
                    new FunctionCallExpr(functionName, new ArrayList<>()));
        }
    }
    // Generated columns may be neither NOT NULL nor KEY.
    final StarRocksParser.GeneratedColumnDescContext generatedColumnDescContext = context.generatedColumnDesc();
    Expr expr = null;
    if (generatedColumnDescContext != null) {
        if (isAllowNull != null && isAllowNull == false) {
            throw new ParsingException(PARSER_ERROR_MSG.foundNotNull("Generated Column"));
        }
        if (isKey) {
            throw new ParsingException(PARSER_ERROR_MSG.isKey("Generated Column"));
        }
        expr = (Expr) visit(generatedColumnDescContext.expression());
    }
    // Missing COMMENT becomes the empty string (not null), unlike the index comment above.
    String comment = context.comment() == null ?
            "" : ((StringLiteral) visit(context.comment().string())).getStringValue();
    return new ColumnDef(columnName, typeDef, charsetName, isKey, aggregateType, isAllowNull,
            defaultValueDef, isAutoIncrement, expr, comment, createPos(context));
}

// CREATE TEMPORARY TABLE ... AS SELECT: gated behind the experimental config flag;
// builds a minimal CreateTableStmt (default engine, no schema) wrapped in a CTAS.
@Override
public ParseNode visitCreateTemporaryTableStatement(StarRocksParser.CreateTemporaryTableStatementContext context) {
    if (!Config.enable_experimental_temporary_table) {
        throw new SemanticException(
                "Temporary table feature is experimental and disabled by default, could be enabled through " +
                        ": admin set frontend config('enable_experimental_temporary_table' = 'true')");
    }
    CreateTableStmt createTableStmt = new CreateTableStmt(
            false, false,
            qualifiedNameToTableName(getQualifiedName(context.qualifiedName())),
            null, EngineType.defaultEngine().name(),
            null, null, null, new HashMap<>(), null, null);
    return new CreateTableAsSelectStmt(
            createTableStmt, null,
            (QueryStatement) visit(context.queryStatement()));
}

// CREATE TABLE ... AS SELECT: collects properties, partition/distribution/key/order-by
// descriptors and optional explicit column names, then pairs the CreateTableStmt with the query.
@Override
public ParseNode visitCreateTableAsSelectStatement(StarRocksParser.CreateTableAsSelectStatementContext context) {
    Map<String, String> properties = new HashMap<>();
    if (context.properties() != null) {
        List<Property> propertyList = visit(context.properties().property(), Property.class);
        for (Property property : propertyList) {
            properties.put(property.getKey(), property.getValue());
        }
    }
    PartitionDesc partitionDesc = null;
    if (context.partitionDesc() != null) {
        partitionDesc = (PartitionDesc) visit(context.partitionDesc());
        // A list partition without an explicit LIST keyword marks an auto-partitioned table.
        if (partitionDesc instanceof ListPartitionDesc && context.partitionDesc().LIST() == null) {
            ((ListPartitionDesc) partitionDesc).setAutoPartitionTable(true);
        }
    }
    CreateTableStmt createTableStmt = new CreateTableStmt(
            context.IF() != null,
            false,
            qualifiedNameToTableName(getQualifiedName(context.qualifiedName())),
            null,
            context.indexDesc() == null ? null : getIndexDefs(context.indexDesc()),
            "",
            null,
            context.keyDesc() == null ? null : getKeysDesc(context.keyDesc()),
            partitionDesc,
            context.distributionDesc() == null ? null : (DistributionDesc) visit(context.distributionDesc()),
            properties,
            null,
            context.comment() == null ? null :
                    ((StringLiteral) visit(context.comment().string())).getStringValue(),
            null,
            context.orderByDesc() == null ? null :
                    visit(context.orderByDesc().identifierList().identifier(), Identifier.class)
                            .stream().map(Identifier::getValue).collect(toList())
    );
    List<Identifier> columns = visitIfPresent(context.identifier(), Identifier.class);
    return new CreateTableAsSelectStmt(
            createTableStmt,
            columns == null ? null : columns.stream().map(Identifier::getValue).collect(toList()),
            (QueryStatement) visit(context.queryStatement()), createPos(context));
}

// CREATE TABLE ... LIKE: optional partition/distribution overrides. (Continues on the next line.)
@Override
public ParseNode visitCreateTableLikeStatement(StarRocksParser.CreateTableLikeStatementContext context) {
    PartitionDesc partitionDesc = context.partitionDesc() == null ? null :
            (PartitionDesc) visit(context.partitionDesc());
    DistributionDesc distributionDesc = context.distributionDesc() == null ?
null : (DistributionDesc) visit(context.distributionDesc());  // (continuation) tail of visitCreateTableLikeStatement
    Map<String, String> properties = getProperties(context.properties());
    // qualifiedName(0) = new table, qualifiedName(1) = table being copied.
    return new CreateTableLikeStmt(context.IF() != null,
            qualifiedNameToTableName(getQualifiedName(context.qualifiedName(0))),
            qualifiedNameToTableName(getQualifiedName(context.qualifiedName(1))),
            partitionDesc, distributionDesc, properties, createPos(context));
}

// SHOW CREATE TABLE/VIEW/MATERIALIZED VIEW: the token pair decides the CreateTableType.
@Override
public ParseNode visitShowCreateTableStatement(StarRocksParser.ShowCreateTableStatementContext context) {
    QualifiedName qualifiedName = getQualifiedName(context.qualifiedName());
    TableName targetTableName = qualifiedNameToTableName(qualifiedName);
    NodePosition pos = createPos(context);
    if (context.MATERIALIZED() != null && context.VIEW() != null) {
        return new ShowCreateTableStmt(targetTableName, ShowCreateTableStmt.CreateTableType.MATERIALIZED_VIEW, pos);
    }
    if (context.VIEW() != null) {
        return new ShowCreateTableStmt(targetTableName, ShowCreateTableStmt.CreateTableType.VIEW, pos);
    }
    return new ShowCreateTableStmt(targetTableName, ShowCreateTableStmt.CreateTableType.TABLE, pos);
}

// DROP TABLE [IF EXISTS] [FORCE]; the 'false' argument distinguishes table from view drops.
@Override
public ParseNode visitDropTableStatement(StarRocksParser.DropTableStatementContext context) {
    boolean ifExists = context.IF() != null && context.EXISTS() != null;
    boolean force = context.FORCE() != null;
    QualifiedName qualifiedName = getQualifiedName(context.qualifiedName());
    TableName targetTableName = qualifiedNameToTableName(qualifiedName);
    return new DropTableStmt(ifExists, targetTableName, false, force, createPos(context));
}

// RECOVER TABLE: straight name translation.
@Override
public ParseNode visitRecoverTableStatement(StarRocksParser.RecoverTableStatementContext context) {
    QualifiedName qualifiedName = getQualifiedName(context.qualifiedName());
    TableName tableName = qualifiedNameToTableName(qualifiedName);
    return new RecoverTableStmt(tableName, createPos(context));
}

// TRUNCATE TABLE [PARTITION ...]: the node position is trimmed to end at the
// partition-name list when one is present.
@Override
public ParseNode visitTruncateTableStatement(StarRocksParser.TruncateTableStatementContext context) {
    QualifiedName qualifiedName = getQualifiedName(context.qualifiedName());
    TableName targetTableName = qualifiedNameToTableName(qualifiedName);
    Token start = context.start;
    Token stop = context.stop;
    PartitionNames partitionNames = null;
    if (context.partitionNames() != null) {
        stop = context.partitionNames().stop;
        partitionNames = (PartitionNames) visit(context.partitionNames());
    }
    NodePosition pos = createPos(start, stop);
    return new TruncateTableStmt(new TableRef(targetTableName, null, partitionNames, pos));
}

// SHOW [FULL] TABLES [FROM [catalog.]db] [LIKE pattern | WHERE expr]:
// a two-part qualified name is catalog.db, a one-part name is just db.
@Override
public ParseNode visitShowTableStatement(StarRocksParser.ShowTableStatementContext context) {
    boolean isVerbose = context.FULL() != null;
    String database = null;
    String catalog = null;
    if (context.qualifiedName() != null) {
        QualifiedName qualifiedName = getQualifiedName(context.qualifiedName());
        List<String> parts = qualifiedName.getParts();
        if (parts.size() == 2) {
            catalog = qualifiedName.getParts().get(0);
            database = qualifiedName.getParts().get(1);
        } else if (parts.size() == 1) {
            database = qualifiedName.getParts().get(0);
        }
    }
    NodePosition pos = createPos(context);
    if (context.pattern != null) {
        StringLiteral stringLiteral = (StringLiteral) visit(context.pattern);
        return new ShowTableStmt(database, isVerbose, stringLiteral.getValue(), null, catalog, pos);
    } else if (context.expression() != null) {
        return new ShowTableStmt(database, isVerbose, null, (Expr) visit(context.expression()), catalog, pos);
    } else {
        return new ShowTableStmt(database, isVerbose, null, null, catalog, pos);
    }
}

// DESC[RIBE] table [ALL].
@Override
public ParseNode visitDescTableStatement(StarRocksParser.DescTableStatementContext context) {
    QualifiedName qualifiedName = getQualifiedName(context.qualifiedName());
    TableName targetTableName = qualifiedNameToTableName(qualifiedName);
    return new DescribeStmt(targetTableName, context.ALL() != null, createPos(context));
}

// SHOW TABLE STATUS [FROM db] [LIKE pattern | WHERE expr].
@Override
public ParseNode visitShowTableStatusStatement(StarRocksParser.ShowTableStatusStatementContext context) {
    QualifiedName dbName = null;
    // NOTE(review): the guard tests qualifiedName() but then reads context.db; sibling
    // visitors (e.g. visitShowColumnStatement) test context.db directly. Confirm the
    // grammar guarantees the two are non-null together before relying on this.
    if (context.qualifiedName() != null) {
        dbName = getQualifiedName(context.db);
    }
    String pattern = null;
    if (context.pattern != null) {
        StringLiteral stringLiteral = (StringLiteral) visit(context.pattern);
        pattern = stringLiteral.getValue();
    }
    Expr where = null;
    if (context.expression() != null) {
        where = (Expr) visit(context.expression());
    }
    return new ShowTableStatusStmt(dbName == null ? null : dbName.toString(),
            pattern, where, createPos(context));
}

// SHOW [FULL] COLUMNS FROM table [FROM db] [LIKE pattern | WHERE expr].
@Override
public ParseNode visitShowColumnStatement(StarRocksParser.ShowColumnStatementContext context) {
    QualifiedName tableName = getQualifiedName(context.table);
    QualifiedName dbName = null;
    if (context.db != null) {
        dbName = getQualifiedName(context.db);
    }
    String pattern = null;
    if (context.pattern != null) {
        StringLiteral stringLiteral = (StringLiteral) visit(context.pattern);
        pattern = stringLiteral.getValue();
    }
    Expr where = null;
    if (context.expression() != null) {
        where = (Expr) visit(context.expression());
    }
    return new ShowColumnStmt(qualifiedNameToTableName(tableName),
            dbName == null ? null : dbName.toString(),
            pattern, context.FULL() != null, where, createPos(context));
}

// REFRESH EXTERNAL TABLE [PARTITION (...)]: partition names arrive as string literals.
@Override
public ParseNode visitRefreshTableStatement(StarRocksParser.RefreshTableStatementContext context) {
    QualifiedName qualifiedName = getQualifiedName(context.qualifiedName());
    TableName targetTableName = qualifiedNameToTableName(qualifiedName);
    List<String> partitionNames = null;
    if (context.string() != null) {
        partitionNames = context.string().stream()
                .map(c -> ((StringLiteral) visit(c)).getStringValue()).collect(toList());
    }
    return new RefreshTableStmt(targetTableName, partitionNames, createPos(context));
}

// ALTER TABLE: ROLLUP add/drop is handled specially; everything else is a
// generic list of alter clauses.
@Override
public ParseNode visitAlterTableStatement(StarRocksParser.AlterTableStatementContext context) {
    QualifiedName qualifiedName = getQualifiedName(context.qualifiedName());
    TableName targetTableName = qualifiedNameToTableName(qualifiedName);
    NodePosition pos = createPos(context);
    if (context.ROLLUP() != null) {
        if (context.ADD() != null) {
            List<AlterClause> clauses = context.rollupItem().stream().map(this::getRollup).collect(toList());
            return new AlterTableStmt(targetTableName, clauses, pos);
        } else {
            // DROP ROLLUP: one DropRollupClause per listed rollup name.
            List<Identifier> rollupList = visit(context.identifier(), Identifier.class);
            List<AlterClause> clauses = new ArrayList<>();
            for (Identifier rollupName : rollupList) {
                clauses.add(new DropRollupClause(rollupName.getValue(), null, rollupName.getPos()));
            }
            return new AlterTableStmt(targetTableName, clauses, pos);
        }
    } else {
        List<AlterClause> alterClauses = visit(context.alterClause(), AlterClause.class);
        return new AlterTableStmt(targetTableName, alterClauses, pos);
    }
}

// CANCEL ALTER TABLE: map the keyword to the alter-job type. (Continues on the next line.)
@Override
public ParseNode visitCancelAlterTableStatement(StarRocksParser.CancelAlterTableStatementContext context) {
    ShowAlterStmt.AlterType alterType;
    if (context.ROLLUP() != null) {
        alterType = ShowAlterStmt.AlterType.ROLLUP;
    } else if (context.MATERIALIZED() != null && context.VIEW() != null) {
        alterType = ShowAlterStmt.AlterType.MATERIALIZED_VIEW;
    } else if (context.OPTIMIZE() != null) {
        alterType = ShowAlterStmt.AlterType.OPTIMIZE;  // (continuation) tail of visitCancelAlterTableStatement
    } else {
        alterType = ShowAlterStmt.AlterType.COLUMN;  // default when no keyword matched
    }
    QualifiedName qualifiedName = getQualifiedName(context.qualifiedName());
    TableName dbTableName = qualifiedNameToTableName(qualifiedName);
    // Optional list of alter-job ids to cancel.
    List<Long> alterJobIdList = null;
    if (context.INTEGER_VALUE() != null) {
        alterJobIdList = context.INTEGER_VALUE()
                .stream().map(ParseTree::getText).map(Long::parseLong).collect(toList());
    }
    return new CancelAlterTableStmt(alterType, dbTableName, alterJobIdList, createPos(context));
}

// SHOW ALTER TABLE {COLUMN|ROLLUP|OPTIMIZE|MATERIALIZED VIEW}
// [FROM db] [WHERE expr] [ORDER BY ...] [LIMIT ...].
@Override
public ParseNode visitShowAlterStatement(StarRocksParser.ShowAlterStatementContext context) {
    QualifiedName dbName = null;
    if (context.db != null) {
        dbName = getQualifiedName(context.db);
    }
    Expr where = null;
    if (context.expression() != null) {
        where = (Expr) visit(context.expression());
    }
    // Same keyword-to-type mapping as visitCancelAlterTableStatement.
    ShowAlterStmt.AlterType alterType;
    if (context.ROLLUP() != null) {
        alterType = ShowAlterStmt.AlterType.ROLLUP;
    } else if (context.MATERIALIZED() != null && context.VIEW() != null) {
        alterType = ShowAlterStmt.AlterType.MATERIALIZED_VIEW;
    } else if (context.OPTIMIZE() != null) {
        alterType = ShowAlterStmt.AlterType.OPTIMIZE;
    } else {
        alterType = ShowAlterStmt.AlterType.COLUMN;
    }
    List<OrderByElement> orderByElements = null;
    if (context.ORDER() != null) {
        orderByElements = new ArrayList<>();
        orderByElements.addAll(visit(context.sortItem(), OrderByElement.class));
    }
    LimitElement limitElement = null;
    if (context.limitElement() != null) {
        limitElement = (LimitElement) visit(context.limitElement());
    }
    return new ShowAlterStmt(alterType, dbName == null ? null : dbName.toString(),
            where, orderByElements, limitElement, createPos(context));
}

// CREATE [OR REPLACE] VIEW [IF NOT EXISTS]: the two options are mutually exclusive.
@Override
public ParseNode visitCreateViewStatement(StarRocksParser.CreateViewStatementContext context) {
    QualifiedName qualifiedName = getQualifiedName(context.qualifiedName());
    TableName targetTableName = qualifiedNameToTableName(qualifiedName);
    List<ColWithComment> colWithComments = null;
    if (context.columnNameWithComment().size() > 0) {
        colWithComments = visit(context.columnNameWithComment(), ColWithComment.class);
    }
    if (context.IF() != null && context.REPLACE() != null) {
        throw new ParsingException(PARSER_ERROR_MSG.conflictedOptions("if not exists", "or replace"),
                createPos(context));
    }
    return new CreateViewStmt(
            context.IF() != null,
            context.REPLACE() != null,
            targetTableName,
            colWithComments,
            context.comment() == null ? null : ((StringLiteral) visit(context.comment())).getStringValue(),
            (QueryStatement) visit(context.queryStatement()), createPos(context));
}

// ALTER VIEW: wraps the new column list + query into a single AlterViewClause.
@Override
public ParseNode visitAlterViewStatement(StarRocksParser.AlterViewStatementContext context) {
    QualifiedName qualifiedName = getQualifiedName(context.qualifiedName());
    TableName targetTableName = qualifiedNameToTableName(qualifiedName);
    List<ColWithComment> colWithComments = null;
    if (context.columnNameWithComment().size() > 0) {
        colWithComments = visit(context.columnNameWithComment(), ColWithComment.class);
    }
    QueryStatement queryStatement = (QueryStatement) visit(context.queryStatement());
    AlterClause alterClause = new AlterViewClause(colWithComments, queryStatement, createPos(context));
    return new AlterViewStmt(targetTableName, alterClause, createPos(context));
}

// DROP VIEW [IF EXISTS]: reuses DropTableStmt with isView=true, force=false.
@Override
public ParseNode visitDropViewStatement(StarRocksParser.DropViewStatementContext context) {
    boolean ifExists = context.IF() != null && context.EXISTS() != null;
    QualifiedName qualifiedName = getQualifiedName(context.qualifiedName());
    TableName targetTableName = qualifiedNameToTableName(qualifiedName);
    return new DropTableStmt(ifExists, targetTableName, true, false, createPos(context));
}

// SHOW [TEMPORARY] PARTITIONS FROM table [WHERE ...] [ORDER BY ...] [LIMIT ...].
@Override
public ParseNode visitShowPartitionsStatement(StarRocksParser.ShowPartitionsStatementContext context) {
    boolean temp = context.TEMPORARY() != null;
    QualifiedName qualifiedName = getQualifiedName(context.qualifiedName());
    TableName tableName = qualifiedNameToTableName(qualifiedName);
    Expr where = null;
    if (context.expression() != null) {
        where = (Expr) visit(context.expression());
    }
    List<OrderByElement> orderByElements = new ArrayList<>();
    if (context.ORDER() != null) {
        orderByElements.addAll(visit(context.sortItem(), OrderByElement.class));
    }
    LimitElement limitElement = null;
    if (context.limitElement() != null) {
        limitElement = (LimitElement) visit(context.limitElement());
    }
    return new ShowPartitionsStmt(tableName, where, orderByElements, limitElement, temp, createPos(context));
}

// RECOVER PARTITION name FROM table.
@Override
public ParseNode visitRecoverPartitionStatement(StarRocksParser.RecoverPartitionStatementContext context) {
    QualifiedName qualifiedName = getQualifiedName(context.qualifiedName());
    TableName tableName = qualifiedNameToTableName(qualifiedName);
    String partitionName = ((Identifier) visit(context.identifier())).getValue();
    return new RecoverPartitionStmt(tableName, partitionName, createPos(context));
}

// SHOW TABLET: either a bare tablet id, or a table-scoped form with optional
// partition/where/order/limit (tablet id -1L marks the table-scoped variant).
@Override
public ParseNode visitShowTabletStatement(StarRocksParser.ShowTabletStatementContext context) {
    NodePosition pos = createPos(context);
    if (context.INTEGER_VALUE() != null) {
        return new ShowTabletStmt(null, Long.parseLong(context.INTEGER_VALUE().getText()), pos);
    } else {
        QualifiedName qualifiedName = getQualifiedName(context.qualifiedName());
        TableName dbTblName = qualifiedNameToTableName(qualifiedName);
        PartitionNames partitionNames = null;
        if (context.partitionNames() != null) {
            partitionNames = (PartitionNames) visit(context.partitionNames());
        }
        Expr where = null;
        if (context.expression() != null) {
            where = (Expr) visit(context.expression());
        }
        List<OrderByElement> orderByElements = null;
        if (context.ORDER() != null) {
            orderByElements = new ArrayList<>();
            orderByElements.addAll(visit(context.sortItem(), OrderByElement.class));
        }
        LimitElement limitElement = null;
        if (context.limitElement() != null) {
            limitElement = (LimitElement) visit(context.limitElement());
        }
        return new ShowTabletStmt(dbTblName, -1L, partitionNames, where, orderByElements, limitElement,
                createPos(context));
    }
}

// CREATE INDEX: desugars into ALTER TABLE ... ADD INDEX (a CreateIndexClause);
// the index position spans from the index name to the column list or COMMENT.
@Override
public ParseNode visitCreateIndexStatement(StarRocksParser.CreateIndexStatementContext context) {
    String indexName = ((Identifier) visit(context.identifier())).getValue();
    List<Identifier> columnList = visit(context.identifierList().identifier(), Identifier.class);
    Token idxStart = context.identifier().start;
    Token idxStop = context.identifierList().stop;
    String comment = null;
    if (context.comment() != null) {
        comment = ((StringLiteral) visit(context.comment())).getStringValue();
        idxStop = context.comment().stop;  // extend the span to cover the COMMENT
    }
    NodePosition idxPos = createPos(idxStart, idxStop);
    IndexDef indexDef = new IndexDef(indexName,
            columnList.stream().map(Identifier::getValue).collect(toList()),
            getIndexType(context.indexType()),
            comment, getPropertyList(context.propertyList()), idxPos);
    CreateIndexClause createIndexClause = new CreateIndexClause(indexDef, idxPos);
    QualifiedName qualifiedName = getQualifiedName(context.qualifiedName());
    TableName targetTableName = qualifiedNameToTableName(qualifiedName);
    return new AlterTableStmt(targetTableName, Lists.newArrayList(createIndexClause), createPos(context));
}

// DROP INDEX: desugars into ALTER TABLE ... DROP INDEX. (Continues on the next line.)
@Override
public ParseNode visitDropIndexStatement(StarRocksParser.DropIndexStatementContext context) {
    Identifier identifier = (Identifier) visit(context.identifier());
    DropIndexClause dropIndexClause = new DropIndexClause(identifier.getValue(),
            createPos(context.identifier()));
    QualifiedName qualifiedName = getQualifiedName(context.qualifiedName());
    TableName targetTableName = qualifiedNameToTableName(qualifiedName);
    return new AlterTableStmt(targetTableName, Lists.newArrayList(dropIndexClause),
createPos(context));  // (continuation) final argument of the AlterTableStmt in visitDropIndexStatement
}

// SHOW INDEX FROM table [FROM db].
@Override
public ParseNode visitShowIndexStatement(StarRocksParser.ShowIndexStatementContext context) {
    QualifiedName tableName = getQualifiedName(context.table);
    QualifiedName dbName = null;
    if (context.db != null) {
        dbName = getQualifiedName(context.db);
    }
    return new ShowIndexStmt(dbName == null ? null : dbName.toString(),
            qualifiedNameToTableName(tableName), createPos(context));
}

// Collects a PROPERTIES(...) clause into a case-insensitive key/value map;
// returns an empty map when the clause is absent.
private Map<String, String> buildProperties(StarRocksParser.PropertiesContext properties) {
    Map<String, String> result = new TreeMap<>(String.CASE_INSENSITIVE_ORDER);
    if (properties != null) {
        List<Property> propertyList = visit(properties.property(), Property.class);
        for (Property property : ListUtils.emptyIfNull(propertyList)) {
            result.put(property.getKey(), property.getValue());
        }
    }
    return result;
}

// SUBMIT TASK [name] AS {CTAS | INSERT}: merges explicit properties with
// session-variable hints and records the character offset of the wrapped statement
// so its SQL text can be recovered later.
@Override
public ParseNode visitSubmitTaskStatement(StarRocksParser.SubmitTaskStatementContext context) {
    QualifiedName qualifiedName = null;
    if (context.qualifiedName() != null) {
        qualifiedName = getQualifiedName(context.qualifiedName());
    }
    Map<String, String> properties = buildProperties(context.properties());
    properties.putAll(extractVarHintValues(hintMap.get(context)));
    // Exactly one of the two payload statements is present per the grammar.
    CreateTableAsSelectStmt createTableAsSelectStmt = null;
    InsertStmt insertStmt = null;
    if (context.createTableAsSelectStatement() != null) {
        createTableAsSelectStmt = (CreateTableAsSelectStmt) visit(context.createTableAsSelectStatement());
    } else if (context.insertStatement() != null) {
        insertStmt = (InsertStmt) visit(context.insertStatement());
    }
    int startIndex = 0;
    if (createTableAsSelectStmt != null) {
        startIndex = context.createTableAsSelectStatement().start.getStartIndex();
    } else {
        startIndex = context.insertStatement().start.getStartIndex();
    }
    NodePosition pos = createPos(context);
    TaskName taskName;
    if (qualifiedName == null) {
        taskName = new TaskName(null, null, pos);  // anonymous task
    } else {
        taskName = qualifiedNameToTaskName(qualifiedName);
    }
    if (createTableAsSelectStmt != null) {
        return new SubmitTaskStmt(taskName, properties, startIndex, createTableAsSelectStmt, pos);
    } else {
        return new SubmitTaskStmt(taskName, properties, startIndex, insertStmt, pos);
    }
}

// DROP TASK [FORCE].
@Override
public ParseNode visitDropTaskStatement(StarRocksParser.DropTaskStatementContext context) {
    QualifiedName qualifiedName = getQualifiedName(context.qualifiedName());
    TaskName taskName = qualifiedNameToTaskName(qualifiedName);
    boolean force = context.FORCE() != null;
    return new DropTaskStmt(taskName, force, createPos(context));
}

// Interval units accepted by an async materialized-view refresh scheme.
public static final ImmutableList<String> MATERIALIZEDVIEW_REFRESHSCHEME_SUPPORT_UNIT_IDENTIFIERS =
        new ImmutableList.Builder<String>()
                .add("SECOND").add("MINUTE").add("HOUR").add("DAY")
                .build();

// Validates the refresh-interval unit of an ASYNC scheme against the list above;
// silently accepts when no interval/unit was specified.
private void checkMaterializedViewAsyncRefreshSchemeUnitIdentifier(
        AsyncRefreshSchemeDesc asyncRefreshSchemeDesc) {
    if (asyncRefreshSchemeDesc.getIntervalLiteral() == null ||
            asyncRefreshSchemeDesc.getIntervalLiteral().getUnitIdentifier() == null) {
        return;
    }
    String unit = asyncRefreshSchemeDesc.getIntervalLiteral().getUnitIdentifier().getDescription();
    if (StringUtils.isEmpty(unit)) {
        return;
    }
    if (!MATERIALIZEDVIEW_REFRESHSCHEME_SUPPORT_UNIT_IDENTIFIERS.contains(unit)) {
        throw new ParsingException(PARSER_ERROR_MSG.forbidClauseInMV("Refresh interval unit", unit),
                asyncRefreshSchemeDesc.getIntervalLiteral().getUnitIdentifier().getPos());
    }
}

// CREATE MATERIALIZED VIEW [IF NOT EXISTS]. (Body continues on the next lines.)
@Override
public ParseNode visitCreateMaterializedViewStatement(
        StarRocksParser.CreateMaterializedViewStatementContext context) {
    boolean ifNotExist = context.IF() != null;
    QualifiedName qualifiedName = getQualifiedName(context.mvName);
    TableName tableName = qualifiedNameToTableName(qualifiedName);
    List<ColWithComment> colWithComments = null;
    if (!context.columnNameWithComment().isEmpty()) {
        colWithComments = visit(context.columnNameWithComment(), ColWithComment.class);
    }
    String comment = context.comment() == null ?
null : ((StringLiteral) visit(context.comment().string())).getStringValue();  // (continuation) COMMENT of the MV
    QueryStatement queryStatement = (QueryStatement) visit(context.queryStatement());
    RefreshSchemeClause refreshSchemeDesc = null;
    Map<String, String> properties = new HashMap<>();
    ExpressionPartitionDesc expressionPartitionDesc = null;
    DistributionDesc distributionDesc = null;
    List<String> sortKeys = null;
    // Each MV clause may appear at most once; duplicates raise a ParsingException.
    for (StarRocksParser.MaterializedViewDescContext desc : ListUtils.emptyIfNull(context.materializedViewDesc())) {
        NodePosition clausePos = createPos(desc);
        if (desc.properties() != null) {
            if (MapUtils.isNotEmpty(properties)) {
                throw new ParsingException(PARSER_ERROR_MSG.duplicatedClause("PROPERTY"), clausePos);
            }
            List<Property> propertyList = visit(desc.properties().property(), Property.class);
            for (Property property : propertyList) {
                properties.put(property.getKey(), property.getValue());
            }
        }
        if (desc.refreshSchemeDesc() != null) {
            if (refreshSchemeDesc != null) {
                throw new ParsingException(PARSER_ERROR_MSG.duplicatedClause("REFRESH"), clausePos);
            }
            refreshSchemeDesc = ((RefreshSchemeClause) visit(desc.refreshSchemeDesc()));
        }
        if (desc.primaryExpression() != null) {
            if (expressionPartitionDesc != null) {
                throw new ParsingException(PARSER_ERROR_MSG.duplicatedClause("PARTITION"), clausePos);
            }
            // PARTITION BY accepts a plain column ref or a (validated) function call, nothing else.
            Expr expr = (Expr) visit(desc.primaryExpression());
            if (expr instanceof SlotRef) {
                expressionPartitionDesc = new ExpressionPartitionDesc(expr);
            } else if (expr instanceof FunctionCallExpr) {
                AnalyzerUtils.checkAndExtractPartitionCol((FunctionCallExpr) expr, null);
                expressionPartitionDesc = new ExpressionPartitionDesc(expr);
            } else {
                throw new ParsingException(PARSER_ERROR_MSG.unsupportedExprWithInfo(expr.toSql(), "PARTITION BY"),
                        expr.getPos());
            }
        }
        if (desc.distributionDesc() != null) {
            if (distributionDesc != null) {
                throw new ParsingException(PARSER_ERROR_MSG.duplicatedClause("DISTRIBUTION"), clausePos);
            }
            distributionDesc = (DistributionDesc) visit(desc.distributionDesc());
        }
        // NOTE(review): unlike the clauses above, a repeated ORDER BY silently overwrites
        // sortKeys instead of raising duplicatedClause — confirm whether that is intended.
        if (desc.orderByDesc() != null) {
            sortKeys = visit(desc.orderByDesc().identifierList().identifier(), Identifier.class)
                    .stream().map(Identifier::getValue).collect(toList());
        }
    }
    // Default refresh scheme: SYNC when no distribution was given, otherwise MANUAL.
    if (refreshSchemeDesc == null) {
        if (distributionDesc == null) {
            refreshSchemeDesc = new SyncRefreshSchemeDesc();
        } else {
            refreshSchemeDesc = new ManualRefreshSchemeDesc(MaterializedView.RefreshMoment.IMMEDIATE,
                    NodePosition.ZERO);
        }
    }
    // SYNC MVs use the legacy statement and forbid PARTITION BY / DISTRIBUTION BY.
    if (refreshSchemeDesc instanceof SyncRefreshSchemeDesc) {
        if (expressionPartitionDesc != null) {
            throw new ParsingException(PARSER_ERROR_MSG.forbidClauseInMV("SYNC refresh type", "PARTITION BY"),
                    expressionPartitionDesc.getPos());
        }
        if (distributionDesc != null) {
            throw new ParsingException(PARSER_ERROR_MSG.forbidClauseInMV("SYNC refresh type", "DISTRIBUTION BY"),
                    distributionDesc.getPos());
        }
        return new CreateMaterializedViewStmt(tableName, queryStatement, properties);
    }
    if (refreshSchemeDesc instanceof AsyncRefreshSchemeDesc) {
        AsyncRefreshSchemeDesc asyncRefreshSchemeDesc = (AsyncRefreshSchemeDesc) refreshSchemeDesc;
        checkMaterializedViewAsyncRefreshSchemeUnitIdentifier(asyncRefreshSchemeDesc);
    }
    // Non-SYNC MVs are gated behind the experimental-MV config flag.
    if (!Config.enable_experimental_mv) {
        throw new ParsingException(PARSER_ERROR_MSG.feConfigDisable("enable_experimental_mv"), NodePosition.ZERO);
    }
    return new CreateMaterializedViewStatement(tableName, ifNotExist, colWithComments,
            context.indexDesc() == null ? null : getIndexDefs(context.indexDesc()),
            comment, refreshSchemeDesc, expressionPartitionDesc, distributionDesc, sortKeys, properties,
            queryStatement, createPos(context));
}

// SHOW MATERIALIZED VIEWS [FROM db] [LIKE pattern | WHERE expr].
@Override
public ParseNode visitShowMaterializedViewsStatement(
        StarRocksParser.ShowMaterializedViewsStatementContext context) {
    String database = null;
    NodePosition pos = createPos(context);
    if (context.qualifiedName() != null) {
        database = getQualifiedName(context.qualifiedName()).toString();
    }
    if (context.pattern != null) {
        StringLiteral stringLiteral = (StringLiteral) visit(context.pattern);
        return new ShowMaterializedViewsStmt(database, stringLiteral.getValue(), null, pos);
    } else if (context.expression() != null) {
        return new ShowMaterializedViewsStmt(database, null, (Expr) visit(context.expression()), pos);
    } else {
        return new ShowMaterializedViewsStmt(database, null, null, pos);
    }
}

// DROP MATERIALIZED VIEW [IF EXISTS].
@Override
public ParseNode visitDropMaterializedViewStatement(StarRocksParser.DropMaterializedViewStatementContext context) {
    QualifiedName mvQualifiedName = getQualifiedName(context.qualifiedName());
    TableName mvName = qualifiedNameToTableName(mvQualifiedName);
    return new DropMaterializedViewStmt(context.IF() != null, mvName, createPos(context));
}

// ALTER MATERIALIZED VIEW: successive clause checks overwrite alterTableClause;
// per the grammar only one clause variant is present per statement. (Continues on next line.)
@Override
public ParseNode visitAlterMaterializedViewStatement(
        StarRocksParser.AlterMaterializedViewStatementContext context) {
    QualifiedName mvQualifiedName = getQualifiedName(context.qualifiedName());
    TableName mvName = qualifiedNameToTableName(mvQualifiedName);
    AlterTableClause alterTableClause = null;
    if (context.tableRenameClause() != null) {
        alterTableClause = (TableRenameClause) visit(context.tableRenameClause());
    }
    if (context.refreshSchemeDesc() != null) {
        alterTableClause = ((RefreshSchemeClause) visit(context.refreshSchemeDesc()));
        if (alterTableClause instanceof AsyncRefreshSchemeDesc) {
            AsyncRefreshSchemeDesc asyncRefreshSchemeDesc = (AsyncRefreshSchemeDesc) alterTableClause;
checkMaterializedViewAsyncRefreshSchemeUnitIdentifier(asyncRefreshSchemeDesc);  // (continuation) validate ASYNC interval unit
        }
    }
    if (context.modifyPropertiesClause() != null) {
        alterTableClause = (ModifyTablePropertiesClause) visit(context.modifyPropertiesClause());
    }
    if (context.statusDesc() != null) {
        String status = context.statusDesc().getText();
        alterTableClause = new AlterMaterializedViewStatusClause(status, createPos(context));
    }
    if (context.swapTableClause() != null) {
        alterTableClause = (SwapTableClause) visit(context.swapTableClause());
    }
    return new AlterMaterializedViewStmt(mvName, alterTableClause, createPos(context));
}

// REFRESH MATERIALIZED VIEW [PARTITION range] [FORCE] [WITH SYNC MODE].
@Override
public ParseNode visitRefreshMaterializedViewStatement(
        StarRocksParser.RefreshMaterializedViewStatementContext context) {
    QualifiedName mvQualifiedName = getQualifiedName(context.qualifiedName());
    TableName mvName = qualifiedNameToTableName(mvQualifiedName);
    PartitionRangeDesc partitionRangeDesc = null;
    if (context.partitionRangeDesc() != null) {
        partitionRangeDesc = (PartitionRangeDesc) visit(context.partitionRangeDesc());
    }
    return new RefreshMaterializedViewStatement(mvName, partitionRangeDesc, context.FORCE() != null,
            context.SYNC() != null, createPos(context));
}

// CANCEL REFRESH MATERIALIZED VIEW.
@Override
public ParseNode visitCancelRefreshMaterializedViewStatement(
        StarRocksParser.CancelRefreshMaterializedViewStatementContext context) {
    QualifiedName mvQualifiedName = getQualifiedName(context.qualifiedName());
    TableName mvName = qualifiedNameToTableName(mvQualifiedName);
    return new CancelRefreshMaterializedViewStmt(mvName, createPos(context));
}

// CREATE EXTERNAL CATALOG name [COMMENT ...] PROPERTIES(...).
@Override
public ParseNode visitCreateExternalCatalogStatement(
        StarRocksParser.CreateExternalCatalogStatementContext context) {
    Identifier identifier = (Identifier) visit(context.identifierOrString());
    String catalogName = identifier.getValue();
    String comment = null;
    if (context.comment() != null) {
        comment = ((StringLiteral) visit(context.comment())).getStringValue();
    }
    Map<String, String> properties = new HashMap<>();
    if (context.properties() != null) {
        List<Property> propertyList = visit(context.properties().property(), Property.class);
        for (Property property : propertyList) {
            properties.put(property.getKey(), property.getValue());
        }
    }
    return new CreateCatalogStmt(catalogName, comment, properties, createPos(context));
}

// DROP CATALOG name.
@Override
public ParseNode visitDropExternalCatalogStatement(StarRocksParser.DropExternalCatalogStatementContext context) {
    Identifier identifier = (Identifier) visit(context.catalogName);
    String catalogName = identifier.getValue();
    return new DropCatalogStmt(catalogName, createPos(context));
}

// SHOW CREATE CATALOG name.
@Override
public ParseNode visitShowCreateExternalCatalogStatement(
        StarRocksParser.ShowCreateExternalCatalogStatementContext context) {
    Identifier identifier = (Identifier) visit(context.catalogName);
    String catalogName = identifier.getValue();
    return new ShowCreateExternalCatalogStmt(catalogName, createPos(context));
}

// SHOW CATALOGS.
@Override
public ParseNode visitShowCatalogsStatement(StarRocksParser.ShowCatalogsStatementContext context) {
    return new ShowCatalogsStmt(createPos(context));
}

// ALTER CATALOG name SET PROPERTIES(...).
@Override
public ParseNode visitAlterCatalogStatement(StarRocksParser.AlterCatalogStatementContext context) {
    String catalogName = ((Identifier) visit(context.catalogName)).getValue();
    AlterClause alterClause = (AlterClause) visit(context.modifyPropertiesClause());
    return new AlterCatalogStmt(catalogName, alterClause, createPos(context));
}

// SHOW WAREHOUSES [LIKE pattern | WHERE expr].
@Override
public ParseNode visitShowWarehousesStatement(StarRocksParser.ShowWarehousesStatementContext context) {
    String pattern = null;
    if (context.pattern != null) {
        StringLiteral stringLiteral = (StringLiteral) visit(context.pattern);
        pattern = stringLiteral.getValue();
    }
    Expr where = null;
    if (context.expression() != null) {
        where = (Expr) visit(context.expression());
    }
    return new ShowWarehousesStmt(pattern, where, createPos(context));
}

// INSERT [OVERWRITE] INTO target ... {VALUES ... | query}. (Body continues on the next lines.)
@Override
public ParseNode visitInsertStatement(StarRocksParser.InsertStatementContext context) {
    QueryStatement queryStatement;
    if (context.VALUES() !=
null) {  // (continuation) VALUES branch of visitInsertStatement
        // Wrap VALUES rows in a ValuesRelation with synthetic names column_0..column_k,
        // sized from the first row.
        List<ValueList> rowValues = visit(context.expressionsWithDefault(), ValueList.class);
        List<List<Expr>> rows = rowValues.stream().map(ValueList::getRow).collect(toList());
        List<String> colNames = new ArrayList<>();
        for (int i = 0; i < rows.get(0).size(); ++i) {
            colNames.add("column_" + i);
        }
        queryStatement = new QueryStatement(new ValuesRelation(rows, colNames,
                createPos(context.VALUES().getSymbol(), context.stop)));
    } else {
        queryStatement = (QueryStatement) visit(context.queryStatement());
    }
    if (context.explainDesc() != null) {
        queryStatement.setIsExplain(true, getExplainType(context.explainDesc()));
    }
    // Three targets: a named table, BLACKHOLE(), or a table function with properties.
    if (context.qualifiedName() != null) {
        QualifiedName qualifiedName = getQualifiedName(context.qualifiedName());
        TableName targetTableName = qualifiedNameToTableName(qualifiedName);
        PartitionNames partitionNames = null;
        if (context.partitionNames() != null) {
            partitionNames = (PartitionNames) visit(context.partitionNames());
        }
        InsertStmt stmt = new InsertStmt(targetTableName, partitionNames,
                context.label == null ? null : ((Identifier) visit(context.label)).getValue(),
                getColumnNames(context.columnAliases()), queryStatement, context.OVERWRITE() != null,
                createPos(context));
        stmt.setHintNodes(hintMap.get(context));
        return stmt;
    }
    if (context.BLACKHOLE() != null) {
        return new InsertStmt(queryStatement, createPos(context));
    }
    Map<String, String> tableFunctionProperties = getPropertyList(context.propertyList());
    InsertStmt res = new InsertStmt(tableFunctionProperties, queryStatement, createPos(context));
    res.setHintNodes(hintMap.get(context));
    return res;
}

// UPDATE target SET ... [FROM relations | FROM DUAL] [WHERE ...], with optional CTEs.
// (Body continues on the next line.)
@Override
public ParseNode visitUpdateStatement(StarRocksParser.UpdateStatementContext context) {
    QualifiedName qualifiedName = getQualifiedName(context.qualifiedName());
    TableName targetTableName = qualifiedNameToTableName(qualifiedName);
    List<ColumnAssignment> assignments = visit(context.assignmentList().assignment(), ColumnAssignment.class);
    List<Relation> fromRelations = null;
    if (context.fromClause() instanceof StarRocksParser.DualContext) {
        // FROM DUAL is represented as a single-row dual ValuesRelation.
        ValuesRelation valuesRelation = ValuesRelation.newDualRelation(createPos(context.fromClause()));
        fromRelations = Lists.newArrayList(valuesRelation);
    } else {
        StarRocksParser.FromContext fromContext = (StarRocksParser.FromContext) context.fromClause();
        if (fromContext.relations() != null) {
            fromRelations = visit(fromContext.relations().relation(), Relation.class);
        }
    }
    Expr where = context.where != null ?
(Expr) visit(context.where) : null; List<CTERelation> ctes = null; if (context.withClause() != null) { ctes = visit(context.withClause().commonTableExpression(), CTERelation.class); } UpdateStmt ret = new UpdateStmt(targetTableName, assignments, fromRelations, where, ctes, createPos(context)); if (context.explainDesc() != null) { ret.setIsExplain(true, getExplainType(context.explainDesc())); if (StatementBase.ExplainLevel.ANALYZE.equals(ret.getExplainLevel())) { throw new ParsingException(PARSER_ERROR_MSG.unsupportedOp("analyze")); } } ret.setHintNodes(hintMap.get(context)); return ret; } @Override public ParseNode visitDeleteStatement(StarRocksParser.DeleteStatementContext context) { QualifiedName qualifiedName = getQualifiedName(context.qualifiedName()); TableName targetTableName = qualifiedNameToTableName(qualifiedName); PartitionNames partitionNames = null; if (context.partitionNames() != null) { partitionNames = (PartitionNames) visit(context.partitionNames()); } List<Relation> usingRelations = context.using != null ? visit(context.using.relation(), Relation.class) : null; Expr where = context.where != null ? 
(Expr) visit(context.where) : null; List<CTERelation> ctes = null; if (context.withClause() != null) { ctes = visit(context.withClause().commonTableExpression(), CTERelation.class); } DeleteStmt ret = new DeleteStmt(targetTableName, partitionNames, usingRelations, where, ctes, createPos(context)); if (context.explainDesc() != null) { ret.setIsExplain(true, getExplainType(context.explainDesc())); if (StatementBase.ExplainLevel.ANALYZE.equals(ret.getExplainLevel())) { throw new ParsingException(PARSER_ERROR_MSG.unsupportedOp("analyze")); } } ret.setHintNodes(hintMap.get(context)); return ret; } @Override public ParseNode visitCreateRoutineLoadStatement(StarRocksParser.CreateRoutineLoadStatementContext context) { QualifiedName tableName = null; if (context.table != null) { tableName = getQualifiedName(context.table); } List<StarRocksParser.LoadPropertiesContext> loadPropertiesContexts = context.loadProperties(); List<ParseNode> loadPropertyList = getLoadPropertyList(loadPropertiesContexts); String typeName = context.source.getText(); Map<String, String> jobProperties = getJobProperties(context.jobProperties()); Map<String, String> dataSourceProperties = getDataSourceProperties(context.dataSourceProperties()); return new CreateRoutineLoadStmt(createLabelName(context.db, context.name), tableName == null ? 
null : tableName.toString(), loadPropertyList, jobProperties, typeName,
        dataSourceProperties, createPos(context));
}

@Override
public ParseNode visitShowCreateRoutineLoadStatement(
        StarRocksParser.ShowCreateRoutineLoadStatementContext context) {
    return new ShowCreateRoutineLoadStmt(createLabelName(context.db, context.name));
}

@Override
public ParseNode visitAlterRoutineLoadStatement(StarRocksParser.AlterRoutineLoadStatementContext context) {
    NodePosition pos = createPos(context);
    List<StarRocksParser.LoadPropertiesContext> loadPropertiesContexts = context.loadProperties();
    List<ParseNode> loadPropertyList = getLoadPropertyList(loadPropertiesContexts);
    Map<String, String> jobProperties = getJobProperties(context.jobProperties());
    if (context.dataSource() != null) {
        String typeName = context.dataSource().source.getText();
        Map<String, String> dataSourceProperties =
                getDataSourceProperties(context.dataSource().dataSourceProperties());
        RoutineLoadDataSourceProperties dataSource = new RoutineLoadDataSourceProperties(
                typeName, dataSourceProperties, createPos(context.dataSource()));
        return new AlterRoutineLoadStmt(createLabelName(context.db, context.name),
                loadPropertyList, jobProperties, dataSource, pos);
    }
    // No FROM data-source clause: pass an empty data-source properties holder.
    return new AlterRoutineLoadStmt(createLabelName(context.db, context.name),
            loadPropertyList, jobProperties, new RoutineLoadDataSourceProperties(), pos);
}

@Override
public ParseNode visitAlterLoadStatement(StarRocksParser.AlterLoadStatementContext context) {
    Map<String, String> jobProperties = getJobProperties(context.jobProperties());
    return new AlterLoadStmt(createLabelName(context.db, context.name), jobProperties, createPos(context));
}

@Override
public ParseNode visitStopRoutineLoadStatement(StarRocksParser.StopRoutineLoadStatementContext context) {
    return new StopRoutineLoadStmt(createLabelName(context.db, context.name), createPos(context));
}

@Override
public ParseNode visitResumeRoutineLoadStatement(StarRocksParser.ResumeRoutineLoadStatementContext context) {
    return new ResumeRoutineLoadStmt(createLabelName(context.db, context.name), createPos(context));
}

@Override
public ParseNode visitPauseRoutineLoadStatement(StarRocksParser.PauseRoutineLoadStatementContext context) {
    return new PauseRoutineLoadStmt(createLabelName(context.db, context.name), createPos(context));
}

@Override
public ParseNode visitShowRoutineLoadStatement(StarRocksParser.ShowRoutineLoadStatementContext context) {
    boolean isVerbose = context.ALL() != null;
    // NOTE(review): removed the original's unused local `String database = null;`.
    Expr where = null;
    if (context.expression() != null) {
        where = (Expr) visit(context.expression());
    }
    List<OrderByElement> orderByElements = null;
    if (context.ORDER() != null) {
        orderByElements = new ArrayList<>();
        orderByElements.addAll(visit(context.sortItem(), OrderByElement.class));
    }
    LimitElement limitElement = null;
    if (context.limitElement() != null) {
        limitElement = (LimitElement) visit(context.limitElement());
    }
    return new ShowRoutineLoadStmt(createLabelName(context.db, context.name), isVerbose, where,
            orderByElements, limitElement, createPos(context));
}

@Override
public ParseNode visitShowRoutineLoadTaskStatement(StarRocksParser.ShowRoutineLoadTaskStatementContext context) {
    QualifiedName dbName = null;
    if (context.db != null) {
        dbName = getQualifiedName(context.db);
    }
    Expr where = null;
    if (context.expression() != null) {
        where = (Expr) visit(context.expression());
    }
    return new ShowRoutineLoadTaskStmt(dbName == null ?
null : dbName.toString(), where, createPos(context));
}

@Override
public ParseNode visitShowStreamLoadStatement(StarRocksParser.ShowStreamLoadStatementContext context) {
    boolean isVerbose = context.ALL() != null;
    // NOTE(review): removed the original's unused local `String database = null;`.
    Expr where = null;
    if (context.expression() != null) {
        where = (Expr) visit(context.expression());
    }
    List<OrderByElement> orderByElements = null;
    if (context.ORDER() != null) {
        orderByElements = new ArrayList<>();
        orderByElements.addAll(visit(context.sortItem(), OrderByElement.class));
    }
    LimitElement limitElement = null;
    if (context.limitElement() != null) {
        limitElement = (LimitElement) visit(context.limitElement());
    }
    return new ShowStreamLoadStmt(createLabelName(context.db, context.name), isVerbose, where,
            orderByElements, limitElement, createPos(context));
}

@Override
public ParseNode visitAdminSetConfigStatement(StarRocksParser.AdminSetConfigStatementContext context) {
    Property config = (Property) visitProperty(context.property());
    return new AdminSetConfigStmt(AdminSetConfigStmt.ConfigType.FRONTEND, config, createPos(context));
}

@Override
public ParseNode visitAdminSetReplicaStatusStatement(
        StarRocksParser.AdminSetReplicaStatusStatementContext context) {
    List<Property> propertyList = visit(context.properties().property(), Property.class);
    return new AdminSetReplicaStatusStmt(new PropertySet(propertyList, createPos(context.properties())),
            createPos(context));
}

@Override
public ParseNode visitAdminShowConfigStatement(StarRocksParser.AdminShowConfigStatementContext context) {
    NodePosition pos = createPos(context);
    if (context.pattern != null) {
        StringLiteral stringLiteral = (StringLiteral) visit(context.pattern);
        return new AdminShowConfigStmt(AdminSetConfigStmt.ConfigType.FRONTEND, stringLiteral.getValue(), pos);
    }
    return new AdminShowConfigStmt(AdminSetConfigStmt.ConfigType.FRONTEND, null, pos);
}

@Override
public ParseNode visitAdminShowReplicaDistributionStatement(
        StarRocksParser.AdminShowReplicaDistributionStatementContext context) {
    Token start = context.qualifiedName().start;
    Token stop = context.qualifiedName().stop;
    QualifiedName qualifiedName = getQualifiedName(context.qualifiedName());
    TableName targetTableName = qualifiedNameToTableName(qualifiedName);
    PartitionNames partitionNames = null;
    if (context.partitionNames() != null) {
        // Extend the table-ref position to cover the PARTITION clause.
        stop = context.partitionNames().stop;
        partitionNames = (PartitionNames) visit(context.partitionNames());
    }
    return new AdminShowReplicaDistributionStmt(new TableRef(targetTableName, null, partitionNames,
            createPos(start, stop)), createPos(context));
}

@Override
public ParseNode visitAdminShowReplicaStatusStatement(
        StarRocksParser.AdminShowReplicaStatusStatementContext context) {
    Token start = context.qualifiedName().start;
    Token stop = context.qualifiedName().stop;
    QualifiedName qualifiedName = getQualifiedName(context.qualifiedName());
    TableName targetTableName = qualifiedNameToTableName(qualifiedName);
    Expr where = context.where != null ?
(Expr) visit(context.where) : null; PartitionNames partitionNames = null; if (context.partitionNames() != null) { stop = context.partitionNames().stop; partitionNames = (PartitionNames) visit(context.partitionNames()); } return new AdminShowReplicaStatusStmt( new TableRef(targetTableName, null, partitionNames, createPos(start, stop)), where, createPos(context)); } @Override public ParseNode visitAdminRepairTableStatement(StarRocksParser.AdminRepairTableStatementContext context) { Token start = context.qualifiedName().start; Token stop = context.qualifiedName().stop; QualifiedName qualifiedName = getQualifiedName(context.qualifiedName()); TableName targetTableName = qualifiedNameToTableName(qualifiedName); PartitionNames partitionNames = null; if (context.partitionNames() != null) { stop = context.partitionNames().stop; partitionNames = (PartitionNames) visit(context.partitionNames()); } return new AdminRepairTableStmt(new TableRef(targetTableName, null, partitionNames, createPos(start, stop)), createPos(context)); } @Override public ParseNode visitAdminCancelRepairTableStatement( StarRocksParser.AdminCancelRepairTableStatementContext context) { Token start = context.qualifiedName().start; Token stop = context.qualifiedName().stop; QualifiedName qualifiedName = getQualifiedName(context.qualifiedName()); TableName targetTableName = qualifiedNameToTableName(qualifiedName); PartitionNames partitionNames = null; if (context.partitionNames() != null) { stop = context.partitionNames().stop; partitionNames = (PartitionNames) visit(context.partitionNames()); } return new AdminCancelRepairTableStmt( new TableRef(targetTableName, null, partitionNames, createPos(start, stop)), createPos(context)); } @Override public ParseNode visitAdminCheckTabletsStatement(StarRocksParser.AdminCheckTabletsStatementContext context) { List<Long> tabletIds = Lists.newArrayList(); if (context.tabletList() != null) { tabletIds = context.tabletList().INTEGER_VALUE().stream().map(ParseTree::getText) 
.map(Long::parseLong).collect(toList()); } return new AdminCheckTabletsStmt(tabletIds, (Property) visitProperty(context.property()), createPos(context)); } @Override public ParseNode visitKillStatement(StarRocksParser.KillStatementContext context) { NodePosition pos = createPos(context); long id = Long.parseLong(context.INTEGER_VALUE().getText()); if (context.QUERY() != null) { return new KillStmt(false, id, pos); } else { return new KillStmt(true, id, pos); } } @Override public ParseNode visitSyncStatement(StarRocksParser.SyncStatementContext context) { return new SyncStmt(createPos(context)); } @Override public ParseNode visitAlterSystemStatement(StarRocksParser.AlterSystemStatementContext context) { return new AlterSystemStmt((AlterClause) visit(context.alterClause()), createPos(context)); } @Override public ParseNode visitCancelAlterSystemStatement(StarRocksParser.CancelAlterSystemStatementContext context) { return new CancelAlterSystemStmt(visit(context.string(), StringLiteral.class) .stream().map(StringLiteral::getValue).collect(toList()), createPos(context)); } @Override public ParseNode visitShowComputeNodesStatement(StarRocksParser.ShowComputeNodesStatementContext context) { return new ShowComputeNodesStmt(createPos(context)); } @Override public ParseNode visitAnalyzeStatement(StarRocksParser.AnalyzeStatementContext context) { QualifiedName qualifiedName = getQualifiedName(context.qualifiedName()); TableName tableName = qualifiedNameToTableName(qualifiedName); List<Identifier> columns = visitIfPresent(context.identifier(), Identifier.class); List<String> columnNames = null; if (columns != null) { columnNames = columns.stream().map(Identifier::getValue).collect(toList()); } Map<String, String> properties = new HashMap<>(); if (context.properties() != null) { List<Property> propertyList = visit(context.properties().property(), Property.class); for (Property property : propertyList) { properties.put(property.getKey(), property.getValue()); } } return new 
AnalyzeStmt(tableName, columnNames, properties, context.SAMPLE() != null, context.ASYNC() != null,
        new AnalyzeBasicDesc(), createPos(context));
}

@Override
public ParseNode visitDropStatsStatement(StarRocksParser.DropStatsStatementContext context) {
    QualifiedName qualifiedName = getQualifiedName(context.qualifiedName());
    TableName tableName = qualifiedNameToTableName(qualifiedName);
    return new DropStatsStmt(tableName, createPos(context));
}

@Override
public ParseNode visitCreateAnalyzeStatement(StarRocksParser.CreateAnalyzeStatementContext context) {
    NodePosition pos = createPos(context);
    Map<String, String> properties = new HashMap<>();
    if (context.properties() != null) {
        List<Property> propertyList = visit(context.properties().property(), Property.class);
        for (Property property : propertyList) {
            properties.put(property.getKey(), property.getValue());
        }
    }
    if (context.DATABASE() != null) {
        // Database-level analyze job.
        return new CreateAnalyzeJobStmt(((Identifier) visit(context.db)).getValue(), context.FULL() == null,
                properties, pos);
    } else if (context.TABLE() != null) {
        // Table-level analyze job, optionally restricted to specific columns.
        QualifiedName qualifiedName = getQualifiedName(context.qualifiedName());
        TableName tableName = qualifiedNameToTableName(qualifiedName);
        List<Identifier> columns = visitIfPresent(context.identifier(), Identifier.class);
        List<String> columnNames = null;
        if (columns != null) {
            columnNames = columns.stream().map(Identifier::getValue).collect(toList());
        }
        return new CreateAnalyzeJobStmt(tableName, columnNames, context.SAMPLE() != null, properties, pos);
    } else {
        // Instance-wide analyze job.
        return new CreateAnalyzeJobStmt(context.FULL() == null, properties, pos);
    }
}

@Override
public ParseNode visitDropAnalyzeJobStatement(StarRocksParser.DropAnalyzeJobStatementContext context) {
    return new DropAnalyzeJobStmt(Long.parseLong(context.INTEGER_VALUE().getText()), createPos(context));
}

@Override
public ParseNode visitShowAnalyzeStatement(StarRocksParser.ShowAnalyzeStatementContext context) {
    Predicate predicate = null;
    NodePosition pos = createPos(context);
    if (context.expression() != null) {
        predicate = (Predicate) visit(context.expression());
    }
    if (context.STATUS() != null) {
        return new ShowAnalyzeStatusStmt(predicate, pos);
    }
    // The original had `else if (context.JOB() != null)` and `else` branches that were
    // byte-identical; both produce a SHOW ANALYZE JOB statement, so they are collapsed here.
    return new ShowAnalyzeJobStmt(predicate, pos);
}

@Override
public ParseNode visitShowStatsMetaStatement(StarRocksParser.ShowStatsMetaStatementContext context) {
    Predicate predicate = null;
    if (context.expression() != null) {
        predicate = (Predicate) visit(context.expression());
    }
    return new ShowBasicStatsMetaStmt(predicate, createPos(context));
}

@Override
public ParseNode visitShowHistogramMetaStatement(StarRocksParser.ShowHistogramMetaStatementContext context) {
    Predicate predicate = null;
    if (context.expression() != null) {
        predicate = (Predicate) visit(context.expression());
    }
    return new ShowHistogramStatsMetaStmt(predicate, createPos(context));
}

@Override
public ParseNode visitAnalyzeHistogramStatement(StarRocksParser.AnalyzeHistogramStatementContext context) {
    QualifiedName qualifiedName = getQualifiedName(context.qualifiedName());
    TableName tableName = qualifiedNameToTableName(qualifiedName);
    List<Identifier> columns = visitIfPresent(context.identifier(), Identifier.class);
    List<String> columnNames = null;
    if (columns != null) {
        columnNames = columns.stream().map(Identifier::getValue).collect(toList());
    }
    Map<String, String> properties = new HashMap<>();
    if (context.properties() != null) {
        List<Property> propertyList = visit(context.properties().property(), Property.class);
        for (Property property : propertyList) {
            properties.put(property.getKey(), property.getValue());
        }
    }
    long bucket;
    if (context.bucket != null) {
        bucket = Long.parseLong(context.bucket.getText());
    } else {
        // Fall back to the configured default bucket count.
        bucket = Config.histogram_buckets_size;
    }
    return new AnalyzeStmt(tableName, columnNames, properties, true, context.ASYNC() != null,
            new AnalyzeHistogramDesc(bucket), createPos(context));
}

@Override
public ParseNode
visitDropHistogramStatement(StarRocksParser.DropHistogramStatementContext context) { QualifiedName qualifiedName = getQualifiedName(context.qualifiedName()); TableName tableName = qualifiedNameToTableName(qualifiedName); List<Identifier> columns = visitIfPresent(context.identifier(), Identifier.class); List<String> columnNames = null; if (columns != null) { columnNames = columns.stream().map(Identifier::getValue).collect(toList()); } return new DropHistogramStmt(tableName, columnNames, createPos(context)); } @Override public ParseNode visitKillAnalyzeStatement(StarRocksParser.KillAnalyzeStatementContext context) { return new KillAnalyzeStmt(Long.parseLong(context.INTEGER_VALUE().getText()), createPos(context)); } @Override public ParseNode visitAnalyzeProfileStatement(StarRocksParser.AnalyzeProfileStatementContext context) { StringLiteral stringLiteral = (StringLiteral) visit(context.string()); List<Integer> planNodeIds = Lists.newArrayList(); if (context.INTEGER_VALUE() != null) { planNodeIds = context.INTEGER_VALUE().stream() .map(ParseTree::getText) .map(Integer::parseInt) .collect(toList()); } return new AnalyzeProfileStmt(stringLiteral.getStringValue(), planNodeIds, createPos(context)); } public ParseNode visitCreateResourceGroupStatement(StarRocksParser.CreateResourceGroupStatementContext context) { Identifier identifier = (Identifier) visit(context.identifier()); String name = identifier.getValue(); List<List<Predicate>> predicatesList = new ArrayList<>(); for (StarRocksParser.ClassifierContext classifierContext : context.classifier()) { List<Predicate> p = visit(classifierContext.expressionList().expression(), Predicate.class); predicatesList.add(p); } Map<String, String> properties = new HashMap<>(); List<Property> propertyList = visit(context.property(), Property.class); for (Property property : propertyList) { properties.put(property.getKey(), property.getValue()); } return new CreateResourceGroupStmt(name, context.EXISTS() != null, context.REPLACE() != 
null, predicatesList, properties, createPos(context)); } @Override public ParseNode visitDropResourceGroupStatement(StarRocksParser.DropResourceGroupStatementContext context) { Identifier identifier = (Identifier) visit(context.identifier()); return new DropResourceGroupStmt(identifier.getValue(), createPos(context)); } @Override public ParseNode visitAlterResourceGroupStatement(StarRocksParser.AlterResourceGroupStatementContext context) { Identifier identifier = (Identifier) visit(context.identifier()); String name = identifier.getValue(); NodePosition pos = createPos(context); if (context.ADD() != null) { List<List<Predicate>> predicatesList = new ArrayList<>(); for (StarRocksParser.ClassifierContext classifierContext : context.classifier()) { List<Predicate> p = visit(classifierContext.expressionList().expression(), Predicate.class); predicatesList.add(p); } return new AlterResourceGroupStmt(name, new AlterResourceGroupStmt.AddClassifiers(predicatesList), pos); } else if (context.DROP() != null) { if (context.ALL() != null) { return new AlterResourceGroupStmt(name, new AlterResourceGroupStmt.DropAllClassifiers(), pos); } else { return new AlterResourceGroupStmt(name, new AlterResourceGroupStmt.DropClassifiers(context.INTEGER_VALUE() .stream().map(ParseTree::getText).map(Long::parseLong).collect(toList())), pos); } } else { Map<String, String> properties = new HashMap<>(); List<Property> propertyList = visit(context.property(), Property.class); for (Property property : propertyList) { properties.put(property.getKey(), property.getValue()); } return new AlterResourceGroupStmt(name, new AlterResourceGroupStmt.AlterProperties(properties), pos); } } @Override public ParseNode visitShowResourceGroupStatement(StarRocksParser.ShowResourceGroupStatementContext context) { NodePosition pos = createPos(context); if (context.GROUPS() != null) { return new ShowResourceGroupStmt(null, context.ALL() != null, pos); } else { Identifier identifier = (Identifier) 
visit(context.identifier()); return new ShowResourceGroupStmt(identifier.getValue(), false, pos); } } public ParseNode visitCreateResourceStatement(StarRocksParser.CreateResourceStatementContext context) { Identifier identifier = (Identifier) visit(context.identifierOrString()); Map<String, String> properties = new HashMap<>(); if (context.properties() != null) { List<Property> propertyList = visit(context.properties().property(), Property.class); for (Property property : propertyList) { properties.put(property.getKey(), property.getValue()); } } return new CreateResourceStmt(context.EXTERNAL() != null, identifier.getValue(), properties, createPos(context)); } public ParseNode visitDropResourceStatement(StarRocksParser.DropResourceStatementContext context) { Identifier identifier = (Identifier) visit(context.identifierOrString()); return new DropResourceStmt(identifier.getValue(), createPos(context)); } public ParseNode visitAlterResourceStatement(StarRocksParser.AlterResourceStatementContext context) { Identifier identifier = (Identifier) visit(context.identifierOrString()); Map<String, String> properties = new HashMap<>(); if (context.properties() != null) { List<Property> propertyList = visit(context.properties().property(), Property.class); for (Property property : propertyList) { properties.put(property.getKey(), property.getValue()); } } return new AlterResourceStmt(identifier.getValue(), properties, createPos(context)); } public ParseNode visitShowResourceStatement(StarRocksParser.ShowResourceStatementContext context) { return new ShowResourcesStmt(createPos(context)); } @Override public ParseNode visitLoadStatement(StarRocksParser.LoadStatementContext context) { NodePosition pos = createPos(context); LabelName label = getLabelName(context.labelName()); List<DataDescription> dataDescriptions = null; if (context.data != null) { dataDescriptions = context.data.dataDesc().stream().map(this::getDataDescription) .collect(toList()); } Map<String, String> 
properties = null; if (context.props != null) { properties = Maps.newHashMap(); List<Property> propertyList = visit(context.props.property(), Property.class); for (Property property : propertyList) { properties.put(property.getKey(), property.getValue()); } } if (context.resource != null) { ResourceDesc resourceDesc = getResourceDesc(context.resource); return new LoadStmt(label, dataDescriptions, resourceDesc, properties, pos); } BrokerDesc brokerDesc = getBrokerDesc(context.broker); String cluster = null; if (context.system != null) { cluster = ((Identifier) visit(context.system)).getValue(); } LoadStmt stmt = new LoadStmt(label, dataDescriptions, brokerDesc, cluster, properties, pos); stmt.setHintNodes(hintMap.get(context)); return stmt; } private LabelName getLabelName(StarRocksParser.LabelNameContext context) { String label = ((Identifier) visit(context.label)).getValue(); String db = ""; if (context.db != null) { db = ((Identifier) visit(context.db)).getValue(); } return new LabelName(db, label, createPos(context)); } private DataDescription getDataDescription(StarRocksParser.DataDescContext context) { NodePosition pos = createPos(context); String dstTableName = ((Identifier) visit(context.dstTableName)).getValue(); PartitionNames partitionNames = (PartitionNames) visitIfPresent(context.partitions); Expr whereExpr = (Expr) visitIfPresent(context.where); List<Expr> colMappingList = null; if (context.colMappingList != null) { colMappingList = visit(context.colMappingList.expressionList().expression(), Expr.class); } if (context.srcTableName != null) { String srcTableName = ((Identifier) visit(context.srcTableName)).getValue(); return new DataDescription(dstTableName, partitionNames, srcTableName, context.NEGATIVE() != null, colMappingList, whereExpr, pos); } List<String> files = context.srcFiles.string().stream().map(c -> ((StringLiteral) visit(c)).getStringValue()) .collect(toList()); ColumnSeparator colSep = getColumnSeparator(context.colSep); RowDelimiter 
rowDelimiter = getRowDelimiter(context.rowSep); String format = null; if (context.format != null) { if (context.format.identifier() != null) { format = ((Identifier) visit(context.format.identifier())).getValue(); } else if (context.format.string() != null) { format = ((StringLiteral) visit(context.format.string())).getStringValue(); } } List<String> colList = null; if (context.colList != null) { List<Identifier> identifiers = visit(context.colList.identifier(), Identifier.class); colList = identifiers.stream().map(Identifier::getValue).collect(toList()); } List<String> colFromPath = null; if (context.colFromPath != null) { List<Identifier> identifiers = visit(context.colFromPath.identifier(), Identifier.class); colFromPath = identifiers.stream().map(Identifier::getValue).collect(toList()); } StarRocksParser.FormatPropsContext formatPropsContext; CsvFormat csvFormat; if (context.formatPropsField != null) { formatPropsContext = context.formatProps(); String escape = null; if (formatPropsContext.escapeCharacter != null) { StringLiteral stringLiteral = (StringLiteral) visit(formatPropsContext.escapeCharacter); escape = stringLiteral.getValue(); } String enclose = null; if (formatPropsContext.encloseCharacter != null) { StringLiteral stringLiteral = (StringLiteral) visit(formatPropsContext.encloseCharacter); enclose = stringLiteral.getValue(); } long skipheader = 0; if (formatPropsContext.INTEGER_VALUE() != null) { skipheader = Long.parseLong(formatPropsContext.INTEGER_VALUE().getText()); if (skipheader < 0) { skipheader = 0; } } boolean trimspace = false; if (formatPropsContext.booleanValue() != null) { trimspace = Boolean.parseBoolean(formatPropsContext.booleanValue().getText()); } csvFormat = new CsvFormat(enclose == null ? 0 : (byte) enclose.charAt(0), escape == null ? 
0 : (byte) escape.charAt(0), skipheader, trimspace); } else { csvFormat = new CsvFormat((byte) 0, (byte) 0, 0, false); } return new DataDescription(dstTableName, partitionNames, files, colList, colSep, rowDelimiter, format, colFromPath, context.NEGATIVE() != null, colMappingList, whereExpr, csvFormat, createPos(context)); } private ColumnSeparator getColumnSeparator(StarRocksParser.StringContext context) { if (context != null) { String sep = ((StringLiteral) visit(context)).getValue(); return new ColumnSeparator(sep); } return null; } private RowDelimiter getRowDelimiter(StarRocksParser.StringContext context) { if (context != null) { String sep = ((StringLiteral) visit(context)).getValue(); return new RowDelimiter(sep); } return null; } private BrokerDesc getBrokerDesc(StarRocksParser.BrokerDescContext context) { if (context != null) { NodePosition pos = createPos(context); Map<String, String> properties = null; if (context.props != null) { properties = Maps.newHashMap(); List<Property> propertyList = visit(context.props.property(), Property.class); for (Property property : propertyList) { properties.put(property.getKey(), property.getValue()); } } if (context.identifierOrString() != null) { String brokerName = ((Identifier) visit(context.identifierOrString())).getValue(); return new BrokerDesc(brokerName, properties, pos); } else { return new BrokerDesc(properties, pos); } } return null; } private ResourceDesc getResourceDesc(StarRocksParser.ResourceDescContext context) { if (context != null) { String brokerName = ((Identifier) visit(context.identifierOrString())).getValue(); Map<String, String> properties = null; if (context.props != null) { properties = Maps.newHashMap(); List<Property> propertyList = visit(context.props.property(), Property.class); for (Property property : propertyList) { properties.put(property.getKey(), property.getValue()); } } return new ResourceDesc(brokerName, properties, createPos(context)); } return null; } @Override public ParseNode 
visitShowLoadStatement(StarRocksParser.ShowLoadStatementContext context) { String db = null; if (context.identifier() != null) { db = ((Identifier) visit(context.identifier())).getValue(); } Expr labelExpr = null; if (context.expression() != null) { labelExpr = (Expr) visit(context.expression()); } List<OrderByElement> orderByElements = null; if (context.ORDER() != null) { orderByElements = new ArrayList<>(); orderByElements.addAll(visit(context.sortItem(), OrderByElement.class)); } LimitElement limitElement = null; if (context.limitElement() != null) { limitElement = (LimitElement) visit(context.limitElement()); } boolean all = context.ALL() != null; ShowLoadStmt res = new ShowLoadStmt(db, labelExpr, orderByElements, limitElement, createPos(context)); res.setAll(all); return res; } @Override public ParseNode visitShowLoadWarningsStatement(StarRocksParser.ShowLoadWarningsStatementContext context) { if (context.ON() != null) { String url = ((StringLiteral) visit(context.string())).getValue(); return new ShowLoadWarningsStmt(null, url, null, null); } String db = null; if (context.identifier() != null) { db = ((Identifier) visit(context.identifier())).getValue(); } Expr labelExpr = null; if (context.expression() != null) { labelExpr = (Expr) visit(context.expression()); } LimitElement limitElement = null; if (context.limitElement() != null) { limitElement = (LimitElement) visit(context.limitElement()); } return new ShowLoadWarningsStmt(db, null, labelExpr, limitElement, createPos(context)); } @Override public ParseNode visitCancelLoadStatement(StarRocksParser.CancelLoadStatementContext context) { String db = null; if (context.identifier() != null) { db = ((Identifier) visit(context.identifier())).getValue(); } Expr labelExpr = null; if (context.expression() != null) { labelExpr = (Expr) visit(context.expression()); } return new CancelLoadStmt(db, labelExpr, createPos(context)); } @Override public ParseNode 
visitCancelCompactionStatement(StarRocksParser.CancelCompactionStatementContext context) { Expr txnIdExpr = null; if (context.expression() != null) { txnIdExpr = (Expr) visit(context.expression()); } return new CancelCompactionStmt(txnIdExpr, createPos(context)); } @Override public ParseNode visitShowAuthorStatement(StarRocksParser.ShowAuthorStatementContext context) { return new ShowAuthorStmt(createPos(context)); } @Override public ParseNode visitShowBackendsStatement(StarRocksParser.ShowBackendsStatementContext context) { return new ShowBackendsStmt(createPos(context)); } @Override public ParseNode visitShowBrokerStatement(StarRocksParser.ShowBrokerStatementContext context) { return new ShowBrokerStmt(createPos(context)); } @Override public ParseNode visitShowCharsetStatement(StarRocksParser.ShowCharsetStatementContext context) { String pattern = null; if (context.pattern != null) { StringLiteral stringLiteral = (StringLiteral) visit(context.pattern); pattern = stringLiteral.getValue(); } Expr where = null; if (context.expression() != null) { where = (Expr) visit(context.expression()); } return new ShowCharsetStmt(pattern, where, createPos(context)); } @Override public ParseNode visitShowCollationStatement(StarRocksParser.ShowCollationStatementContext context) { String pattern = null; if (context.pattern != null) { StringLiteral stringLiteral = (StringLiteral) visit(context.pattern); pattern = stringLiteral.getValue(); } Expr where = null; if (context.expression() != null) { where = (Expr) visit(context.expression()); } return new ShowCollationStmt(pattern, where, createPos(context)); } @Override public ParseNode visitShowDeleteStatement(StarRocksParser.ShowDeleteStatementContext context) { QualifiedName dbName = null; if (context.qualifiedName() != null) { dbName = getQualifiedName(context.db); } return new ShowDeleteStmt(dbName == null ? 
null : dbName.toString(), createPos(context)); } @Override public ParseNode visitShowDynamicPartitionStatement(StarRocksParser.ShowDynamicPartitionStatementContext context) { QualifiedName dbName = null; if (context.db != null) { dbName = getQualifiedName(context.db); } return new ShowDynamicPartitionStmt(dbName == null ? null : dbName.toString(), createPos(context)); } @Override public ParseNode visitShowEventsStatement(StarRocksParser.ShowEventsStatementContext context) { return new ShowEventsStmt(createPos(context)); } @Override public ParseNode visitShowEnginesStatement(StarRocksParser.ShowEnginesStatementContext context) { return new ShowEnginesStmt(createPos(context)); } @Override public ParseNode visitShowFrontendsStatement(StarRocksParser.ShowFrontendsStatementContext context) { return new ShowFrontendsStmt(createPos(context)); } @Override public ParseNode visitShowPluginsStatement(StarRocksParser.ShowPluginsStatementContext context) { return new ShowPluginsStmt(createPos(context)); } @Override public ParseNode visitShowRepositoriesStatement(StarRocksParser.ShowRepositoriesStatementContext context) { return new ShowRepositoriesStmt(createPos(context)); } @Override public ParseNode visitShowOpenTableStatement(StarRocksParser.ShowOpenTableStatementContext context) { return new ShowOpenTableStmt(createPos(context)); } @Override public ParseNode visitShowProcedureStatement(StarRocksParser.ShowProcedureStatementContext context) { NodePosition pos = createPos(context); if (context.pattern != null) { StringLiteral stringLiteral = (StringLiteral) visit(context.pattern); return new ShowProcedureStmt(stringLiteral.getValue(), null, pos); } else if (context.expression() != null) { return new ShowProcedureStmt(null, (Expr) visit(context.expression()), pos); } else { return new ShowProcedureStmt(null, null, pos); } } @Override public ParseNode visitShowProcStatement(StarRocksParser.ShowProcStatementContext context) { StringLiteral stringLiteral = (StringLiteral) 
visit(context.path); return new ShowProcStmt(stringLiteral.getValue(), createPos(context)); }
// SHOW [FULL] PROCESSLIST.
@Override public ParseNode visitShowProcesslistStatement(StarRocksParser.ShowProcesslistStatementContext context) { boolean isShowFull = context.FULL() != null; return new ShowProcesslistStmt(isShowFull, createPos(context)); }
// SHOW PROFILELIST [LIMIT n]; -1 means no limit.
@Override public ParseNode visitShowProfilelistStatement(StarRocksParser.ShowProfilelistStatementContext context) { int limit = context.LIMIT() != null ? Integer.parseInt(context.limit.getText()) : -1; return new ShowProfilelistStmt(limit, createPos(context)); }
// SHOW RUNNING QUERIES [LIMIT n]; -1 means no limit.
@Override public ParseNode visitShowRunningQueriesStatement(StarRocksParser.ShowRunningQueriesStatementContext context) { int limit = context.LIMIT() != null ? Integer.parseInt(context.limit.getText()) : -1; return new ShowRunningQueriesStmt(limit, createPos(context)); }
// SHOW USAGE RESOURCE GROUPS (all) or a single named group.
@Override public ParseNode visitShowResourceGroupUsageStatement( StarRocksParser.ShowResourceGroupUsageStatementContext context) { if (context.GROUPS() != null) { return new ShowResourceGroupUsageStmt(null, createPos(context)); } Identifier groupName = (Identifier) visit(context.identifier()); return new ShowResourceGroupUsageStmt(groupName.getValue(), createPos(context)); }
// SHOW TRANSACTION [FROM db] [WHERE expr].
@Override public ParseNode visitShowTransactionStatement(StarRocksParser.ShowTransactionStatementContext context) { String database = null; if (context.qualifiedName() != null) { database = getQualifiedName(context.qualifiedName()).toString(); } Expr where = null; if (context.expression() != null) { where = (Expr) visit(context.expression()); } return new ShowTransactionStmt(database, where, createPos(context)); }
// SHOW [GLOBAL|SESSION|...] STATUS [LIKE pattern] [WHERE expr].
@Override public ParseNode visitShowStatusStatement(StarRocksParser.ShowStatusStatementContext context) { String pattern = null; if (context.pattern != null) { StringLiteral stringLiteral = (StringLiteral) visit(context.pattern); pattern = stringLiteral.getValue(); } Expr where = null; if (context.expression() != null) { where = (Expr) visit(context.expression()); } return new ShowStatusStmt(getVariableType(context.varType()), pattern, where, createPos(context)); }
@Override public ParseNode visitShowTriggersStatement(StarRocksParser.ShowTriggersStatementContext context) { return new ShowTriggersStmt(createPos(context)); }
// SHOW PROPERTY [FOR user] [LIKE pattern]: string index 0/1 shifts depending on FOR.
@Override public ParseNode visitShowUserPropertyStatement(StarRocksParser.ShowUserPropertyStatementContext context) { String user; String pattern; if (context.FOR() == null) { user = null; pattern = context.LIKE() == null ? null : ((StringLiteral) visit(context.string(0))).getValue(); } else { user = ((StringLiteral) visit(context.string(0))).getValue(); pattern = context.LIKE() == null ? null : ((StringLiteral) visit(context.string(1))).getValue(); } return new ShowUserPropertyStmt(user, pattern, createPos(context)); }
// SHOW VARIABLES [LIKE pattern] [WHERE expr] with the variable scope (varType).
@Override public ParseNode visitShowVariablesStatement(StarRocksParser.ShowVariablesStatementContext context) { String pattern = null; if (context.pattern != null) { StringLiteral stringLiteral = (StringLiteral) visit(context.pattern); pattern = stringLiteral.getValue(); } Expr where = null; if (context.expression() != null) { where = (Expr) visit(context.expression()); } return new ShowVariablesStmt(getVariableType(context.varType()), pattern, where, createPos(context)); }
// SHOW WARNINGS [LIMIT ...].
@Override public ParseNode visitShowWarningStatement(StarRocksParser.ShowWarningStatementContext context) { NodePosition pos = createPos(context); if (context.limitElement() != null) { return new ShowWarningStmt((LimitElement) visit(context.limitElement()), pos); } return new ShowWarningStmt(null, pos); }
// HELP <mask>.
@Override public ParseNode visitHelpStatement(StarRocksParser.HelpStatementContext context) { String mask = ((Identifier) visit(context.identifierOrString())).getValue(); return new HelpStmt(mask, createPos(context)); }
// BACKUP: label, table refs, optional properties, repository name (continues on next line).
@Override public ParseNode visitBackupStatement(StarRocksParser.BackupStatementContext context) { QualifiedName qualifiedName = getQualifiedName(context.qualifiedName()); 
LabelName labelName = qualifiedNameToLabelName(qualifiedName); List<TableRef> tblRefs = new ArrayList<>(); for (StarRocksParser.TableDescContext tableDescContext : context.tableDesc()) { StarRocksParser.QualifiedNameContext qualifiedNameContext = tableDescContext.qualifiedName(); qualifiedName = getQualifiedName(qualifiedNameContext); TableName tableName = qualifiedNameToTableName(qualifiedName); PartitionNames partitionNames = null; if (tableDescContext.partitionNames() != null) { partitionNames = (PartitionNames) visit(tableDescContext.partitionNames()); } TableRef tableRef = new TableRef(tableName, null, partitionNames, createPos(tableDescContext)); tblRefs.add(tableRef); } Map<String, String> properties = null; if (context.propertyList() != null) { properties = new HashMap<>(); List<Property> propertyList = visit(context.propertyList().property(), Property.class); for (Property property : propertyList) { properties.put(property.getKey(), property.getValue()); } } String repoName = ((Identifier) visit(context.identifier())).getValue(); return new BackupStmt(labelName, repoName, tblRefs, properties, createPos(context)); }
// CANCEL BACKUP: repository identifier is mandatory; 'false' selects backup (vs restore).
@Override public ParseNode visitCancelBackupStatement(StarRocksParser.CancelBackupStatementContext context) { if (context.identifier() == null) { throw new ParsingException(PARSER_ERROR_MSG.nullIdentifierCancelBackupRestore()); } return new CancelBackupStmt(((Identifier) visit(context.identifier())).getValue(), false, createPos(context)); }
// SHOW BACKUP [FROM db].
@Override public ParseNode visitShowBackupStatement(StarRocksParser.ShowBackupStatementContext context) { NodePosition pos = createPos(context); if (context.identifier() == null) { return new ShowBackupStmt(null, pos); } return new ShowBackupStmt(((Identifier) visit(context.identifier())).getValue(), pos); }
// RESTORE: like BACKUP but each table desc may carry an AS alias.
@Override public ParseNode visitRestoreStatement(StarRocksParser.RestoreStatementContext context) { QualifiedName qualifiedName = getQualifiedName(context.qualifiedName()); LabelName labelName = qualifiedNameToLabelName(qualifiedName); List<TableRef> tblRefs = new ArrayList<>(); for (StarRocksParser.RestoreTableDescContext tableDescContext : context.restoreTableDesc()) { StarRocksParser.QualifiedNameContext qualifiedNameContext = tableDescContext.qualifiedName(); qualifiedName = getQualifiedName(qualifiedNameContext); TableName tableName = qualifiedNameToTableName(qualifiedName); PartitionNames partitionNames = null; if (tableDescContext.partitionNames() != null) { partitionNames = (PartitionNames) visit(tableDescContext.partitionNames()); } String alias = null; if (tableDescContext.identifier() != null) { alias = ((Identifier) visit(tableDescContext.identifier())).getValue(); } TableRef tableRef = new TableRef(tableName, alias, partitionNames, createPos(tableDescContext)); tblRefs.add(tableRef); } Map<String, String> properties = null; if (context.propertyList() != null) { properties = new HashMap<>(); List<Property> propertyList = visit(context.propertyList().property(), Property.class); for (Property property : propertyList) { properties.put(property.getKey(), property.getValue()); } } String repoName = ((Identifier) visit(context.identifier())).getValue(); return new RestoreStmt(labelName, repoName, tblRefs, properties, createPos(context)); }
// CANCEL RESTORE: reuses CancelBackupStmt with isRestore = true.
@Override public ParseNode visitCancelRestoreStatement(StarRocksParser.CancelRestoreStatementContext context) { if (context.identifier() == null) { throw new ParsingException(PARSER_ERROR_MSG.nullIdentifierCancelBackupRestore()); } return new CancelBackupStmt(((Identifier) visit(context.identifier())).getValue(), true, createPos(context)); }
// SHOW RESTORE [FROM db] [WHERE expr].
@Override public ParseNode visitShowRestoreStatement(StarRocksParser.ShowRestoreStatementContext context) { NodePosition pos = createPos(context); if (context.identifier() == null) { return new ShowRestoreStmt(null, null, pos); } if (context.expression() != null) { return new ShowRestoreStmt(((Identifier) visit(context.identifier())).getValue(), (Expr) visit(context.expression()), pos); } else { return new ShowRestoreStmt(((Identifier) visit(context.identifier())).getValue(), null, pos); } }
// SHOW SNAPSHOT ON repo [WHERE expr].
@Override public ParseNode visitShowSnapshotStatement(StarRocksParser.ShowSnapshotStatementContext context) { StarRocksParser.ExpressionContext expression = context.expression(); Expr where = null; if (expression != null) { where = (Expr) visit(context.expression()); } String repoName = ((Identifier) visit(context.identifier())).getValue(); return new ShowSnapshotStmt(repoName, where, createPos(context)); }
// CREATE [READ ONLY] REPOSITORY repo WITH BROKER [broker] ON LOCATION loc PROPERTIES(...).
@Override public ParseNode visitCreateRepositoryStatement(StarRocksParser.CreateRepositoryStatementContext context) { boolean isReadOnly = context.READ() != null && context.ONLY() != null; Map<String, String> properties = new HashMap<>(); if (context.propertyList() != null) { List<Property> propertyList = visit(context.propertyList().property(), Property.class); for (Property property : propertyList) { properties.put(property.getKey(), property.getValue()); } } String location = ((StringLiteral) visit(context.location)).getValue(); String repoName = ((Identifier) visit(context.repoName)).getValue(); String brokerName = null; if (context.brokerName != null) { brokerName = ((Identifier) visit(context.brokerName)).getValue(); } return new CreateRepositoryStmt(isReadOnly, repoName, brokerName, location, properties, createPos(context)); }
// DROP REPOSITORY repo.
@Override public ParseNode visitDropRepositoryStatement(StarRocksParser.DropRepositoryStatementContext context) { return new DropRepositoryStmt(((Identifier) visit(context.identifier())).getValue(), createPos(context)); }
// ADD SQLBLACKLIST 'sql': empty SQL text is a parse error.
@Override public ParseNode visitAddSqlBlackListStatement(StarRocksParser.AddSqlBlackListStatementContext context) { String sql = ((StringLiteral) visit(context.string())).getStringValue(); if (sql == null || sql.isEmpty()) { throw new ParsingException(PARSER_ERROR_MSG.emptySql(), createPos(context.string())); } return new AddSqlBlackListStmt(sql); }
// (signature continues on the next source line)
@Override public ParseNode 
visitDelSqlBlackListStatement(StarRocksParser.DelSqlBlackListStatementContext context) { List<Long> indexes = context.INTEGER_VALUE().stream().map(ParseTree::getText) .map(Long::parseLong).collect(toList()); return new DelSqlBlackListStmt(indexes, createPos(context)); }
@Override public ParseNode visitShowSqlBlackListStatement(StarRocksParser.ShowSqlBlackListStatementContext context) { return new ShowSqlBlackListStmt(createPos(context)); }
// NOTE(review): unlike its siblings, ShowWhiteListStmt is built without a position.
@Override public ParseNode visitShowWhiteListStatement(StarRocksParser.ShowWhiteListStatementContext context) { return new ShowWhiteListStmt(); }
// ADD/DEL/SHOW BACKEND BLACKLIST: backend ids are parsed from INTEGER_VALUE tokens.
@Override public ParseNode visitAddBackendBlackListStatement(StarRocksParser.AddBackendBlackListStatementContext ctx) { List<Long> ids = ctx.INTEGER_VALUE().stream().map(ParseTree::getText).map(Long::parseLong).collect(toList()); return new AddBackendBlackListStmt(ids, createPos(ctx)); }
@Override public ParseNode visitDelBackendBlackListStatement(StarRocksParser.DelBackendBlackListStatementContext ctx) { List<Long> ids = ctx.INTEGER_VALUE().stream().map(ParseTree::getText).map(Long::parseLong).collect(toList()); return new DelBackendBlackListStmt(createPos(ctx), ids); }
@Override public ParseNode visitShowBackendBlackListStatement(StarRocksParser.ShowBackendBlackListStatementContext ctx) { return new ShowBackendBlackListStmt(createPos(ctx)); }
// CREATE DATACACHE RULE: target path parts, signed priority, optional predicate/properties.
@Override public ParseNode visitCreateDataCacheRuleStatement(StarRocksParser.CreateDataCacheRuleStatementContext ctx) { List<StarRocksParser.IdentifierOrStringOrStarContext> partList = ctx.dataCacheTarget().identifierOrStringOrStar(); List<String> parts = partList.stream().map(c -> ((Identifier) visit(c)).getValue()).collect(toList()); QualifiedName qualifiedName = QualifiedName.of(parts); int priority = Integer.parseInt(ctx.INTEGER_VALUE().getText()); if (ctx.MINUS_SYMBOL() != null) { priority *= -1; } Expr predicates = null; if (ctx.expression() != null) { predicates = (Expr) visit(ctx.expression()); } Map<String, String> properties = null; if (ctx.properties() != null) { properties = new HashMap<>(); List<Property> propertyList = visit(ctx.properties().property(), Property.class); for (Property property : propertyList) { properties.put(property.getKey(), property.getValue()); } } return new CreateDataCacheRuleStmt(qualifiedName, predicates, priority, properties, createPos(ctx)); }
@Override public ParseNode visitShowDataCacheRulesStatement(StarRocksParser.ShowDataCacheRulesStatementContext ctx) { return new ShowDataCacheRulesStmt(createPos(ctx)); }
@Override public ParseNode visitDropDataCacheRuleStatement(StarRocksParser.DropDataCacheRuleStatementContext ctx) { long id = Long.parseLong(ctx.INTEGER_VALUE().getText()); return new DropDataCacheRuleStmt(id, createPos(ctx)); }
@Override public ParseNode visitClearDataCacheRulesStatement(StarRocksParser.ClearDataCacheRulesStatementContext ctx) { return new ClearDataCacheRulesStmt(createPos(ctx)); }
// EXPORT: table (with optional partitions, position spans name..partitions), target path,
// optional properties, broker desc, and SYNC flag.
@Override public ParseNode visitExportStatement(StarRocksParser.ExportStatementContext context) { StarRocksParser.QualifiedNameContext qualifiedNameContext = context.tableDesc().qualifiedName(); Token start = qualifiedNameContext.start; Token stop = qualifiedNameContext.stop; QualifiedName qualifiedName = getQualifiedName(qualifiedNameContext); TableName tableName = qualifiedNameToTableName(qualifiedName); PartitionNames partitionNames = null; if (context.tableDesc().partitionNames() != null) { stop = context.tableDesc().partitionNames().stop; partitionNames = (PartitionNames) visit(context.tableDesc().partitionNames()); } TableRef tableRef = new TableRef(tableName, null, partitionNames, createPos(start, stop)); StringLiteral stringLiteral = (StringLiteral) visit(context.string()); Map<String, String> properties = null; if (context.properties() != null) { properties = new HashMap<>(); List<Property> propertyList = visit(context.properties().property(), Property.class); for (Property property : propertyList) { properties.put(property.getKey(), property.getValue()); } } BrokerDesc brokerDesc = getBrokerDesc(context.brokerDesc()); boolean sync = context.SYNC() != null; return new ExportStmt(tableRef, getColumnNames(context.columnAliases()), stringLiteral.getValue(), properties, brokerDesc, createPos(context), sync); }
// CANCEL EXPORT [FROM catalog] [WHERE expr].
@Override public ParseNode visitCancelExportStatement(StarRocksParser.CancelExportStatementContext context) { String catalog = null; if (context.catalog != null) { QualifiedName dbName = getQualifiedName(context.catalog); catalog = dbName.toString(); } Expr where = null; if (context.expression() != null) { where = (Expr) visit(context.expression()); } return new CancelExportStmt(catalog, where, createPos(context)); }
// SHOW EXPORT [FROM catalog] [WHERE expr] [ORDER BY ...] [LIMIT ...].
@Override public ParseNode visitShowExportStatement(StarRocksParser.ShowExportStatementContext context) { String catalog = null; if (context.catalog != null) { QualifiedName dbName = getQualifiedName(context.catalog); catalog = dbName.toString(); } LimitElement le = null; if (context.limitElement() != null) { le = (LimitElement) visit(context.limitElement()); } List<OrderByElement> orderByElements = null; if (context.ORDER() != null) { orderByElements = new ArrayList<>(); orderByElements.addAll(visit(context.sortItem(), OrderByElement.class)); } Expr whereExpr = null; if (context.expression() != null) { whereExpr = (Expr) visit(context.expression()); } return new ShowExportStmt(catalog, whereExpr, orderByElements, le, createPos(context)); }
// INSTALL PLUGIN FROM path [PROPERTIES(...)].
@Override public ParseNode visitInstallPluginStatement(StarRocksParser.InstallPluginStatementContext context) { String pluginPath = ((Identifier) visit(context.identifierOrString())).getValue(); Map<String, String> properties = getProperties(context.properties()); return new InstallPluginStmt(pluginPath, properties, createPos(context)); }
// UNINSTALL PLUGIN path (continues on the next source line).
@Override public ParseNode visitUninstallPluginStatement(StarRocksParser.UninstallPluginStatementContext context) { String pluginPath = ((Identifier) visit(context.identifierOrString())).getValue(); return new 
UninstallPluginStmt(pluginPath, createPos(context)); }
// CREATE FILE name [IN catalog.db] PROPERTIES(...).
@Override public ParseNode visitCreateFileStatement(StarRocksParser.CreateFileStatementContext context) { String fileName = ((StringLiteral) visit(context.string())).getStringValue(); String catalog = null; if (context.catalog != null) { QualifiedName dbName = getQualifiedName(context.catalog); catalog = dbName.toString(); } Map<String, String> properties = getProperties(context.properties()); return new CreateFileStmt(fileName, catalog, properties, createPos(context)); }
// DROP FILE name [IN catalog.db] PROPERTIES(...).
@Override public ParseNode visitDropFileStatement(StarRocksParser.DropFileStatementContext context) { String fileName = ((StringLiteral) visit(context.string())).getStringValue(); String catalog = null; if (context.catalog != null) { QualifiedName dbName = getQualifiedName(context.catalog); catalog = dbName.toString(); } Map<String, String> properties = getProperties(context.properties()); return new DropFileStmt(fileName, catalog, properties, createPos(context)); }
// SHOW FILE [FROM catalog.db].
@Override public ParseNode visitShowSmallFilesStatement(StarRocksParser.ShowSmallFilesStatementContext context) { String catalog = null; if (context.catalog != null) { QualifiedName dbName = getQualifiedName(context.catalog); catalog = dbName.toString(); } return new ShowSmallFilesStmt(catalog, createPos(context)); }
// SET a, b, ...: each setVar becomes one SetListItem.
@Override public ParseNode visitSetStatement(StarRocksParser.SetStatementContext context) { List<SetListItem> propertyList = visit(context.setVar(), SetListItem.class); return new SetStmt(propertyList, createPos(context)); }
// SET NAMES / SET CHARSET: charset-only form vs charset+collation form.
@Override public ParseNode visitSetNames(StarRocksParser.SetNamesContext context) { NodePosition pos = createPos(context); if (context.CHAR() != null || context.CHARSET() != null) { if (context.identifierOrString().isEmpty()) { return new SetNamesVar(null, null, pos); } else { return new SetNamesVar( ((Identifier) visit(context.identifierOrString().get(0))).getValue(), null, pos); } } else { String charset = null; if (context.charset != null) { charset = ((Identifier) visit(context.charset)).getValue(); } String collate = null; if (context.collate != null) { collate = ((Identifier) visit(context.collate)).getValue(); } return new SetNamesVar(charset, collate, pos); } }
// SET PASSWORD [FOR user] = value: two PASSWORD tokens means the plaintext form,
// which is scrambled here; otherwise the literal is already a password hash string.
@Override public ParseNode visitSetPassword(StarRocksParser.SetPasswordContext context) { NodePosition pos = createPos(context); String passwordText; StringLiteral stringLiteral = (StringLiteral) visit(context.string()); if (context.PASSWORD().size() > 1) { passwordText = new String(MysqlPassword.makeScrambledPassword(stringLiteral.getStringValue())); } else { passwordText = stringLiteral.getStringValue(); } if (context.user() != null) { return new SetPassVar((UserIdentity) visit(context.user()), passwordText, pos); } else { return new SetPassVar(null, passwordText, pos); } }
// SET @var = expr.
@Override public ParseNode visitSetUserVar(StarRocksParser.SetUserVarContext context) { VariableExpr variableDesc = (VariableExpr) visit(context.userVariable()); Expr expr = (Expr) visit(context.expression()); return new UserVariable(variableDesc.getName(), expr, createPos(context)); }
// SET [scope] var = expr: @@-prefixed system variable form or bare identifier form
// (which defaults to SESSION scope when no varType is given).
@Override public ParseNode visitSetSystemVar(StarRocksParser.SetSystemVarContext context) { NodePosition pos = createPos(context); if (context.systemVariable() != null) { VariableExpr variableDesc = (VariableExpr) visit(context.systemVariable()); Expr expr = (Expr) visit(context.setExprOrDefault()); return new SystemVariable(variableDesc.getSetType(), variableDesc.getName(), expr, pos); } else { Expr expr = (Expr) visit(context.setExprOrDefault()); String variable = ((Identifier) visit(context.identifier())).getValue(); if (context.varType() != null) { return new SystemVariable(getVariableType(context.varType()), variable, expr, pos); } else { return new SystemVariable(SetType.SESSION, variable, expr, pos); } } }
@Override public ParseNode visitSetTransaction(StarRocksParser.SetTransactionContext context) { return new SetTransaction(createPos(context)); }
// (signature continues on the next source line)
@Override public ParseNode 
visitSetUserPropertyStatement(StarRocksParser.SetUserPropertyStatementContext context) { String user = context.FOR() == null ? null : ((StringLiteral) visit(context.string())).getValue(); List<SetUserPropertyVar> list = new ArrayList<>(); if (context.userPropertyList() != null) { List<Property> propertyList = visit(context.userPropertyList().property(), Property.class); for (Property property : propertyList) { SetUserPropertyVar setVar = new SetUserPropertyVar(property.getKey(), property.getValue()); list.add(setVar); } } return new SetUserPropertyStmt(user, list, createPos(context)); }
// RHS of SET: DEFAULT maps to null; bare ON/ALL keywords become string literals.
@Override public ParseNode visitSetExprOrDefault(StarRocksParser.SetExprOrDefaultContext context) { if (context.DEFAULT() != null) { return null; } else if (context.ON() != null) { return new StringLiteral("ON"); } else if (context.ALL() != null) { return new StringLiteral("ALL"); } else { return visit(context.expression()); } }
// ADMIN EXECUTE ON be_id 'script': -1 backend id when none is given.
@Override public ParseNode visitExecuteScriptStatement(StarRocksParser.ExecuteScriptStatementContext context) { long beId = -1; if (context.INTEGER_VALUE() != null) { beId = Long.parseLong(context.INTEGER_VALUE().getText()); } StringLiteral stringLiteral = (StringLiteral) visit(context.string()); String script = stringLiteral.getStringValue(); return new ExecuteScriptStmt(beId, script, createPos(context)); }
// CREATE STORAGE VOLUME [IF NOT EXISTS] name TYPE = t LOCATIONS (...) [COMMENT ...] [PROPERTIES(...)].
@Override public ParseNode visitCreateStorageVolumeStatement(StarRocksParser.CreateStorageVolumeStatementContext context) { Identifier identifier = (Identifier) visit(context.identifierOrString()); String svName = identifier.getValue(); String storageType = ((Identifier) visit(context.typeDesc().identifier())).getValue(); List<StarRocksParser.StringContext> locationList = context.locationsDesc().stringList().string(); List<String> locations = new ArrayList<>(); for (StarRocksParser.StringContext location : locationList) { locations.add(((StringLiteral) visit(location)).getValue()); } return new CreateStorageVolumeStmt(context.IF() != null, svName, storageType, getProperties(context.properties()), locations, context.comment() == null ? null : ((StringLiteral) visit(context.comment().string())).getStringValue(), createPos(context)); }
// SHOW STORAGE VOLUMES [LIKE pattern].
@Override public ParseNode visitShowStorageVolumesStatement(StarRocksParser.ShowStorageVolumesStatementContext context) { String pattern = null; if (context.pattern != null) { StringLiteral stringLiteral = (StringLiteral) visit(context.pattern); pattern = stringLiteral.getValue(); } return new ShowStorageVolumesStmt(pattern, createPos(context)); }
// ALTER STORAGE VOLUME: flattens the alter clauses into a (properties, comment) pair.
@Override public ParseNode visitAlterStorageVolumeStatement(StarRocksParser.AlterStorageVolumeStatementContext context) { Identifier identifier = (Identifier) visit(context.identifierOrString()); String svName = identifier.getValue(); NodePosition pos = createPos(context); List<AlterStorageVolumeClause> alterClauses = visit(context.alterStorageVolumeClause(), AlterStorageVolumeClause.class); Map<String, String> properties = new HashMap<>(); String comment = null; for (AlterStorageVolumeClause clause : alterClauses) { if (clause.getOpType().equals(AlterStorageVolumeClause.AlterOpType.ALTER_COMMENT)) { comment = ((AlterStorageVolumeCommentClause) clause).getNewComment(); } else if (clause.getOpType().equals(AlterStorageVolumeClause.AlterOpType.MODIFY_PROPERTIES)) { properties = ((ModifyStorageVolumePropertiesClause) clause).getProperties(); } } return new AlterStorageVolumeStmt(svName, properties, comment, pos); }
// DROP STORAGE VOLUME [IF EXISTS] name.
@Override public ParseNode visitDropStorageVolumeStatement(StarRocksParser.DropStorageVolumeStatementContext context) { Identifier identifier = (Identifier) visit(context.identifierOrString()); String svName = identifier.getValue(); return new DropStorageVolumeStmt(context.IF() != null, svName, createPos(context)); }
// DESC STORAGE VOLUME name (continues on the next source line).
@Override public ParseNode visitDescStorageVolumeStatement(StarRocksParser.DescStorageVolumeStatementContext context) { Identifier identifier = (Identifier) visit(context.identifierOrString()); String svName = 
identifier.getValue(); return new DescStorageVolumeStmt(svName, createPos(context)); }
// SET default_storage_volume = name.
@Override public ParseNode visitSetDefaultStorageVolumeStatement( StarRocksParser.SetDefaultStorageVolumeStatementContext context) { Identifier identifier = (Identifier) visit(context.identifierOrString()); String svName = identifier.getValue(); return new SetDefaultStorageVolumeStmt(svName, createPos(context)); }
// ALTER STORAGE VOLUME ... COMMENT clause.
@Override public ParseNode visitModifyStorageVolumeCommentClause( StarRocksParser.ModifyStorageVolumeCommentClauseContext context) { String comment = ((StringLiteral) visit(context.string())).getStringValue(); return new AlterStorageVolumeCommentClause(comment, createPos(context)); }
// ALTER STORAGE VOLUME ... SET PROPERTIES clause.
@Override public ParseNode visitModifyStorageVolumePropertiesClause( StarRocksParser.ModifyStorageVolumePropertiesClauseContext context) { Map<String, String> properties = new HashMap<>(); List<Property> propertyList = visit(context.propertyList().property(), Property.class); for (Property property : propertyList) { properties.put(property.getKey(), property.getValue()); } return new ModifyStorageVolumePropertiesClause(properties, createPos(context)); }
// ADMIN ENABLE/DISABLE FAILPOINT: optional backend list (comma-separated string),
// ENABLE may carry WITH n TIMES (n > 0) or WITH p PROBABILITY (0 <= p <= 1).
@Override public ParseNode visitUpdateFailPointStatusStatement( StarRocksParser.UpdateFailPointStatusStatementContext ctx) { String failpointName = ((StringLiteral) visit(ctx.string(0))).getStringValue(); List<String> backendList = null; if (ctx.BACKEND() != null) { String tmp = ((StringLiteral) visit(ctx.string(1))).getStringValue(); backendList = Lists.newArrayList(tmp.split(",")); } if (ctx.ENABLE() != null) { if (ctx.TIMES() != null) { int nTimes = Integer.parseInt(ctx.INTEGER_VALUE().getText()); if (nTimes <= 0) { throw new ParsingException(String.format( "Invalid TIMES value %d, it should be a positive integer", nTimes)); } return new UpdateFailPointStatusStatement(failpointName, nTimes, backendList, createPos(ctx)); } else if (ctx.PROBABILITY() != null) { double probability = Double.parseDouble(ctx.DECIMAL_VALUE().getText()); if (probability < 0 || probability > 1) { throw new ParsingException(String.format( "Invalid PROBABILITY value %f, it should be in range [0, 1]", probability)); } return new UpdateFailPointStatusStatement(failpointName, probability, backendList, createPos(ctx)); } return new UpdateFailPointStatusStatement(failpointName, true, backendList, createPos(ctx)); } return new UpdateFailPointStatusStatement(failpointName, false, backendList, createPos(ctx)); }
// SHOW FAILPOINTS [LIKE pattern] [ON BACKEND 'be1,be2']: idx walks the string() list
// because both parts are optional.
@Override public ParseNode visitShowFailPointStatement(StarRocksParser.ShowFailPointStatementContext ctx) { String pattern = null; List<String> backendList = null; int idx = 0; if (ctx.LIKE() != null) { pattern = ((StringLiteral) visit(ctx.string(idx++))).getStringValue(); } if (ctx.BACKEND() != null) { String tmp = ((StringLiteral) visit(ctx.string(idx++))).getStringValue(); backendList = Lists.newArrayList(tmp.split(",")); } return new ShowFailPointStatement(pattern, backendList, createPos(ctx)); }
// CREATE DICTIONARY: splits column descs into KEY columns and VALUE columns.
@Override public ParseNode visitCreateDictionaryStatement(StarRocksParser.CreateDictionaryStatementContext context) { String dictionaryName = getQualifiedName(context.dictionaryName().qualifiedName()).toString(); String queryableObject = getQualifiedName(context.qualifiedName()).toString(); List<StarRocksParser.DictionaryColumnDescContext> dictionaryColumnDescs = context.dictionaryColumnDesc(); List<String> dictionaryKeys = new ArrayList<>(); List<String> dictionaryValues = new ArrayList<>(); for (StarRocksParser.DictionaryColumnDescContext desc : dictionaryColumnDescs) { String columnName = getQualifiedName(desc.qualifiedName()).toString(); if (desc.KEY() != null) { dictionaryKeys.add(columnName); } if (desc.VALUE() != null) { dictionaryValues.add(columnName); } } Map<String, String> properties = null; if (context.properties() != null) { properties = new HashMap<>(); List<Property> propertyList = visit(context.properties().property(), Property.class); for (Property property : propertyList) { properties.put(property.getKey(), property.getValue()); } } return new CreateDictionaryStmt(dictionaryName, queryableObject, dictionaryKeys, dictionaryValues, properties, createPos(context)); }
// DROP DICTIONARY name [CACHE]: CACHE keyword drops only the cached data.
@Override public ParseNode visitDropDictionaryStatement(StarRocksParser.DropDictionaryStatementContext context) { String dictionaryName = getQualifiedName(context.qualifiedName()).toString(); boolean cacheOnly = false; if (context.CACHE() != null) { cacheOnly = true; } return new DropDictionaryStmt(dictionaryName, cacheOnly, createPos(context)); }
@Override public ParseNode visitRefreshDictionaryStatement(StarRocksParser.RefreshDictionaryStatementContext context) { String dictionaryName = getQualifiedName(context.qualifiedName()).toString(); return new RefreshDictionaryStmt(dictionaryName, createPos(context)); }
// SHOW DICTIONARY [name].
@Override public ParseNode visitShowDictionaryStatement(StarRocksParser.ShowDictionaryStatementContext context) { String dictionaryName = null; if (context.qualifiedName() != null) { dictionaryName = getQualifiedName(context.qualifiedName()).toString(); } return new ShowDictionaryStmt(dictionaryName, createPos(context)); }
@Override public ParseNode visitCancelRefreshDictionaryStatement( StarRocksParser.CancelRefreshDictionaryStatementContext context) { String dictionaryName = getQualifiedName(context.qualifiedName()).toString(); return new CancelRefreshDictionaryStmt(dictionaryName, createPos(context)); }
@Override public ParseNode visitUnsupportedStatement(StarRocksParser.UnsupportedStatementContext context) { return new UnsupportedStmt(createPos(context)); }
// ALTER SYSTEM ADD FOLLOWER/OBSERVER 'host:port'.
@Override public ParseNode visitAddFrontendClause(StarRocksParser.AddFrontendClauseContext context) { String cluster = ((StringLiteral) visit(context.string())).getStringValue(); NodePosition pos = createPos(context); if (context.FOLLOWER() != null) { return new AddFollowerClause(cluster, pos); } else { return new AddObserverClause(cluster, pos); } }
// (signature continues on the next source line)
@Override public ParseNode 
visitDropFrontendClause(StarRocksParser.DropFrontendClauseContext context) { String cluster = ((StringLiteral) visit(context.string())).getStringValue(); NodePosition pos = createPos(context); if (context.FOLLOWER() != null) { return new DropFollowerClause(cluster, pos); } else { return new DropObserverClause(cluster, pos); } } @Override public ParseNode visitModifyFrontendHostClause(StarRocksParser.ModifyFrontendHostClauseContext context) { List<String> clusters = context.string().stream().map(c -> ((StringLiteral) visit(c)).getStringValue()).collect(toList()); return new ModifyFrontendAddressClause(clusters.get(0), clusters.get(1), createPos(context)); } @Override public ParseNode visitAddBackendClause(StarRocksParser.AddBackendClauseContext context) { List<String> backends = context.string().stream().map(c -> ((StringLiteral) visit(c)).getStringValue()).collect(toList()); return new AddBackendClause(backends, createPos(context)); } @Override public ParseNode visitDropBackendClause(StarRocksParser.DropBackendClauseContext context) { List<String> clusters = context.string().stream().map(c -> ((StringLiteral) visit(c)).getStringValue()).collect(toList()); return new DropBackendClause(clusters, context.FORCE() != null, createPos(context)); } @Override public ParseNode visitDecommissionBackendClause(StarRocksParser.DecommissionBackendClauseContext context) { List<String> clusters = context.string().stream().map(c -> ((StringLiteral) visit(c)).getStringValue()).collect(toList()); return new DecommissionBackendClause(clusters, createPos(context)); } @Override public ParseNode visitModifyBackendClause(StarRocksParser.ModifyBackendClauseContext context) { List<String> strings = context.string().stream().map(c -> ((StringLiteral) visit(c)).getStringValue()).collect(toList()); if (context.HOST() != null) { return new ModifyBackendClause(strings.get(0), strings.get(1), createPos(context)); } else { String backendHostPort = strings.get(0); Map<String, String> properties = 
// MODIFY BACKEND property form, compute-node add/drop, broker modify (add / drop / drop-all),
// load-error URL, CREATE IMAGE, and the head of CLEAN TABLET SCHEDULER QUEUE.
// NOTE(review): AddComputeNodeClause is constructed without createPos(context), unlike
// DropComputeNodeClause — confirm the constructor supplies a default position.
new HashMap<>(); List<Property> propertyList = visit(context.propertyList().property(), Property.class); for (Property property : propertyList) { properties.put(property.getKey(), property.getValue()); } return new ModifyBackendClause(backendHostPort, properties, createPos(context)); } } @Override public ParseNode visitAddComputeNodeClause(StarRocksParser.AddComputeNodeClauseContext context) { List<String> hostPorts = context.string().stream().map(c -> ((StringLiteral) visit(c)).getStringValue()).collect(toList()); return new AddComputeNodeClause(hostPorts); } @Override public ParseNode visitDropComputeNodeClause(StarRocksParser.DropComputeNodeClauseContext context) { List<String> hostPorts = context.string().stream().map(c -> ((StringLiteral) visit(c)).getStringValue()).collect(toList()); return new DropComputeNodeClause(hostPorts, createPos(context)); } @Override public ParseNode visitModifyBrokerClause(StarRocksParser.ModifyBrokerClauseContext context) { String brokerName = ((Identifier) visit(context.identifierOrString())).getValue(); NodePosition pos = createPos(context); if (context.ALL() != null) { return ModifyBrokerClause.createDropAllBrokerClause(brokerName, pos); } List<String> hostPorts = context.string().stream().map(c -> ((StringLiteral) visit(c)).getStringValue()).collect(toList()); if (context.ADD() != null) { return ModifyBrokerClause.createAddBrokerClause(brokerName, hostPorts, pos); } return ModifyBrokerClause.createDropBrokerClause(brokerName, hostPorts, pos); } @Override public ParseNode visitAlterLoadErrorUrlClause(StarRocksParser.AlterLoadErrorUrlClauseContext context) { return new AlterLoadErrorUrlClause(getProperties(context.properties()), createPos(context)); } @Override public ParseNode visitCreateImageClause(StarRocksParser.CreateImageClauseContext context) { return new CreateImageClause(createPos(context)); } @Override public ParseNode visitCleanTabletSchedQClause( StarRocksParser.CleanTabletSchedQClauseContext context) { return new 
// Index and table-level ALTER clauses: CREATE INDEX (tracks start/stop tokens so the IndexDef gets
// a precise source position), DROP INDEX, RENAME TABLE, table comment, SWAP TABLE, and the head of
// MODIFY PROPERTIES.
CleanTabletSchedQClause(createPos(context)); } @Override public ParseNode visitCreateIndexClause(StarRocksParser.CreateIndexClauseContext context) { Token start = context.identifier().start; String indexName = ((Identifier) visit(context.identifier())).getValue(); List<Identifier> columnList = visit(context.identifierList().identifier(), Identifier.class); Token stop = context.identifierList().stop; String comment = null; if (context.comment() != null) { stop = context.comment().stop; comment = ((StringLiteral) visit(context.comment())).getStringValue(); } IndexDef indexDef = new IndexDef(indexName, columnList.stream().map(Identifier::getValue).collect(toList()), getIndexType(context.indexType()), comment, getPropertyList(context.propertyList()), createPos(start, stop)); return new CreateIndexClause(indexDef, createPos(context)); } @Override public ParseNode visitDropIndexClause(StarRocksParser.DropIndexClauseContext context) { Identifier identifier = (Identifier) visit(context.identifier()); return new DropIndexClause(identifier.getValue(), createPos(context)); } @Override public ParseNode visitTableRenameClause(StarRocksParser.TableRenameClauseContext context) { Identifier identifier = (Identifier) visit(context.identifier()); return new TableRenameClause(identifier.getValue(), createPos(context)); } @Override public ParseNode visitModifyCommentClause(StarRocksParser.ModifyCommentClauseContext context) { String comment = ((StringLiteral) visit(context.string())).getStringValue(); return new AlterTableCommentClause(comment, createPos(context)); } @Override public ParseNode visitSwapTableClause(StarRocksParser.SwapTableClauseContext context) { Identifier identifier = (Identifier) visit(context.identifier()); return new SwapTableClause(identifier.getValue(), createPos(context)); } @Override public ParseNode visitModifyPropertiesClause(StarRocksParser.ModifyPropertiesClauseContext context) { Map<String, String> properties = new HashMap<>(); List<Property> 
propertyList = visit(context.propertyList().property(), Property.class); for (Property property : propertyList) { properties.put(property.getKey(), property.getValue()); } return new ModifyTablePropertiesClause(properties, createPos(context)); } @Override public ParseNode visitOptimizeClause(StarRocksParser.OptimizeClauseContext context) { return new OptimizeClause( context.keyDesc() == null ? null : getKeysDesc(context.keyDesc()), context.partitionDesc() == null ? null : getPartitionDesc(context.partitionDesc(), null), context.distributionDesc() == null ? null : (DistributionDesc) visit(context.distributionDesc()), context.orderByDesc() == null ? null : visit(context.orderByDesc().identifierList().identifier(), Identifier.class) .stream().map(Identifier::getValue).collect(toList()), context.partitionNames() == null ? null : (PartitionNames) visit(context.partitionNames()), createPos(context)); } @Override public ParseNode visitAddColumnClause(StarRocksParser.AddColumnClauseContext context) { ColumnDef columnDef = getColumnDef(context.columnDesc()); if (columnDef.isAutoIncrement()) { throw new ParsingException(PARSER_ERROR_MSG.autoIncrementForbid(columnDef.getName(), "ADD"), columnDef.getPos()); } ColumnPosition columnPosition = null; if (context.FIRST() != null) { columnPosition = ColumnPosition.FIRST; } else if (context.AFTER() != null) { StarRocksParser.IdentifierContext identifier = context.identifier(0); String afterColumnName = getIdentifierName(identifier); columnPosition = new ColumnPosition(afterColumnName, createPos(identifier)); } String rollupName = null; if (context.rollupName != null) { rollupName = getIdentifierName(context.rollupName); } Map<String, String> properties = new HashMap<>(); ; properties = getProperties(context.properties()); if (columnDef.isGeneratedColumn()) { if (rollupName != null) { throw new ParsingException( PARSER_ERROR_MSG.generatedColumnLimit("rollupName", "ADD GENERATED COLUMN"), columnDef.getPos()); } if (columnPosition != 
null) { throw new ParsingException( PARSER_ERROR_MSG.generatedColumnLimit("AFTER", "ADD GENERATED COLUMN"), columnDef.getPos()); } if (properties.size() != 0) { throw new ParsingException( PARSER_ERROR_MSG.generatedColumnLimit("properties", "ADD GENERATED COLUMN"), columnDef.getPos()); } } return new AddColumnClause(columnDef, columnPosition, rollupName, properties, createPos(context)); } @Override public ParseNode visitAddColumnsClause(StarRocksParser.AddColumnsClauseContext context) { List<ColumnDef> columnDefs = getColumnDefs(context.columnDesc()); Map<String, String> properties = new HashMap<>(); properties = getProperties(context.properties()); String rollupName = null; if (context.rollupName != null) { rollupName = getIdentifierName(context.rollupName); } for (ColumnDef columnDef : columnDefs) { if (columnDef.isAutoIncrement()) { throw new ParsingException(PARSER_ERROR_MSG.autoIncrementForbid(columnDef.getName(), "ADD"), columnDef.getPos()); } if (columnDef.isGeneratedColumn()) { if (rollupName != null) { throw new ParsingException( PARSER_ERROR_MSG.generatedColumnLimit("rollupName", "ADD GENERATED COLUMN"), columnDef.getPos()); } if (properties.size() != 0) { throw new ParsingException( PARSER_ERROR_MSG.generatedColumnLimit("properties", "ADD GENERATED COLUMN"), columnDef.getPos()); } } } return new AddColumnsClause(columnDefs, rollupName, getProperties(context.properties()), createPos(context)); } @Override public ParseNode visitDropColumnClause(StarRocksParser.DropColumnClauseContext context) { String columnName = getIdentifierName(context.identifier(0)); String rollupName = null; if (context.rollupName != null) { rollupName = getIdentifierName(context.rollupName); } return new DropColumnClause(columnName, rollupName, getProperties(context.properties()), createPos(context)); } @Override public ParseNode visitModifyColumnClause(StarRocksParser.ModifyColumnClauseContext context) { ColumnDef columnDef = getColumnDef(context.columnDesc()); if 
// Remainder of MODIFY COLUMN (generated columns forbid rollup and explicit position),
// column rename, column reorder, and the head of rollup rename.
(columnDef.isAutoIncrement()) { throw new ParsingException(PARSER_ERROR_MSG.autoIncrementForbid(columnDef.getName(), "MODIFY"), columnDef.getPos()); } ColumnPosition columnPosition = null; if (context.FIRST() != null) { columnPosition = ColumnPosition.FIRST; } else if (context.AFTER() != null) { StarRocksParser.IdentifierContext identifier = context.identifier(0); String afterColumnName = getIdentifierName(identifier); columnPosition = new ColumnPosition(afterColumnName, createPos(identifier)); } String rollupName = null; if (context.rollupName != null) { rollupName = getIdentifierName(context.rollupName); } if (columnDef.isGeneratedColumn()) { if (rollupName != null) { throw new ParsingException(PARSER_ERROR_MSG.generatedColumnLimit("rollupName", "MODIFY GENERATED COLUMN"), columnDef.getPos()); } if (columnPosition != null) { throw new ParsingException(PARSER_ERROR_MSG.generatedColumnLimit("columnPosition", "MODIFY GENERATED COLUMN"), columnDef.getPos()); } } return new ModifyColumnClause(columnDef, columnPosition, rollupName, getProperties(context.properties()), createPos(context)); } @Override public ParseNode visitColumnRenameClause(StarRocksParser.ColumnRenameClauseContext context) { String oldColumnName = getIdentifierName(context.oldColumn); String newColumnName = getIdentifierName(context.newColumn); return new ColumnRenameClause(oldColumnName, newColumnName, createPos(context)); } @Override public ParseNode visitReorderColumnsClause(StarRocksParser.ReorderColumnsClauseContext context) { List<String> cols = context.identifierList().identifier().stream().map(this::getIdentifierName).collect(toList()); String rollupName = null; if (context.rollupName != null) { rollupName = getIdentifierName(context.rollupName); } return new ReorderColumnsClause(cols, rollupName, getProperties(context.properties()), createPos(context)); } @Override public ParseNode visitRollupRenameClause(StarRocksParser.RollupRenameClauseContext context) { String rollupName = ((Identifier) 
// Rollup rename tail, compaction clause (BASE unless CUMULATIVE is present; single name, name
// list, or all), and the head of ADD PARTITION (range/list descriptors plus optional distribution).
visit(context.rollupName)).getValue(); String newRollupName = ((Identifier) visit(context.newRollupName)).getValue(); return new RollupRenameClause(rollupName, newRollupName, createPos(context)); } @Override public ParseNode visitCompactionClause(StarRocksParser.CompactionClauseContext ctx) { NodePosition pos = createPos(ctx); boolean baseCompaction = ctx.CUMULATIVE() == null; if (ctx.identifier() != null) { final String partitionName = ((Identifier) visit(ctx.identifier())).getValue(); return new CompactionClause(Collections.singletonList(partitionName), baseCompaction, pos); } else if (ctx.identifierList() != null) { final List<Identifier> identifierList = visit(ctx.identifierList().identifier(), Identifier.class); return new CompactionClause(identifierList.stream().map(Identifier::getValue).collect(toList()), baseCompaction, pos); } else { return new CompactionClause(baseCompaction, pos); } } @Override public ParseNode visitAddPartitionClause(StarRocksParser.AddPartitionClauseContext context) { boolean temporary = context.TEMPORARY() != null; PartitionDesc partitionDesc = null; if (context.singleRangePartition() != null) { partitionDesc = (PartitionDesc) visitSingleRangePartition(context.singleRangePartition()); } else if (context.multiRangePartition() != null) { partitionDesc = (PartitionDesc) visitMultiRangePartition(context.multiRangePartition()); } else if (context.singleItemListPartitionDesc() != null) { partitionDesc = (PartitionDesc) visitSingleItemListPartitionDesc(context.singleItemListPartitionDesc()); } else if (context.multiItemListPartitionDesc() != null) { partitionDesc = (PartitionDesc) visitMultiItemListPartitionDesc(context.multiItemListPartitionDesc()); } DistributionDesc distributionDesc = null; if (context.distributionDesc() != null) { distributionDesc = (DistributionDesc) visitDistributionDesc(context.distributionDesc()); } Map<String, String> properties = new HashMap<>(); if (context.properties() != null) { List<Property> propertyList = 
// ADD PARTITION property-map tail, DROP PARTITION (temporary/force/if-exists flags),
// TRUNCATE PARTITION, and MODIFY PARTITION (single name, name list, or the star form).
visit(context.properties().property(), Property.class); for (Property property : propertyList) { properties.put(property.getKey(), property.getValue()); } } return new AddPartitionClause(partitionDesc, distributionDesc, properties, temporary, createPos(context)); } @Override public ParseNode visitDropPartitionClause(StarRocksParser.DropPartitionClauseContext context) { String partitionName = ((Identifier) visit(context.identifier())).getValue(); boolean temp = context.TEMPORARY() != null; boolean force = context.FORCE() != null; boolean exists = context.EXISTS() != null; return new DropPartitionClause(exists, partitionName, temp, force, createPos(context)); } @Override public ParseNode visitTruncatePartitionClause(StarRocksParser.TruncatePartitionClauseContext context) { PartitionNames partitionNames = null; if (context.partitionNames() != null) { partitionNames = (PartitionNames) visit(context.partitionNames()); } return new TruncatePartitionClause(partitionNames, createPos(context)); } @Override public ParseNode visitModifyPartitionClause(StarRocksParser.ModifyPartitionClauseContext context) { Map<String, String> properties = null; NodePosition pos = createPos(context); if (context.propertyList() != null) { properties = new HashMap<>(); List<Property> propertyList = visit(context.propertyList().property(), Property.class); for (Property property : propertyList) { properties.put(property.getKey(), property.getValue()); } } if (context.identifier() != null) { final String partitionName = ((Identifier) visit(context.identifier())).getValue(); return new ModifyPartitionClause(Collections.singletonList(partitionName), properties, pos); } else if (context.identifierList() != null) { final List<Identifier> identifierList = visit(context.identifierList().identifier(), Identifier.class); return new ModifyPartitionClause(identifierList.stream().map(Identifier::getValue).collect(toList()), properties, pos); } else { return ModifyPartitionClause.createStarClause(properties, 
// REPLACE/RENAME PARTITION clauses, pipe-name resolution ([db.]pipe with validation), and the
// CREATE / DROP / SHOW PIPE statements. CREATE PIPE rejects OR REPLACE combined with IF NOT EXISTS
// and records the INSERT statement's source offset for later re-parsing.
pos); } } @Override public ParseNode visitReplacePartitionClause(StarRocksParser.ReplacePartitionClauseContext context) { PartitionNames partitionNames = (PartitionNames) visit(context.parName); PartitionNames newPartitionNames = (PartitionNames) visit(context.tempParName); return new ReplacePartitionClause(partitionNames, newPartitionNames, getProperties(context.properties()), createPos(context)); } @Override public ParseNode visitPartitionRenameClause(StarRocksParser.PartitionRenameClauseContext context) { String partitionName = ((Identifier) visit(context.parName)).getValue(); String newPartitionName = ((Identifier) visit(context.newParName)).getValue(); return new PartitionRenameClause(partitionName, newPartitionName, createPos(context)); } private PipeName resolvePipeName(StarRocksParser.QualifiedNameContext context) { String dbName = null; String pipeName = null; QualifiedName qualifiedName = getQualifiedName(context); if (qualifiedName.getParts().size() == 2) { dbName = qualifiedName.getParts().get(0); pipeName = qualifiedName.getParts().get(1); } else if (qualifiedName.getParts().size() == 1) { pipeName = qualifiedName.getParts().get(0); } else { throw new ParsingException(PARSER_ERROR_MSG.invalidPipeName(qualifiedName.toString())); } if (dbName != null && pipeName != null) { return new PipeName(createPos(context), dbName, pipeName); } else if (pipeName != null) { return new PipeName(createPos(context), pipeName); } else { throw new ParsingException(PARSER_ERROR_MSG.invalidPipeName(qualifiedName.toString())); } } @Override public ParseNode visitCreatePipeStatement(StarRocksParser.CreatePipeStatementContext context) { PipeName pipeName = resolvePipeName(context.qualifiedName()); boolean ifNotExists = context.ifNotExists() != null && context.ifNotExists().IF() != null; boolean replace = context.orReplace() != null && context.orReplace().OR() != null; if (ifNotExists && replace) { throw new ParsingException(PARSER_ERROR_MSG.conflictedOptions("OR REPLACE", "IF NOT EXISTS")); } ParseNode insertNode = visit(context.insertStatement()); if (!(insertNode instanceof InsertStmt)) { throw new ParsingException(PARSER_ERROR_MSG.unsupportedStatement(insertNode.toSql()), context.insertStatement()); } Map<String, String> properties = new TreeMap<>(String.CASE_INSENSITIVE_ORDER); if (context.properties() != null) { List<Property> propertyList = visit(context.properties().property(), Property.class); for (Property property : propertyList) { properties.put(property.getKey(), property.getValue()); } } InsertStmt insertStmt = (InsertStmt) insertNode; int insertSqlIndex = context.insertStatement().start.getStartIndex(); return new CreatePipeStmt(ifNotExists, replace, pipeName, insertSqlIndex, insertStmt, properties, createPos(context)); } @Override public ParseNode visitDropPipeStatement(StarRocksParser.DropPipeStatementContext context) { PipeName pipeName = resolvePipeName(context.qualifiedName()); boolean ifExists = context.IF() != null; return new DropPipeStmt(ifExists, pipeName, createPos(context)); } @Override public ParseNode visitShowPipeStatement(StarRocksParser.ShowPipeStatementContext context) { String dbName = null; if (context.qualifiedName() != null) { dbName = getQualifiedName(context.qualifiedName()).toString(); } List<OrderByElement> orderBy = null; if (context.ORDER() != null) { orderBy = new ArrayList<>(); orderBy.addAll(visit(context.sortItem(), OrderByElement.class)); } LimitElement limit = null; if (context.limitElement() != null) { limit = (LimitElement) visit(context.limitElement()); } if (context.LIKE() != null) { StringLiteral stringLiteral = (StringLiteral) visit(context.pattern); return new ShowPipeStmt(dbName, stringLiteral.getValue(), null, orderBy, limit, createPos(context)); } else if (context.WHERE() != null) { return new ShowPipeStmt(dbName, null, (Expr) visit(context.expression()), orderBy, limit, createPos(context)); } else { return new ShowPipeStmt(dbName, null, null, orderBy, limit, createPos(context)); 
// SHOW PIPE closing braces, DESC PIPE, ALTER PIPE clause dispatch (SUSPEND / RESUME / RETRY
// [ALL|file] / SET properties), ALTER PIPE statement, and the head of the query-statement visitor
// (outfile, explain, optimizer trace).
} } @Override public ParseNode visitDescPipeStatement(StarRocksParser.DescPipeStatementContext context) { PipeName pipeName = resolvePipeName(context.qualifiedName()); return new DescPipeStmt(createPos(context), pipeName); } @Override public ParseNode visitAlterPipeClause(StarRocksParser.AlterPipeClauseContext context) { if (context.SUSPEND() != null) { return new AlterPipePauseResume(createPos(context), true); } else if (context.RESUME() != null) { return new AlterPipePauseResume(createPos(context), false); } else if (context.RETRY() != null) { if (context.ALL() != null) { return new AlterPipeClauseRetry(createPos(context), true); } else { String fileName = ((StringLiteral) visitString(context.fileName)).getStringValue(); return new AlterPipeClauseRetry(createPos(context), false, fileName); } } else if (context.SET() != null) { Map<String, String> properties = getPropertyList(context.propertyList()); if (MapUtils.isEmpty(properties)) { throw new ParsingException("empty property"); } return new AlterPipeSetProperty(createPos(context), properties); } else { throw new ParsingException(PARSER_ERROR_MSG.unsupportedOpWithInfo(context.toString())); } } @Override public ParseNode visitAlterPipeStatement(StarRocksParser.AlterPipeStatementContext context) { PipeName pipeName = resolvePipeName(context.qualifiedName()); AlterPipeClause alterPipeClause = (AlterPipeClause) visit(context.alterPipeClause()); return new AlterPipeStmt(createPos(context), pipeName, alterPipeClause); } @Override public ParseNode visitQueryStatement(StarRocksParser.QueryStatementContext context) { QueryRelation queryRelation = (QueryRelation) visit(context.queryRelation()); QueryStatement queryStatement = new QueryStatement(queryRelation); if (context.outfile() != null) { queryStatement.setOutFileClause((OutFileClause) visit(context.outfile())); } if (context.explainDesc() != null) { queryStatement.setIsExplain(true, getExplainType(context.explainDesc())); } if (context.optimizerTrace() != null) { 
// Optimizer-trace module selection (defaults to "base"), trace-keyword -> Tracers.Mode mapping,
// query relation with CTE attachment, CTE construction, and ORDER BY / LIMIT wiring in queryNoWith.
String module = "base"; if (context.optimizerTrace().identifier() != null) { module = ((Identifier) visit(context.optimizerTrace().identifier())).getValue(); } queryStatement.setIsTrace(getTraceMode(context.optimizerTrace()), module); } return queryStatement; } private Tracers.Mode getTraceMode(StarRocksParser.OptimizerTraceContext context) { if (context.LOGS() != null) { return Tracers.Mode.LOGS; } else if (context.VALUES() != null) { return Tracers.Mode.VARS; } else if (context.TIMES() != null) { return Tracers.Mode.TIMER; } else if (context.ALL() != null) { return Tracers.Mode.TIMING; } else { return Tracers.Mode.NONE; } } @Override public ParseNode visitQueryRelation(StarRocksParser.QueryRelationContext context) { QueryRelation queryRelation = (QueryRelation) visit(context.queryNoWith()); List<CTERelation> withQuery = new ArrayList<>(); if (context.withClause() != null) { withQuery = visit(context.withClause().commonTableExpression(), CTERelation.class); } withQuery.forEach(queryRelation::addCTERelation); return queryRelation; } @Override public ParseNode visitCommonTableExpression(StarRocksParser.CommonTableExpressionContext context) { QueryRelation queryRelation = (QueryRelation) visit(context.queryRelation()); return new CTERelation( RelationId.of(queryRelation).hashCode(), ((Identifier) visit(context.name)).getValue(), getColumnNames(context.columnAliases()), new QueryStatement(queryRelation), queryRelation.getPos()); } @Override public ParseNode visitQueryNoWith(StarRocksParser.QueryNoWithContext context) { List<OrderByElement> orderByElements = new ArrayList<>(); if (context.ORDER() != null) { orderByElements.addAll(visit(context.sortItem(), OrderByElement.class)); } LimitElement limitElement = null; if (context.limitElement() != null) { limitElement = (LimitElement) visit(context.limitElement()); } QueryRelation queryRelation = (QueryRelation) visit(context.queryPrimary()); queryRelation.setOrderBy(orderByElements); queryRelation.setLimit(limitElement); 
// Set operations: UNION / INTERSECT / EXCEPT, flattened into the left operand when it is already
// the same relation type with a matching DISTINCT/ALL qualifier; then SET_VAR hint extraction and
// the head of querySpecification.
return queryRelation; } @Override public ParseNode visitSetOperation(StarRocksParser.SetOperationContext context) { NodePosition pos = createPos(context); QueryRelation left = (QueryRelation) visit(context.left); QueryRelation right = (QueryRelation) visit(context.right); boolean distinct = true; if (context.setQuantifier() != null) { if (context.setQuantifier().DISTINCT() != null) { distinct = true; } else if (context.setQuantifier().ALL() != null) { distinct = false; } } SetQualifier setQualifier = distinct ? SetQualifier.DISTINCT : SetQualifier.ALL; switch (context.operator.getType()) { case StarRocksLexer.UNION: if (left instanceof UnionRelation && ((UnionRelation) left).getQualifier().equals(setQualifier)) { ((UnionRelation) left).addRelation(right); return left; } else { return new UnionRelation(Lists.newArrayList(left, right), setQualifier, pos); } case StarRocksLexer.INTERSECT: if (left instanceof IntersectRelation && ((IntersectRelation) left).getQualifier().equals(setQualifier)) { ((IntersectRelation) left).addRelation(right); return left; } else { return new IntersectRelation(Lists.newArrayList(left, right), setQualifier, pos); } default: if (left instanceof ExceptRelation && ((ExceptRelation) left).getQualifier().equals(setQualifier)) { ((ExceptRelation) left).addRelation(right); return left; } else { return new ExceptRelation(Lists.newArrayList(left, right), setQualifier, pos); } } } private Map<String, String> extractVarHintValues(List<HintNode> hints) { Map<String, String> selectHints = new HashMap<>(); if (CollectionUtils.isEmpty(hints)) { return selectHints; } for (HintNode hintNode : hints) { if (hintNode instanceof SetVarHint) { selectHints.putAll(hintNode.getValue()); } } return selectHints; } @Override public ParseNode visitQuerySpecification(StarRocksParser.QuerySpecificationContext context) { Relation from = null; List<SelectListItem> selectItems = visit(context.selectItem(), SelectListItem.class); if (context.fromClause() instanceof 
// querySpecification core: SELECT * is rejected with no FROM/dual; FROM relations are folded into
// left-deep comma joins; a missing FROM becomes a single-row dual relation; QUALIFY wraps the
// select into a subquery whose last item is the window value to filter on.
StarRocksParser.DualContext) { for (SelectListItem item : selectItems) { if (item.isStar()) { throw new ParsingException(PARSER_ERROR_MSG.noTableUsed(), item.getPos()); } } } else { StarRocksParser.FromContext fromContext = (StarRocksParser.FromContext) context.fromClause(); if (fromContext.relations() != null) { List<Relation> relations = visit(fromContext.relations().relation(), Relation.class); Iterator<Relation> iterator = relations.iterator(); Relation relation = iterator.next(); while (iterator.hasNext()) { Relation next = iterator.next(); relation = new JoinRelation(null, relation, next, null, false); } from = relation; } } /* from == null means a statement without from or from dual, add a single row of null values here, so that the semantics are the same, and the processing of subsequent query logic can be simplified, such as select sum(1) or select sum(1) from dual, will be converted to select sum(1) from (values(null)) t. This can share the same logic as select sum(1) from table */ if (from == null) { from = ValuesRelation.newDualRelation(); } boolean isDistinct = context.setQuantifier() != null && context.setQuantifier().DISTINCT() != null; SelectList selectList = new SelectList(selectItems, isDistinct); selectList.setHintNodes(hintMap.get(context)); SelectRelation resultSelectRelation = new SelectRelation( selectList, from, (Expr) visitIfPresent(context.where), (GroupByClause) visitIfPresent(context.groupingElement()), (Expr) visitIfPresent(context.having), createPos(context)); if (context.qualifyFunction != null) { resultSelectRelation.setOrderBy(new ArrayList<>()); SubqueryRelation subqueryRelation = new SubqueryRelation(new QueryStatement(resultSelectRelation)); TableName qualifyTableName = new TableName(null, "__QUALIFY__TABLE"); subqueryRelation.setAlias(qualifyTableName); SelectListItem windowFunction = selectItems.get(selectItems.size() - 1); windowFunction.setAlias("__QUALIFY__VALUE"); long selectValue = Long.parseLong(context.limit.getText()); 
// QUALIFY outer-select construction: only plain column references are re-projectable; the outer
// predicate compares the aliased window value against the parsed literal. Then the SELECT item
// and SELECT * (optionally table-qualified) visitors.
List<SelectListItem> selectItemsVirtual = Lists.newArrayList(selectItems); selectItemsVirtual.remove(selectItemsVirtual.size() - 1); List<SelectListItem> selectItemsOuter = new ArrayList<>(); for (SelectListItem item : selectItemsVirtual) { if (item.getExpr() instanceof SlotRef) { SlotRef exprRef = (SlotRef) item.getExpr(); String columnName = item.getAlias() == null ? exprRef.getColumnName() : item.getAlias(); SlotRef resultSlotRef = new SlotRef(qualifyTableName, columnName); selectItemsOuter.add(new SelectListItem(resultSlotRef, null)); } else { throw new ParsingException("Can't support result other than column."); } } SelectList selectListOuter = new SelectList(selectItemsOuter, isDistinct); IntLiteral rightValue = new IntLiteral(selectValue); SlotRef leftSlotRef = new SlotRef(qualifyTableName, "__QUALIFY__VALUE"); BinaryType op = getComparisonOperator(((TerminalNode) context.comparisonOperator() .getChild(0)).getSymbol()); return new SelectRelation(selectListOuter, subqueryRelation, new BinaryPredicate(op, leftSlotRef, rightValue), null, null, createPos(context)); } else { return resultSelectRelation; } } @Override public ParseNode visitSelectSingle(StarRocksParser.SelectSingleContext context) { String alias = null; if (context.identifier() != null) { alias = ((Identifier) visit(context.identifier())).getValue(); } else if (context.string() != null) { alias = ((StringLiteral) visit(context.string())).getStringValue(); } return new SelectListItem((Expr) visit(context.expression()), alias, createPos(context)); } @Override public ParseNode visitSelectAll(StarRocksParser.SelectAllContext context) { NodePosition pos = createPos(context); if (context.qualifiedName() != null) { QualifiedName qualifiedName = getQualifiedName(context.qualifiedName()); return new SelectListItem(qualifiedNameToTableName(qualifiedName), pos); } return new SelectListItem(null, pos); } @Override public ParseNode visitSingleGroupingSet(StarRocksParser.SingleGroupingSetContext context) { 
// GROUP BY variants (plain, ROLLUP, CUBE, GROUPING SETS), grouping() function call, window-frame
// construction (with or without an end boundary), and the head of the RANGE/ROWS frame-type helper.
return new GroupByClause(new ArrayList<>(visit(context.expressionList().expression(), Expr.class)), GroupByClause.GroupingType.GROUP_BY, createPos(context)); } @Override public ParseNode visitRollup(StarRocksParser.RollupContext context) { List<Expr> groupingExprs = visit(context.expressionList().expression(), Expr.class); return new GroupByClause(new ArrayList<>(groupingExprs), GroupByClause.GroupingType.ROLLUP, createPos(context)); } @Override public ParseNode visitCube(StarRocksParser.CubeContext context) { List<Expr> groupingExprs = visit(context.expressionList().expression(), Expr.class); return new GroupByClause(new ArrayList<>(groupingExprs), GroupByClause.GroupingType.CUBE, createPos(context)); } @Override public ParseNode visitMultipleGroupingSets(StarRocksParser.MultipleGroupingSetsContext context) { List<ArrayList<Expr>> groupingSets = new ArrayList<>(); for (StarRocksParser.GroupingSetContext groupingSetContext : context.groupingSet()) { List<Expr> l = visit(groupingSetContext.expression(), Expr.class); groupingSets.add(new ArrayList<>(l)); } return new GroupByClause(groupingSets, GroupByClause.GroupingType.GROUPING_SETS, createPos(context)); } @Override public ParseNode visitGroupingOperation(StarRocksParser.GroupingOperationContext context) { List<Expr> arguments = visit(context.expression(), Expr.class); return new GroupingFunctionCallExpr("grouping", arguments, createPos(context)); } @Override public ParseNode visitWindowFrame(StarRocksParser.WindowFrameContext context) { NodePosition pos = createPos(context); if (context.end != null) { return new AnalyticWindow( getFrameType(context.frameType), (AnalyticWindow.Boundary) visit(context.start), (AnalyticWindow.Boundary) visit(context.end), pos); } else { return new AnalyticWindow( getFrameType(context.frameType), (AnalyticWindow.Boundary) visit(context.start), pos); } } private static AnalyticWindow.Type getFrameType(Token type) { if (type.getType() == StarRocksLexer.RANGE) { return 
// Window-frame boundary visitors and helpers (PRECEDING/FOLLOWING, bounded/unbounded, current
// row), ORDER BY sort item with null ordering derived from sql_mode when unspecified, and the
// head of the LIMIT element visitor (rejects '?' parameters).
AnalyticWindow.Type.RANGE; } else { return AnalyticWindow.Type.ROWS; } } @Override public ParseNode visitUnboundedFrame(StarRocksParser.UnboundedFrameContext context) { return new AnalyticWindow.Boundary(getUnboundedFrameBoundType(context.boundType), null); } @Override public ParseNode visitBoundedFrame(StarRocksParser.BoundedFrameContext context) { return new AnalyticWindow.Boundary(getBoundedFrameBoundType(context.boundType), (Expr) visit(context.expression())); } @Override public ParseNode visitCurrentRowBound(StarRocksParser.CurrentRowBoundContext context) { return new AnalyticWindow.Boundary(AnalyticWindow.BoundaryType.CURRENT_ROW, null); } private static AnalyticWindow.BoundaryType getBoundedFrameBoundType(Token token) { if (token.getType() == StarRocksLexer.PRECEDING) { return AnalyticWindow.BoundaryType.PRECEDING; } else { return AnalyticWindow.BoundaryType.FOLLOWING; } } private static AnalyticWindow.BoundaryType getUnboundedFrameBoundType(Token token) { if (token.getType() == StarRocksLexer.PRECEDING) { return AnalyticWindow.BoundaryType.UNBOUNDED_PRECEDING; } else { return AnalyticWindow.BoundaryType.UNBOUNDED_FOLLOWING; } } @Override public ParseNode visitSortItem(StarRocksParser.SortItemContext context) { return new OrderByElement( (Expr) visit(context.expression()), getOrderingType(context.ordering), getNullOrderingType(getOrderingType(context.ordering), context.nullOrdering), createPos(context)); } private boolean getNullOrderingType(boolean isAsc, Token token) { if (token == null) { return (!SqlModeHelper.check(sqlMode, SqlModeHelper.MODE_SORT_NULLS_LAST)) == isAsc; } return token.getType() == StarRocksLexer.FIRST; } private static boolean getOrderingType(Token token) { if (token == null) { return true; } return token.getType() == StarRocksLexer.ASC; } @Override public ParseNode visitLimitElement(StarRocksParser.LimitElementContext context) { if (context.limit.getText().equals("?") || (context.offset != null && context.offset.getText().equals("?"))) 
// LIMIT/OFFSET parsing tail, relation chaining (joinRelation list folded left onto the primary
// relation), parenthesized-relation folding into comma joins, and the head of the table atom
// (partition names, tablet id list; start/stop tokens tracked for positioning).
{ throw new ParsingException("using parameter(?) as limit or offset not supported"); } long limit = Long.parseLong(context.limit.getText()); long offset = 0; if (context.offset != null) { offset = Long.parseLong(context.offset.getText()); } return new LimitElement(offset, limit, createPos(context)); } @Override public ParseNode visitRelation(StarRocksParser.RelationContext context) { Relation relation = (Relation) visit(context.relationPrimary()); List<JoinRelation> joinRelations = visit(context.joinRelation(), JoinRelation.class); Relation leftChildRelation = relation; for (JoinRelation joinRelation : joinRelations) { joinRelation.setLeft(leftChildRelation); leftChildRelation = joinRelation; } return leftChildRelation; } @Override public ParseNode visitParenthesizedRelation(StarRocksParser.ParenthesizedRelationContext context) { if (context.relations().relation().size() == 1) { return visit(context.relations().relation().get(0)); } else { List<Relation> relations = visit(context.relations().relation(), Relation.class); Iterator<Relation> iterator = relations.iterator(); Relation relation = iterator.next(); while (iterator.hasNext()) { relation = new JoinRelation(null, relation, iterator.next(), null, false); } return relation; } } @Override public ParseNode visitTableAtom(StarRocksParser.TableAtomContext context) { Token start = context.start; Token stop = context.stop; QualifiedName qualifiedName = getQualifiedName(context.qualifiedName()); TableName tableName = qualifiedNameToTableName(qualifiedName); PartitionNames partitionNames = null; if (context.partitionNames() != null) { stop = context.partitionNames().stop; partitionNames = (PartitionNames) visit(context.partitionNames()); } List<Long> tabletIds = Lists.newArrayList(); if (context.tabletList() != null) { stop = context.tabletList().stop; tabletIds = context.tabletList().INTEGER_VALUE().stream().map(ParseTree::getText) .map(Long::parseLong).collect(toList()); } List<Long> replicaLists = 
Lists.newArrayList(); if (context.replicaList() != null) { stop = context.replicaList().stop; replicaLists = context.replicaList().INTEGER_VALUE().stream().map(ParseTree::getText).map(Long::parseLong) .collect(toList()); } TableRelation tableRelation = new TableRelation(tableName, partitionNames, tabletIds, replicaLists, createPos(start, stop)); if (context.bracketHint() != null) { for (Identifier identifier : visit(context.bracketHint().identifier(), Identifier.class)) { tableRelation.addTableHint(identifier.getValue()); } } if (context.alias != null) { Identifier identifier = (Identifier) visit(context.alias); tableRelation.setAlias(new TableName(null, identifier.getValue())); } if (context.temporalClause() != null) { StringBuilder sb = new StringBuilder(); for (ParseTree child : context.temporalClause().children) { sb.append(child.getText()); sb.append(" "); } tableRelation.setTemporalClause(sb.toString()); } return tableRelation; } @Override public ParseNode visitJoinRelation(StarRocksParser.JoinRelationContext context) { Relation left = null; Relation right = (Relation) visit(context.rightRelation); JoinOperator joinType = JoinOperator.INNER_JOIN; if (context.crossOrInnerJoinType() != null) { if (context.crossOrInnerJoinType().CROSS() != null) { joinType = JoinOperator.CROSS_JOIN; } else { joinType = JoinOperator.INNER_JOIN; } } else if (context.outerAndSemiJoinType().LEFT() != null) { if (context.outerAndSemiJoinType().OUTER() != null) { joinType = JoinOperator.LEFT_OUTER_JOIN; } else if (context.outerAndSemiJoinType().SEMI() != null) { joinType = JoinOperator.LEFT_SEMI_JOIN; } else if (context.outerAndSemiJoinType().ANTI() != null) { joinType = JoinOperator.LEFT_ANTI_JOIN; } else { joinType = JoinOperator.LEFT_OUTER_JOIN; } } else if (context.outerAndSemiJoinType().RIGHT() != null) { if (context.outerAndSemiJoinType().OUTER() != null) { joinType = JoinOperator.RIGHT_OUTER_JOIN; } else if (context.outerAndSemiJoinType().SEMI() != null) { joinType = 
JoinOperator.RIGHT_SEMI_JOIN; } else if (context.outerAndSemiJoinType().ANTI() != null) { joinType = JoinOperator.RIGHT_ANTI_JOIN; } else { joinType = JoinOperator.RIGHT_OUTER_JOIN; } } else if (context.outerAndSemiJoinType().FULL() != null) { joinType = JoinOperator.FULL_OUTER_JOIN; } Expr predicate = null; List<String> usingColNames = null; if (context.joinCriteria() != null) { if (context.joinCriteria().ON() != null) { predicate = (Expr) visit(context.joinCriteria().expression()); } else { List<Identifier> criteria = visit(context.joinCriteria().identifier(), Identifier.class); usingColNames = criteria.stream().map(Identifier::getValue).collect(Collectors.toList()); } } JoinRelation joinRelation = new JoinRelation(joinType, left, right, predicate, context.LATERAL() != null, createPos(context)); joinRelation.setUsingColNames(usingColNames); if (context.bracketHint() != null) { joinRelation.setJoinHint(((Identifier) visit(context.bracketHint().identifier(0))).getValue()); if (context.bracketHint().primaryExpression() != null) { joinRelation.setSkewColumn((Expr) visit(context.bracketHint().primaryExpression())); } if (context.bracketHint().literalExpressionList() != null) { joinRelation.setSkewValues(visit(context.bracketHint().literalExpressionList().literalExpression(), Expr.class)); } } return joinRelation; } @Override public ParseNode visitInlineTable(StarRocksParser.InlineTableContext context) { List<ValueList> rowValues = visit(context.rowConstructor(), ValueList.class); List<List<Expr>> rows = rowValues.stream().map(ValueList::getRow).collect(toList()); List<String> colNames = getColumnNames(context.columnAliases()); if (colNames == null) { colNames = new ArrayList<>(); for (int i = 0; i < rows.get(0).size(); ++i) { colNames.add("column_" + i); } } ValuesRelation valuesRelation = new ValuesRelation(rows, colNames, createPos(context)); if (context.alias != null) { Identifier identifier = (Identifier) visit(context.alias); valuesRelation.setAlias(new 
TableName(null, identifier.getValue())); } return valuesRelation; } @Override public ParseNode visitNamedArguments(StarRocksParser.NamedArgumentsContext context) { String name = ((Identifier) visit(context.identifier())).getValue(); if (name == null || name.isEmpty() || name.equals(" ")) { throw new ParsingException(PARSER_ERROR_MSG.unsupportedExpr(" The left of => shouldn't be empty")); } Expr node = (Expr) visit(context.expression()); if (node == null) { throw new ParsingException(PARSER_ERROR_MSG.unsupportedExpr(" The right of => shouldn't be null")); } return new NamedArgument(name, node); } @Override public ParseNode visitTableFunction(StarRocksParser.TableFunctionContext context) { QualifiedName functionName = getQualifiedName(context.qualifiedName()); List<Expr> parameters = visit(context.expressionList().expression(), Expr.class); FunctionCallExpr functionCallExpr = new FunctionCallExpr(FunctionName.createFnName(functionName.toString()), parameters); TableFunctionRelation tableFunctionRelation = new TableFunctionRelation(functionCallExpr); if (context.alias != null) { Identifier identifier = (Identifier) visit(context.alias); tableFunctionRelation.setAlias(new TableName(null, identifier.getValue())); } tableFunctionRelation.setColumnOutputNames(getColumnNames(context.columnAliases())); return tableFunctionRelation; } @Override public ParseNode visitNormalizedTableFunction(StarRocksParser.NormalizedTableFunctionContext context) { QualifiedName functionName = getQualifiedName(context.qualifiedName()); List<Expr> parameters = null; if (context.argumentList().expressionList() != null) { parameters = visit(context.argumentList().expressionList().expression(), Expr.class); } else { parameters = visit(context.argumentList().namedArgumentList().namedArgument(), Expr.class); } int namedArgNum = parameters.stream().filter(f -> f instanceof NamedArgument).collect(toList()).size(); if (namedArgNum > 0 && namedArgNum < parameters.size()) { throw new 
SemanticException("All arguments must be passed by name or all must be passed positionally"); } FunctionCallExpr functionCallExpr = new FunctionCallExpr(FunctionName.createFnName(functionName.toString()), parameters, createPos(context)); TableFunctionRelation relation = new TableFunctionRelation(functionCallExpr); if (context.alias != null) { Identifier identifier = (Identifier) visit(context.alias); relation.setAlias(new TableName(null, identifier.getValue())); } relation.setColumnOutputNames(getColumnNames(context.columnAliases())); return new NormalizedTableFunctionRelation(relation); } @Override public ParseNode visitFileTableFunction(StarRocksParser.FileTableFunctionContext context) { Map<String, String> properties = getPropertyList(context.propertyList()); return new FileTableFunctionRelation(properties, NodePosition.ZERO); } @Override public ParseNode visitRowConstructor(StarRocksParser.RowConstructorContext context) { ArrayList<Expr> row = new ArrayList<>(visit(context.expressionList().expression(), Expr.class)); return new ValueList(row, createPos(context)); } @Override public ParseNode visitPartitionNames(StarRocksParser.PartitionNamesContext context) { if (context.keyPartitions() != null) { return visit(context.keyPartitions()); } List<Identifier> identifierList = visit(context.identifierOrString(), Identifier.class); return new PartitionNames(context.TEMPORARY() != null, identifierList.stream().map(Identifier::getValue).collect(toList()), createPos(context)); } @Override public ParseNode visitKeyPartitionList(StarRocksParser.KeyPartitionListContext context) { List<String> partitionColNames = Lists.newArrayList(); List<Expr> partitionColValues = Lists.newArrayList(); for (StarRocksParser.KeyPartitionContext pair : context.keyPartition()) { Identifier partitionName = (Identifier) visit(pair.partitionColName); Expr partitionValue = (Expr) visit(pair.partitionColValue); partitionColNames.add(partitionName.getValue()); partitionColValues.add(partitionValue); 
} return new PartitionNames(false, new ArrayList<>(), partitionColNames, partitionColValues, NodePosition.ZERO); } @Override public ParseNode visitSubquery(StarRocksParser.SubqueryContext context) { return visit(context.queryRelation()); } @Override public ParseNode visitQueryWithParentheses(StarRocksParser.QueryWithParenthesesContext context) { QueryRelation relation = (QueryRelation) visit(context.subquery()); return new SubqueryRelation(new QueryStatement(relation)); } @Override public ParseNode visitSubqueryWithAlias(StarRocksParser.SubqueryWithAliasContext context) { QueryRelation queryRelation = (QueryRelation) visit(context.subquery()); SubqueryRelation subqueryRelation = new SubqueryRelation(new QueryStatement(queryRelation)); if (context.alias != null) { Identifier identifier = (Identifier) visit(context.alias); subqueryRelation.setAlias(new TableName(null, identifier.getValue())); } else { subqueryRelation.setAlias(new TableName(null, null)); } subqueryRelation.setColumnOutputNames(getColumnNames(context.columnAliases())); return subqueryRelation; } @Override public ParseNode visitSubqueryExpression(StarRocksParser.SubqueryExpressionContext context) { QueryRelation queryRelation = (QueryRelation) visit(context.subquery()); return new Subquery(new QueryStatement(queryRelation)); } @Override public ParseNode visitInSubquery(StarRocksParser.InSubqueryContext context) { boolean isNotIn = context.NOT() != null; QueryRelation query = (QueryRelation) visit(context.queryRelation()); return new InPredicate((Expr) visit(context.value), new Subquery(new QueryStatement(query)), isNotIn, createPos(context)); } @Override public ParseNode visitTupleInSubquery(StarRocksParser.TupleInSubqueryContext context) { boolean isNotIn = context.NOT() != null; QueryRelation query = (QueryRelation) visit(context.queryRelation()); List<Expr> tupleExpressions = visit(context.expression(), Expr.class); return new MultiInPredicate(tupleExpressions, new Subquery(new 
QueryStatement(query)), isNotIn, createPos(context)); } @Override public ParseNode visitExists(StarRocksParser.ExistsContext context) { QueryRelation query = (QueryRelation) visit(context.queryRelation()); return new ExistsPredicate(new Subquery(new QueryStatement(query)), false, createPos(context)); } @Override public ParseNode visitScalarSubquery(StarRocksParser.ScalarSubqueryContext context) { BinaryType op = getComparisonOperator(((TerminalNode) context.comparisonOperator().getChild(0)) .getSymbol()); Subquery subquery = new Subquery(new QueryStatement((QueryRelation) visit(context.queryRelation()))); return new BinaryPredicate(op, (Expr) visit(context.booleanExpression()), subquery, createPos(context)); } @Override public ParseNode visitShowFunctionsStatement(StarRocksParser.ShowFunctionsStatementContext context) { boolean isBuiltIn = context.BUILTIN() != null; boolean isGlobal = context.GLOBAL() != null; boolean isVerbose = context.FULL() != null; String dbName = null; if (context.db != null) { dbName = getQualifiedName(context.db).toString(); } String pattern = null; if (context.pattern != null) { pattern = ((StringLiteral) visit(context.pattern)).getValue(); } Expr where = null; if (context.expression() != null) { where = (Expr) visit(context.expression()); } return new ShowFunctionsStmt(dbName, isBuiltIn, isGlobal, isVerbose, pattern, where, createPos(context)); } @Override public ParseNode visitShowPrivilegesStatement(StarRocksParser.ShowPrivilegesStatementContext ctx) { return new ShowPrivilegesStmt(); } @Override public ParseNode visitDropFunctionStatement(StarRocksParser.DropFunctionStatementContext context) { QualifiedName qualifiedName = getQualifiedName(context.qualifiedName()); String functionName = qualifiedName.toString(); boolean isGlobal = context.GLOBAL() != null; FunctionName fnName = FunctionName.createFnName(functionName); if (isGlobal) { if (!Strings.isNullOrEmpty(fnName.getDb())) { throw new 
ParsingException(PARSER_ERROR_MSG.invalidUDFName(functionName), qualifiedName.getPos()); } fnName.setAsGlobalFunction(); } return new DropFunctionStmt(fnName, getFunctionArgsDef(context.typeList()), createPos(context)); } @Override public ParseNode visitCreateFunctionStatement(StarRocksParser.CreateFunctionStatementContext context) { String functionType = "SCALAR"; boolean isGlobal = context.GLOBAL() != null; if (context.functionType != null) { functionType = context.functionType.getText(); } QualifiedName qualifiedName = getQualifiedName(context.qualifiedName()); String functionName = qualifiedName.toString(); TypeDef returnTypeDef = new TypeDef(getType(context.returnType), createPos(context.returnType)); TypeDef intermediateType = null; if (context.intermediateType != null) { intermediateType = new TypeDef(getType(context.intermediateType), createPos(context.intermediateType)); } Map<String, String> properties = null; if (context.properties() != null) { properties = new HashMap<>(); List<Property> propertyList = visit(context.properties().property(), Property.class); for (Property property : propertyList) { properties.put(property.getKey(), property.getValue()); } } FunctionName fnName = FunctionName.createFnName(functionName); if (isGlobal) { if (!Strings.isNullOrEmpty(fnName.getDb())) { throw new ParsingException(PARSER_ERROR_MSG.invalidUDFName(functionName), qualifiedName.getPos()); } fnName.setAsGlobalFunction(); } return new CreateFunctionStmt(functionType, fnName, getFunctionArgsDef(context.typeList()), returnTypeDef, intermediateType, properties); } @Override public ParseNode visitCreateUserStatement(StarRocksParser.CreateUserStatementContext context) { UserDesc userDesc; Token start = context.user().start; Token stop; UserIdentity user = (UserIdentity) visit(context.user()); UserAuthOption authOption = context.authOption() == null ? 
null : (UserAuthOption) visit(context.authOption()); if (authOption == null) { userDesc = new UserDesc(user, "", false, user.getPos()); } else if (authOption.getAuthPlugin() == null) { stop = context.authOption().stop; userDesc = new UserDesc(user, authOption.getPassword(), authOption.isPasswordPlain(), createPos(start, stop)); } else { stop = context.authOption().stop; userDesc = new UserDesc(user, authOption.getAuthPlugin(), authOption.getAuthString(), authOption.isPasswordPlain(), createPos(start, stop)); } boolean ifNotExists = context.IF() != null; List<String> roles = new ArrayList<>(); if (context.roleList() != null) { roles.addAll(context.roleList().identifierOrString().stream().map(this::visit).map( s -> ((Identifier) s).getValue()).collect(toList())); } return new CreateUserStmt(ifNotExists, userDesc, roles, createPos(context)); } @Override public ParseNode visitDropUserStatement(StarRocksParser.DropUserStatementContext context) { UserIdentity user = (UserIdentity) visit(context.user()); return new DropUserStmt(user, context.EXISTS() != null, createPos(context)); } @Override public ParseNode visitAlterUserStatement(StarRocksParser.AlterUserStatementContext context) { UserDesc userDesc; UserIdentity user = (UserIdentity) visit(context.user()); Token start = context.user().start; Token stop; if (context.ROLE() != null) { List<String> roles = new ArrayList<>(); if (context.roleList() != null) { roles.addAll(context.roleList().identifierOrString().stream().map(this::visit).map( s -> ((Identifier) s).getValue()).collect(toList())); } SetRoleType setRoleType; if (context.ALL() != null) { setRoleType = SetRoleType.ALL; } else if (context.NONE() != null) { setRoleType = SetRoleType.NONE; } else { setRoleType = SetRoleType.ROLE; } return new SetDefaultRoleStmt(user, setRoleType, roles, createPos(context)); } stop = context.authOption().stop; UserAuthOption authOption = (UserAuthOption) visit(context.authOption()); if (authOption.getAuthPlugin() == null) { userDesc 
= new UserDesc(user, authOption.getPassword(), authOption.isPasswordPlain(), createPos(start, stop)); } else { userDesc = new UserDesc(user, authOption.getAuthPlugin(), authOption.getAuthString(), authOption.isPasswordPlain(), createPos(start, stop)); } return new AlterUserStmt(userDesc, context.EXISTS() != null, createPos(context)); } @Override public ParseNode visitShowUserStatement(StarRocksParser.ShowUserStatementContext context) { NodePosition pos = createPos(context); if (context.USERS() != null) { return new ShowUserStmt(true, pos); } else { return new ShowUserStmt(false, pos); } } @Override public ParseNode visitShowAllAuthentication(StarRocksParser.ShowAllAuthenticationContext context) { return new ShowAuthenticationStmt(null, true, createPos(context)); } @Override public ParseNode visitShowAuthenticationForUser(StarRocksParser.ShowAuthenticationForUserContext context) { NodePosition pos = createPos(context); if (context.user() != null) { return new ShowAuthenticationStmt((UserIdentity) visit(context.user()), false, pos); } else { return new ShowAuthenticationStmt(null, false, pos); } } @Override public ParseNode visitExecuteAsStatement(StarRocksParser.ExecuteAsStatementContext context) { boolean allowRevert = context.WITH() == null; return new ExecuteAsStmt((UserIdentity) visit(context.user()), allowRevert, createPos(context)); } @Override public ParseNode visitCreateRoleStatement(StarRocksParser.CreateRoleStatementContext context) { List<String> roles = context.roleList().identifierOrString().stream().map(this::visit).map( s -> ((Identifier) s).getValue()).collect(Collectors.toList()); String comment = context.comment() == null ? 
"" : ((StringLiteral) visit(context.comment())).getStringValue(); return new CreateRoleStmt(roles, context.NOT() != null, comment, createPos(context)); } @Override public ParseNode visitAlterRoleStatement(StarRocksParser.AlterRoleStatementContext context) { List<String> roles = context.roleList().identifierOrString().stream().map(this::visit).map( s -> ((Identifier) s).getValue()).collect(Collectors.toList()); StringLiteral stringLiteral = (StringLiteral) visit(context.string()); String comment = stringLiteral.getStringValue(); return new AlterRoleStmt(roles, context.IF() != null, comment); } @Override public ParseNode visitDropRoleStatement(StarRocksParser.DropRoleStatementContext context) { List<String> roles = new ArrayList<>(); roles.addAll(context.roleList().identifierOrString().stream().map(this::visit).map( s -> ((Identifier) s).getValue()).collect(toList())); return new DropRoleStmt(roles, context.EXISTS() != null, createPos(context)); } @Override public ParseNode visitShowRolesStatement(StarRocksParser.ShowRolesStatementContext context) { return new ShowRolesStmt(); } @Override public ParseNode visitGrantRoleToUser(StarRocksParser.GrantRoleToUserContext context) { List<String> roleNameList = new ArrayList<>(); for (StarRocksParser.IdentifierOrStringContext oneContext : context.identifierOrStringList() .identifierOrString()) { roleNameList.add(((Identifier) visit(oneContext)).getValue()); } return new GrantRoleStmt(roleNameList, (UserIdentity) visit(context.user()), createPos(context)); } @Override public ParseNode visitGrantRoleToRole(StarRocksParser.GrantRoleToRoleContext context) { List<String> roleNameList = new ArrayList<>(); for (StarRocksParser.IdentifierOrStringContext oneContext : context.identifierOrStringList() .identifierOrString()) { roleNameList.add(((Identifier) visit(oneContext)).getValue()); } return new GrantRoleStmt(roleNameList, ((Identifier) visit(context.identifierOrString())).getValue(), createPos(context)); } @Override public 
ParseNode visitRevokeRoleFromUser(StarRocksParser.RevokeRoleFromUserContext context) { List<String> roleNameList = new ArrayList<>(); for (StarRocksParser.IdentifierOrStringContext oneContext : context.identifierOrStringList() .identifierOrString()) { roleNameList.add(((Identifier) visit(oneContext)).getValue()); } return new RevokeRoleStmt(roleNameList, (UserIdentity) visit(context.user()), createPos(context)); } @Override public ParseNode visitRevokeRoleFromRole(StarRocksParser.RevokeRoleFromRoleContext context) { List<String> roleNameList = new ArrayList<>(); for (StarRocksParser.IdentifierOrStringContext oneContext : context.identifierOrStringList() .identifierOrString()) { roleNameList.add(((Identifier) visit(oneContext)).getValue()); } return new RevokeRoleStmt(roleNameList, ((Identifier) visit(context.identifierOrString())).getValue(), createPos(context)); } @Override public ParseNode visitSetRoleStatement(StarRocksParser.SetRoleStatementContext context) { List<String> roles = new ArrayList<>(); if (context.roleList() != null) { roles.addAll(context.roleList().identifierOrString().stream().map(this::visit).map( s -> ((Identifier) s).getValue()).collect(toList())); } SetRoleType setRoleType; if (context.ALL() != null) { setRoleType = SetRoleType.ALL; } else if (context.DEFAULT() != null) { setRoleType = SetRoleType.DEFAULT; } else if (context.NONE() != null) { setRoleType = SetRoleType.NONE; } else { setRoleType = SetRoleType.ROLE; } return new SetRoleStmt(setRoleType, roles, createPos(context)); } @Override public ParseNode visitSetDefaultRoleStatement(StarRocksParser.SetDefaultRoleStatementContext context) { List<String> roles = new ArrayList<>(); if (context.roleList() != null) { roles.addAll(context.roleList().identifierOrString().stream().map(this::visit).map( s -> ((Identifier) s).getValue()).collect(toList())); } SetRoleType setRoleType; if (context.ALL() != null) { setRoleType = SetRoleType.ALL; } else if (context.NONE() != null) { setRoleType = 
SetRoleType.NONE; } else { setRoleType = SetRoleType.ROLE; } return new SetDefaultRoleStmt((UserIdentity) visit(context.user()), setRoleType, roles, createPos(context)); } @Override public ParseNode visitShowGrantsStatement(StarRocksParser.ShowGrantsStatementContext context) { NodePosition pos = createPos(context); if (context.ROLE() != null) { Identifier role = (Identifier) visit(context.identifierOrString()); return new ShowGrantsStmt(null, role.getValue(), pos); } else { UserIdentity userId = context.user() == null ? null : (UserIdentity) visit(context.user()); return new ShowGrantsStmt(userId, null, pos); } } @Override public ParseNode visitAuthWithoutPlugin(StarRocksParser.AuthWithoutPluginContext context) { String password = ((StringLiteral) visit(context.string())).getStringValue(); boolean isPasswordPlain = context.PASSWORD() == null; return new UserAuthOption(password, null, null, isPasswordPlain, createPos(context)); } @Override public ParseNode visitAuthWithPlugin(StarRocksParser.AuthWithPluginContext context) { Identifier authPlugin = (Identifier) visit(context.identifierOrString()); String authString = context.string() == null ? 
null : ((StringLiteral) visit(context.string())).getStringValue(); boolean isPasswordPlain = context.AS() == null; return new UserAuthOption(null, authPlugin.getValue().toUpperCase(), authString, isPasswordPlain, createPos(context)); } @Override public ParseNode visitGrantRevokeClause(StarRocksParser.GrantRevokeClauseContext context) { NodePosition pos = createPos(context); if (context.user() != null) { UserIdentity user = (UserIdentity) visit(context.user()); return new GrantRevokeClause(user, null, pos); } else { String roleName = ((Identifier) visit(context.identifierOrString())).getValue(); return new GrantRevokeClause(null, roleName, pos); } } @Override public ParseNode visitGrantOnUser(StarRocksParser.GrantOnUserContext context) { List<String> privList = Collections.singletonList("IMPERSONATE"); GrantRevokeClause clause = (GrantRevokeClause) visit(context.grantRevokeClause()); List<UserIdentity> users = context.user().stream() .map(user -> (UserIdentity) visit(user)).collect(toList()); GrantRevokePrivilegeObjects objects = new GrantRevokePrivilegeObjects(); objects.setUserPrivilegeObjectList(users); return new GrantPrivilegeStmt(privList, "USER", clause, objects, context.WITH() != null, createPos(context)); } @Override public ParseNode visitRevokeOnUser(StarRocksParser.RevokeOnUserContext context) { List<String> privList = Collections.singletonList("IMPERSONATE"); GrantRevokeClause clause = (GrantRevokeClause) visit(context.grantRevokeClause()); List<UserIdentity> users = context.user().stream() .map(user -> (UserIdentity) visit(user)).collect(toList()); GrantRevokePrivilegeObjects objects = new GrantRevokePrivilegeObjects(); objects.setUserPrivilegeObjectList(users); return new RevokePrivilegeStmt(privList, "USER", clause, objects, createPos(context)); } @Override public ParseNode visitGrantOnTableBrief(StarRocksParser.GrantOnTableBriefContext context) { List<String> privilegeList = context.privilegeTypeList().privilegeType().stream().map( c -> ((Identifier) 
visit(c)).getValue().toUpperCase()).collect(toList()); return new GrantPrivilegeStmt(privilegeList, "TABLE", (GrantRevokeClause) visit(context.grantRevokeClause()), parsePrivilegeObjectNameList(context.privObjectNameList()), context.WITH() != null, createPos(context)); } @Override public ParseNode visitRevokeOnTableBrief(StarRocksParser.RevokeOnTableBriefContext context) { List<String> privilegeList = context.privilegeTypeList().privilegeType().stream().map( c -> ((Identifier) visit(c)).getValue().toUpperCase()).collect(toList()); return new RevokePrivilegeStmt(privilegeList, "TABLE", (GrantRevokeClause) visit(context.grantRevokeClause()), parsePrivilegeObjectNameList(context.privObjectNameList()), createPos(context)); } @Override public ParseNode visitGrantOnSystem(StarRocksParser.GrantOnSystemContext context) { List<String> privilegeList = context.privilegeTypeList().privilegeType().stream().map( c -> ((Identifier) visit(c)).getValue().toUpperCase()).collect(toList()); return new GrantPrivilegeStmt(privilegeList, "SYSTEM", (GrantRevokeClause) visit(context.grantRevokeClause()), null, context.WITH() != null, createPos(context)); } @Override public ParseNode visitRevokeOnSystem(StarRocksParser.RevokeOnSystemContext context) { List<String> privilegeList = context.privilegeTypeList().privilegeType().stream().map( c -> ((Identifier) visit(c)).getValue().toUpperCase()).collect(toList()); return new RevokePrivilegeStmt(privilegeList, "SYSTEM", (GrantRevokeClause) visit(context.grantRevokeClause()), null, createPos(context)); } @Override public ParseNode visitGrantOnPrimaryObj(StarRocksParser.GrantOnPrimaryObjContext context) { List<String> privilegeList = context.privilegeTypeList().privilegeType().stream().map( c -> ((Identifier) visit(c)).getValue().toUpperCase()).collect(toList()); String objectTypeUnResolved = ((Identifier) visit(context.privObjectType())).getValue().toUpperCase(); return new GrantPrivilegeStmt(privilegeList, objectTypeUnResolved, 
(GrantRevokeClause) visit(context.grantRevokeClause()), parsePrivilegeObjectNameList(context.privObjectNameList()), context.WITH() != null, createPos(context)); } @Override public ParseNode visitRevokeOnPrimaryObj(StarRocksParser.RevokeOnPrimaryObjContext context) { List<String> privilegeList = context.privilegeTypeList().privilegeType().stream().map( c -> ((Identifier) visit(c)).getValue().toUpperCase()).collect(toList()); String objectTypeUnResolved = ((Identifier) visit(context.privObjectType())).getValue().toUpperCase(); return new RevokePrivilegeStmt(privilegeList, objectTypeUnResolved, (GrantRevokeClause) visit(context.grantRevokeClause()), parsePrivilegeObjectNameList(context.privObjectNameList()), createPos(context)); } @Override public ParseNode visitGrantOnFunc(StarRocksParser.GrantOnFuncContext context) { List<String> privilegeList = context.privilegeTypeList().privilegeType().stream().map( c -> ((Identifier) visit(c)).getValue().toUpperCase()).collect(toList()); GrantRevokePrivilegeObjects objects = buildGrantRevokePrivWithFunction(context.privFunctionObjectNameList(), context.GLOBAL() != null); return new GrantPrivilegeStmt(privilegeList, extendPrivilegeType(context.GLOBAL() != null, "FUNCTION"), (GrantRevokeClause) visit(context.grantRevokeClause()), objects, context.WITH() != null, createPos(context)); } @Override public ParseNode visitRevokeOnFunc(StarRocksParser.RevokeOnFuncContext context) { List<String> privilegeList = context.privilegeTypeList().privilegeType().stream().map( c -> ((Identifier) visit(c)).getValue().toUpperCase()).collect(toList()); GrantRevokePrivilegeObjects objects = buildGrantRevokePrivWithFunction(context.privFunctionObjectNameList(), context.GLOBAL() != null); return new RevokePrivilegeStmt(privilegeList, extendPrivilegeType(context.GLOBAL() != null, "FUNCTION"), (GrantRevokeClause) visit(context.grantRevokeClause()), objects, createPos(context)); } private GrantRevokePrivilegeObjects buildGrantRevokePrivWithFunction( 
StarRocksParser.PrivFunctionObjectNameListContext context, boolean isGlobal) { List<Pair<FunctionName, FunctionArgsDef>> functions = new ArrayList<>(); int functionSize = context.qualifiedName().size(); List<StarRocksParser.TypeListContext> typeListContexts = context.typeList(); for (int i = 0; i < functionSize; ++i) { StarRocksParser.QualifiedNameContext qualifiedNameContext = context.qualifiedName(i); QualifiedName qualifiedName = getQualifiedName(qualifiedNameContext); FunctionName functionName; if (qualifiedName.getParts().size() == 1) { functionName = new FunctionName(qualifiedName.getParts().get(0)); } else if (qualifiedName.getParts().size() == 2) { functionName = new FunctionName(qualifiedName.getParts().get(0), qualifiedName.getParts().get(1)); } else { throw new SemanticException("Error function format " + qualifiedName); } if (isGlobal) { functionName.setAsGlobalFunction(); } FunctionArgsDef argsDef = getFunctionArgsDef(typeListContexts.get(i)); functions.add(Pair.create(functionName, argsDef)); } GrantRevokePrivilegeObjects objects = new GrantRevokePrivilegeObjects(); objects.setFunctions(functions); return objects; } public String extendPrivilegeType(boolean isGlobal, String type) { if (isGlobal) { if (type.equals("FUNCTIONS") || type.equals("FUNCTION")) { return "GLOBAL " + type; } } return type; } @Override public ParseNode visitGrantOnAll(StarRocksParser.GrantOnAllContext context) { List<String> privilegeList = context.privilegeTypeList().privilegeType().stream().map( c -> ((Identifier) visit(c)).getValue().toUpperCase()).collect(toList()); String objectTypeUnResolved = ((Identifier) visit(context.privObjectTypePlural())).getValue().toUpperCase(); GrantRevokePrivilegeObjects objects = new GrantRevokePrivilegeObjects(); ArrayList<String> tokenList; if (context.isAll != null) { tokenList = Lists.newArrayList("*", "*"); } else if (context.IN() != null) { String dbName = ((Identifier) visit(context.identifierOrString())).getValue(); tokenList = 
Lists.newArrayList(dbName, "*"); } else { tokenList = Lists.newArrayList("*"); } objects.setPrivilegeObjectNameTokensList(Collections.singletonList(tokenList)); GrantPrivilegeStmt grantPrivilegeStmt = new GrantPrivilegeStmt(privilegeList, objectTypeUnResolved, (GrantRevokeClause) visit(context.grantRevokeClause()), objects, context.WITH() != null, createPos(context)); grantPrivilegeStmt.setGrantOnAll(); return grantPrivilegeStmt; } @Override public ParseNode visitRevokeOnAll(StarRocksParser.RevokeOnAllContext context) { List<String> privilegeList = context.privilegeTypeList().privilegeType().stream().map( c -> ((Identifier) visit(c)).getValue().toUpperCase()).collect(toList()); String objectTypeUnResolved = ((Identifier) visit(context.privObjectTypePlural())).getValue().toUpperCase(); GrantRevokePrivilegeObjects objects = new GrantRevokePrivilegeObjects(); ArrayList<String> tokenList; if (context.isAll != null) { tokenList = Lists.newArrayList("*", "*"); } else if (context.IN() != null) { String dbName = ((Identifier) visit(context.identifierOrString())).getValue(); tokenList = Lists.newArrayList(dbName, "*"); } else { tokenList = Lists.newArrayList("*"); } objects.setPrivilegeObjectNameTokensList(Collections.singletonList(tokenList)); RevokePrivilegeStmt revokePrivilegeStmt = new RevokePrivilegeStmt(privilegeList, objectTypeUnResolved, (GrantRevokeClause) visit(context.grantRevokeClause()), objects, createPos(context)); revokePrivilegeStmt.setGrantOnAll(); return revokePrivilegeStmt; } @Override public ParseNode visitPrivilegeType(StarRocksParser.PrivilegeTypeContext context) { NodePosition pos = createPos(context); List<String> ps = new ArrayList<>(); for (int i = 0; i < context.getChildCount(); ++i) { ps.add(context.getChild(i).getText()); } return new Identifier(Joiner.on(" ").join(ps), pos); } @Override public ParseNode visitPrivObjectType(StarRocksParser.PrivObjectTypeContext context) { NodePosition pos = createPos(context); List<String> ps = new 
ArrayList<>(); for (int i = 0; i < context.getChildCount(); ++i) { ps.add(context.getChild(i).getText()); } return new Identifier(Joiner.on(" ").join(ps), pos); } @Override public ParseNode visitPrivObjectTypePlural(StarRocksParser.PrivObjectTypePluralContext context) { NodePosition pos = createPos(context); List<String> ps = new ArrayList<>(); for (int i = 0; i < context.getChildCount(); ++i) { ps.add(context.getChild(i).getText()); } return new Identifier(Joiner.on(" ").join(ps), pos); } private GrantRevokePrivilegeObjects parsePrivilegeObjectNameList( StarRocksParser.PrivObjectNameListContext context) { if (context == null) { return null; } GrantRevokePrivilegeObjects grantRevokePrivilegeObjects = new GrantRevokePrivilegeObjects(createPos(context)); List<List<String>> objectNameList = new ArrayList<>(); for (StarRocksParser.PrivObjectNameContext privObjectNameContext : context.privObjectName()) { objectNameList.add(privObjectNameContext.identifierOrStringOrStar().stream() .map(c -> ((Identifier) visit(c)).getValue()).collect(toList())); } grantRevokePrivilegeObjects.setPrivilegeObjectNameTokensList(objectNameList); return grantRevokePrivilegeObjects; } @Override public ParseNode visitCreateSecurityIntegrationStatement( StarRocksParser.CreateSecurityIntegrationStatementContext context) { String name = ((Identifier) visit(context.identifier())).getValue(); Map<String, String> propertyMap = new HashMap<>(); if (context.properties() != null) { List<Property> propertyList = visit(context.properties().property(), Property.class); for (Property property : propertyList) { propertyMap.put(property.getKey(), property.getValue()); } } return new CreateSecurityIntegrationStatement(name, propertyMap, createPos(context)); } @Override public ParseNode visitExpressionOrDefault(StarRocksParser.ExpressionOrDefaultContext context) { if (context.DEFAULT() != null) { return new DefaultValueExpr(createPos(context)); } else { return visit(context.expression()); } } @Override public 
ParseNode visitExpressionsWithDefault(StarRocksParser.ExpressionsWithDefaultContext context) {
    // Gather every expression (or DEFAULT placeholder) of this row into a single ValueList.
    ArrayList<Expr> row = Lists.newArrayList();
    for (StarRocksParser.ExpressionOrDefaultContext item : context.expressionOrDefault()) {
        row.add((Expr) visit(item));
    }
    return new ValueList(row, createPos(context));
}

// A lone expression used as a full statement: simply unwrap it.
@Override
public ParseNode visitExpressionSingleton(StarRocksParser.ExpressionSingletonContext context) {
    return visit(context.expression());
}

// NOT <expr> becomes a unary CompoundPredicate; the right child is unused (null).
@Override
public ParseNode visitLogicalNot(StarRocksParser.LogicalNotContext context) {
    Expr operand = (Expr) visit(context.expression());
    return new CompoundPredicate(CompoundPredicate.Operator.NOT, operand, null, createPos(context));
}

// <left> AND/OR <right> becomes a binary CompoundPredicate.
@Override
public ParseNode visitLogicalBinary(StarRocksParser.LogicalBinaryContext context) {
    Expr lhs = (Expr) visit(context.left);
    Expr rhs = (Expr) visit(context.right);
    return new CompoundPredicate(getLogicalBinaryOperator(context.operator), lhs, rhs, createPos(context));
}

// AND / && map to Operator.AND; any other logical-binary token is treated as OR.
private static CompoundPredicate.Operator getLogicalBinaryOperator(Token token) {
    int tokenType = token.getType();
    if (tokenType == StarRocksLexer.AND || tokenType == StarRocksLexer.LOGICAL_AND) {
        return CompoundPredicate.Operator.AND;
    }
    return CompoundPredicate.Operator.OR;
}

// Dispatch to whichever predicate alternative the grammar matched,
// falling back to the bare value expression.
@Override
public ParseNode visitPredicate(StarRocksParser.PredicateContext context) {
    if (context.predicateOperations() != null) {
        return visit(context.predicateOperations());
    }
    if (context.tupleInSubquery() != null) {
        return visit(context.tupleInSubquery());
    }
    return visit(context.valueExpression());
}

// IS [NOT] NULL: the boolean flag records whether NOT was present.
@Override
public ParseNode visitIsNull(StarRocksParser.IsNullContext context) {
    Expr child = (Expr) visit(context.booleanExpression());
    return new IsNullPredicate(child, context.NOT() != null, createPos(context));
}

@Override
public ParseNode visitComparison(StarRocksParser.ComparisonContext context) { BinaryType op = getComparisonOperator(((TerminalNode)
context.comparisonOperator().getChild(0)) .getSymbol()); return new BinaryPredicate(op, (Expr) visit(context.left), (Expr) visit(context.right), createPos(context)); } private static BinaryType getComparisonOperator(Token symbol) { switch (symbol.getType()) { case StarRocksParser.EQ: return BinaryType.EQ; case StarRocksParser.NEQ: return BinaryType.NE; case StarRocksParser.LT: return BinaryType.LT; case StarRocksParser.LTE: return BinaryType.LE; case StarRocksParser.GT: return BinaryType.GT; case StarRocksParser.GTE: return BinaryType.GE; default: return BinaryType.EQ_FOR_NULL; } } @Override public ParseNode visitInList(StarRocksParser.InListContext context) { boolean isNotIn = context.NOT() != null; return new InPredicate( (Expr) visit(context.value), visit(context.expressionList().expression(), Expr.class), isNotIn, createPos(context)); } @Override public ParseNode visitBetween(StarRocksParser.BetweenContext context) { boolean isNotBetween = context.NOT() != null; return new BetweenPredicate( (Expr) visit(context.value), (Expr) visit(context.lower), (Expr) visit(context.upper), isNotBetween, createPos(context)); } @Override public ParseNode visitLike(StarRocksParser.LikeContext context) { LikePredicate likePredicate; NodePosition pos = createPos(context); if (context.REGEXP() != null || context.RLIKE() != null) { likePredicate = new LikePredicate(LikePredicate.Operator.REGEXP, (Expr) visit(context.value), (Expr) visit(context.pattern), pos); } else { likePredicate = new LikePredicate( LikePredicate.Operator.LIKE, (Expr) visit(context.value), (Expr) visit(context.pattern), pos); } if (context.NOT() != null) { return new CompoundPredicate(CompoundPredicate.Operator.NOT, likePredicate, null, pos); } else { return likePredicate; } } @Override public ParseNode visitSimpleCase(StarRocksParser.SimpleCaseContext context) { return new CaseExpr( (Expr) visit(context.caseExpr), visit(context.whenClause(), CaseWhenClause.class), (Expr) visitIfPresent(context.elseExpression), 
createPos(context)); } @Override public ParseNode visitSearchedCase(StarRocksParser.SearchedCaseContext context) { return new CaseExpr( null, visit(context.whenClause(), CaseWhenClause.class), (Expr) visitIfPresent(context.elseExpression), createPos(context)); } @Override public ParseNode visitWhenClause(StarRocksParser.WhenClauseContext context) { return new CaseWhenClause((Expr) visit(context.condition), (Expr) visit(context.result), createPos(context)); } @Override public ParseNode visitArithmeticUnary(StarRocksParser.ArithmeticUnaryContext context) { Expr child = (Expr) visit(context.primaryExpression()); NodePosition pos = createPos(context); switch (context.operator.getType()) { case StarRocksLexer.MINUS_SYMBOL: if (child.isLiteral() && child.getType().isNumericType()) { try { ((LiteralExpr) child).swapSign(); } catch (NotImplementedException e) { throw new ParsingException(PARSER_ERROR_MSG.unsupportedExpr(child.toSql()), child.getPos()); } return child; } else { return new ArithmeticExpr(ArithmeticExpr.Operator.MULTIPLY, new IntLiteral(-1), child, pos); } case StarRocksLexer.PLUS_SYMBOL: return child; case StarRocksLexer.BITNOT: return new ArithmeticExpr(ArithmeticExpr.Operator.BITNOT, child, null, pos); default: return new CompoundPredicate(CompoundPredicate.Operator.NOT, child, null, pos); } } @Override public ParseNode visitArithmeticBinary(StarRocksParser.ArithmeticBinaryContext context) { Expr left = (Expr) visit(context.left); Expr right = (Expr) visit(context.right); NodePosition pos = createPos(context); if (left instanceof IntervalLiteral) { return new TimestampArithmeticExpr(getArithmeticBinaryOperator(context.operator), right, ((IntervalLiteral) left).getValue(), ((IntervalLiteral) left).getUnitIdentifier().getDescription(), true, pos); } if (right instanceof IntervalLiteral) { return new TimestampArithmeticExpr(getArithmeticBinaryOperator(context.operator), left, ((IntervalLiteral) right).getValue(), ((IntervalLiteral) 
right).getUnitIdentifier().getDescription(), false, pos); } return new ArithmeticExpr(getArithmeticBinaryOperator(context.operator), left, right, pos); } private static ArithmeticExpr.Operator getArithmeticBinaryOperator(Token operator) { switch (operator.getType()) { case StarRocksLexer.PLUS_SYMBOL: return ArithmeticExpr.Operator.ADD; case StarRocksLexer.MINUS_SYMBOL: return ArithmeticExpr.Operator.SUBTRACT; case StarRocksLexer.ASTERISK_SYMBOL: return ArithmeticExpr.Operator.MULTIPLY; case StarRocksLexer.SLASH_SYMBOL: return ArithmeticExpr.Operator.DIVIDE; case StarRocksLexer.PERCENT_SYMBOL: case StarRocksLexer.MOD: return ArithmeticExpr.Operator.MOD; case StarRocksLexer.INT_DIV: return ArithmeticExpr.Operator.INT_DIVIDE; case StarRocksLexer.BITAND: return ArithmeticExpr.Operator.BITAND; case StarRocksLexer.BITOR: return ArithmeticExpr.Operator.BITOR; case StarRocksLexer.BITXOR: return ArithmeticExpr.Operator.BITXOR; case StarRocksLexer.BIT_SHIFT_LEFT: return ArithmeticExpr.Operator.BIT_SHIFT_LEFT; case StarRocksLexer.BIT_SHIFT_RIGHT: return ArithmeticExpr.Operator.BIT_SHIFT_RIGHT; case StarRocksLexer.BIT_SHIFT_RIGHT_LOGICAL: return ArithmeticExpr.Operator.BIT_SHIFT_RIGHT_LOGICAL; default: throw new ParsingException(PARSER_ERROR_MSG.wrongTypeOfArgs(operator.getText()), new NodePosition(operator)); } } @Override public ParseNode visitOdbcFunctionCallExpression(StarRocksParser.OdbcFunctionCallExpressionContext context) { FunctionCallExpr functionCallExpr = (FunctionCallExpr) visit(context.functionCall()); OdbcScalarFunctionCall odbcScalarFunctionCall = new OdbcScalarFunctionCall(functionCallExpr); return odbcScalarFunctionCall.mappingFunction(); } private static List<Expr> getArgumentsForTimeSlice(Expr time, Expr value, String ident, String boundary) { List<Expr> exprs = Lists.newLinkedList(); exprs.add(time); addArgumentUseTypeInt(value, exprs); exprs.add(new StringLiteral(ident)); exprs.add(new StringLiteral(boundary)); return exprs; } private static void 
addArgumentUseTypeInt(Expr value, List<Expr> exprs) { try { if (value instanceof IntLiteral) { exprs.add(new IntLiteral(((IntLiteral) value).getValue(), Type.INT)); } else { exprs.add(value); } } catch (Exception e) { throw new IllegalArgumentException(String.format("Cast argument %s to int type failed.", value.toSql())); } } @Override @Override public ParseNode visitAggregationFunctionCall(StarRocksParser.AggregationFunctionCallContext context) { NodePosition pos = createPos(context); String functionName; boolean isGroupConcat = false; boolean isLegacyGroupConcat = false; boolean isDistinct = false; if (context.aggregationFunction().COUNT() != null) { functionName = FunctionSet.COUNT; } else if (context.aggregationFunction().AVG() != null) { functionName = FunctionSet.AVG; } else if (context.aggregationFunction().SUM() != null) { functionName = FunctionSet.SUM; } else if (context.aggregationFunction().MIN() != null) { functionName = FunctionSet.MIN; } else if (context.aggregationFunction().ARRAY_AGG() != null) { functionName = FunctionSet.ARRAY_AGG; } else if (context.aggregationFunction().ARRAY_AGG_DISTINCT() != null) { functionName = FunctionSet.ARRAY_AGG; isDistinct = true; } else if (context.aggregationFunction().GROUP_CONCAT() != null) { functionName = FunctionSet.GROUP_CONCAT; isGroupConcat = true; isLegacyGroupConcat = SqlModeHelper.check(sqlMode, SqlModeHelper.MODE_GROUP_CONCAT_LEGACY); } else { functionName = FunctionSet.MAX; } List<OrderByElement> orderByElements = new ArrayList<>(); if (context.aggregationFunction().ORDER() != null) { orderByElements = visit(context.aggregationFunction().sortItem(), OrderByElement.class); } List<String> hints = Lists.newArrayList(); if (context.aggregationFunction().bracketHint() != null) { hints = context.aggregationFunction().bracketHint().identifier().stream().map( RuleContext::getText).collect(Collectors.toList()); } if (context.aggregationFunction().setQuantifier() != null) { isDistinct = 
context.aggregationFunction().setQuantifier().DISTINCT() != null; } if (isDistinct && CollectionUtils.isEmpty(context.aggregationFunction().expression())) { throw new ParsingException(PARSER_ERROR_MSG.wrongNumOfArgs(functionName), pos); } List<Expr> exprs = visit(context.aggregationFunction().expression(), Expr.class); if (isGroupConcat && !exprs.isEmpty() && context.aggregationFunction().SEPARATOR() == null) { if (isLegacyGroupConcat) { if (exprs.size() == 1) { Expr sepExpr; String sep = ", "; sepExpr = new StringLiteral(sep, pos); exprs.add(sepExpr); } } else { Expr sepExpr; String sep = ","; sepExpr = new StringLiteral(sep, pos); exprs.add(sepExpr); } } if (!orderByElements.isEmpty()) { int exprSize = exprs.size(); if (isGroupConcat) { exprSize--; } for (OrderByElement orderByElement : orderByElements) { Expr by = orderByElement.getExpr(); if (by instanceof IntLiteral) { long ordinal = ((IntLiteral) by).getLongValue(); if (ordinal < 1 || ordinal > exprSize) { throw new ParsingException(format("ORDER BY position %s is not in %s output list", ordinal, functionName), pos); } by = exprs.get((int) ordinal - 1); orderByElement.setExpr(by); } } orderByElements = orderByElements.stream().filter(x -> !x.getExpr().isConstant()).collect(toList()); } if (CollectionUtils.isNotEmpty(orderByElements)) { orderByElements.stream().forEach(e -> exprs.add(e.getExpr())); } FunctionCallExpr functionCallExpr = new FunctionCallExpr(functionName, context.aggregationFunction().ASTERISK_SYMBOL() == null ? 
new FunctionParams(isDistinct, exprs, orderByElements) : FunctionParams.createStarParam(), pos); functionCallExpr = SyntaxSugars.parse(functionCallExpr); functionCallExpr.setHints(hints); if (context.over() != null) { return buildOverClause(functionCallExpr, context.over(), pos); } return functionCallExpr; } @Override public ParseNode visitWindowFunctionCall(StarRocksParser.WindowFunctionCallContext context) { FunctionCallExpr functionCallExpr = (FunctionCallExpr) visit(context.windowFunction()); return buildOverClause(functionCallExpr, context.over(), createPos(context)); } @Override public ParseNode visitWindowFunction(StarRocksParser.WindowFunctionContext context) { FunctionCallExpr functionCallExpr = new FunctionCallExpr(context.name.getText().toLowerCase(), new FunctionParams(false, visit(context.expression(), Expr.class)), createPos(context)); functionCallExpr = SyntaxSugars.parse(functionCallExpr); boolean ignoreNull = CollectionUtils.isNotEmpty(context.ignoreNulls()) && context.ignoreNulls().stream().anyMatch(Objects::nonNull); functionCallExpr.setIgnoreNulls(ignoreNull); return functionCallExpr; } private AnalyticExpr buildOverClause(FunctionCallExpr functionCallExpr, StarRocksParser.OverContext context, NodePosition pos) { functionCallExpr.setIsAnalyticFnCall(true); List<OrderByElement> orderByElements = new ArrayList<>(); if (context.ORDER() != null) { orderByElements = visit(context.sortItem(), OrderByElement.class); } List<Expr> partitionExprs = visit(context.partition, Expr.class); return new AnalyticExpr(functionCallExpr, partitionExprs, orderByElements, (AnalyticWindow) visitIfPresent(context.windowFrame()), context.bracketHint() == null ? 
null : context.bracketHint().identifier().stream() .map(RuleContext::getText).collect(toList()), pos); } @Override public ParseNode visitExtract(StarRocksParser.ExtractContext context) { String fieldString = context.identifier().getText(); return new FunctionCallExpr(fieldString, new FunctionParams(Lists.newArrayList((Expr) visit(context.valueExpression()))), createPos(context)); } @Override public ParseNode visitCast(StarRocksParser.CastContext context) { return new CastExpr(new TypeDef(getType(context.type())), (Expr) visit(context.expression()), createPos(context)); } @Override public ParseNode visitConvert(StarRocksParser.ConvertContext context) { return new CastExpr(new TypeDef(getType(context.type())), (Expr) visit(context.expression()), createPos(context)); } @Override public ParseNode visitInformationFunctionExpression(StarRocksParser.InformationFunctionExpressionContext context) { return new InformationFunction(context.name.getText().toUpperCase(), createPos(context)); } @Override public ParseNode visitSpecialDateTimeExpression(StarRocksParser.SpecialDateTimeExpressionContext context) { return new FunctionCallExpr(context.name.getText().toUpperCase(), Lists.newArrayList()); } @Override public ParseNode visitSpecialFunctionExpression(StarRocksParser.SpecialFunctionExpressionContext context) { NodePosition pos = createPos(context); if (context.CHAR() != null) { return new FunctionCallExpr("char", visit(context.expression(), Expr.class), pos); } else if (context.DAY() != null) { return new FunctionCallExpr("day", visit(context.expression(), Expr.class), pos); } else if (context.HOUR() != null) { return new FunctionCallExpr("hour", visit(context.expression(), Expr.class), pos); } else if (context.IF() != null) { return new FunctionCallExpr("if", visit(context.expression(), Expr.class), pos); } else if (context.LEFT() != null) { return new FunctionCallExpr("left", visit(context.expression(), Expr.class), pos); } else if (context.LIKE() != null) { return new 
FunctionCallExpr("like", visit(context.expression(), Expr.class), pos); } else if (context.MINUTE() != null) { return new FunctionCallExpr("minute", visit(context.expression(), Expr.class), pos); } else if (context.MOD() != null) { return new FunctionCallExpr("mod", visit(context.expression(), Expr.class), pos); } else if (context.MONTH() != null) { return new FunctionCallExpr("month", visit(context.expression(), Expr.class), pos); } else if (context.QUARTER() != null) { return new FunctionCallExpr("quarter", visit(context.expression(), Expr.class), pos); } else if (context.REGEXP() != null) { return new FunctionCallExpr("regexp", visit(context.expression(), Expr.class), pos); } else if (context.REPLACE() != null) { return new FunctionCallExpr("replace", visit(context.expression(), Expr.class), pos); } else if (context.RIGHT() != null) { return new FunctionCallExpr("right", visit(context.expression(), Expr.class), pos); } else if (context.RLIKE() != null) { return new FunctionCallExpr("regexp", visit(context.expression(), Expr.class), pos); } else if (context.SECOND() != null) { return new FunctionCallExpr("second", visit(context.expression(), Expr.class), pos); } else if (context.YEAR() != null) { return new FunctionCallExpr("year", visit(context.expression(), Expr.class), pos); } else if (context.PASSWORD() != null) { StringLiteral stringLiteral = (StringLiteral) visit(context.string()); return new StringLiteral(new String(MysqlPassword.makeScrambledPassword(stringLiteral.getValue())), pos); } else if (context.FLOOR() != null) { return new FunctionCallExpr("floor", visit(context.expression(), Expr.class), pos); } else if (context.CEIL() != null) { return new FunctionCallExpr("ceil", visit(context.expression(), Expr.class), pos); } String functionName = context.TIMESTAMPADD() != null ? 
"TIMESTAMPADD" : "TIMESTAMPDIFF"; UnitIdentifier e1 = (UnitIdentifier) visit(context.unitIdentifier()); Expr e2 = (Expr) visit(context.expression(0)); Expr e3 = (Expr) visit(context.expression(1)); return new TimestampArithmeticExpr(functionName, e3, e2, e1.getDescription(), pos); } @Override public ParseNode visitConcat(StarRocksParser.ConcatContext context) { Expr left = (Expr) visit(context.left); Expr right = (Expr) visit(context.right); return new FunctionCallExpr("concat", new FunctionParams(Lists.newArrayList(left, right)), createPos(context)); } @Override public ParseNode visitNullLiteral(StarRocksParser.NullLiteralContext context) { return new NullLiteral(createPos(context)); } @Override public ParseNode visitBooleanLiteral(StarRocksParser.BooleanLiteralContext context) { NodePosition pos = createPos(context); String value = context.getText(); return new BoolLiteral("TRUE".equalsIgnoreCase(value), pos); } @Override public ParseNode visitNumericLiteral(StarRocksParser.NumericLiteralContext context) { return visit(context.number()); } @Override public ParseNode visitIntegerValue(StarRocksParser.IntegerValueContext context) { NodePosition pos = createPos(context); try { BigInteger intLiteral = new BigInteger(context.getText()); if (intLiteral.compareTo(LONG_MAX) <= 0) { return new IntLiteral(intLiteral.longValue(), pos); } else if (intLiteral.compareTo(LARGEINT_MAX_ABS) <= 0) { return new LargeIntLiteral(intLiteral.toString(), pos); } else { throw new ParsingException(PARSER_ERROR_MSG.numOverflow(context.getText()), pos); } } catch (NumberFormatException | AnalysisException e) { throw new ParsingException(PARSER_ERROR_MSG.invalidNumFormat(context.getText()), pos); } } @Override public ParseNode visitDoubleValue(StarRocksParser.DoubleValueContext context) { NodePosition pos = createPos(context); try { if (SqlModeHelper.check(sqlMode, SqlModeHelper.MODE_DOUBLE_LITERAL)) { return new FloatLiteral(context.getText(), pos); } else { BigDecimal decimal = new 
BigDecimal(context.getText()); int precision = DecimalLiteral.getRealPrecision(decimal); int scale = DecimalLiteral.getRealScale(decimal); int integerPartWidth = precision - scale; if (integerPartWidth > 38) { return new FloatLiteral(context.getText(), pos); } return new DecimalLiteral(decimal, pos); } } catch (AnalysisException | NumberFormatException e) { throw new ParsingException(PARSER_ERROR_MSG.invalidNumFormat(context.getText()), pos); } } @Override public ParseNode visitDecimalValue(StarRocksParser.DecimalValueContext context) { NodePosition pos = createPos(context); try { if (SqlModeHelper.check(sqlMode, SqlModeHelper.MODE_DOUBLE_LITERAL)) { return new FloatLiteral(context.getText(), pos); } else { return new DecimalLiteral(context.getText(), pos); } } catch (AnalysisException e) { throw new ParsingException(PARSER_ERROR_MSG.invalidNumFormat(context.getText()), pos); } } @Override public ParseNode visitDateLiteral(StarRocksParser.DateLiteralContext context) { NodePosition pos = createPos(context); String value = ((StringLiteral) visit(context.string())).getValue(); try { if (context.DATE() != null) { return new DateLiteral(value, Type.DATE); } else { return new DateLiteral(value, Type.DATETIME); } } catch (AnalysisException e) { throw new ParsingException(PARSER_ERROR_MSG.invalidDateFormat(value), pos); } } @Override public ParseNode visitString(StarRocksParser.StringContext context) { String quotedString; NodePosition pos = createPos(context); if (context.SINGLE_QUOTED_TEXT() != null) { quotedString = context.SINGLE_QUOTED_TEXT().getText(); quotedString = quotedString.substring(1, quotedString.length() - 1).replace("''", "'"); } else { quotedString = context.DOUBLE_QUOTED_TEXT().getText(); quotedString = quotedString.substring(1, quotedString.length() - 1).replace("\"\"", "\""); } return new StringLiteral(escapeBackSlash(quotedString), pos); } @Override public ParseNode visitBinary(StarRocksParser.BinaryContext context) { String quotedText; if 
(context.BINARY_SINGLE_QUOTED_TEXT() != null) { quotedText = context.BINARY_SINGLE_QUOTED_TEXT().getText(); } else { quotedText = context.BINARY_DOUBLE_QUOTED_TEXT().getText(); } return new VarBinaryLiteral(quotedText.substring(2, quotedText.length() - 1), createPos(context)); } private static String escapeBackSlash(String str) { StringWriter writer = new StringWriter(); int strLen = str.length(); for (int i = 0; i < strLen; ++i) { char c = str.charAt(i); if (c == '\\' && (i + 1) < strLen) { switch (str.charAt(i + 1)) { case 'n': writer.append('\n'); break; case 't': writer.append('\t'); break; case 'r': writer.append('\r'); break; case 'b': writer.append('\b'); break; case '0': writer.append('\0'); break; case 'Z': writer.append('\032'); break; case '_': case '%': writer.append('\\'); /* Fall through */ default: writer.append(str.charAt(i + 1)); break; } i++; } else { writer.append(c); } } return writer.toString(); } @Override public ParseNode visitArrayConstructor(StarRocksParser.ArrayConstructorContext context) { NodePosition pos = createPos(context); Type type = null; if (context.arrayType() != null) { type = new ArrayType(getType(context.arrayType().type())); } List<Expr> exprs; if (context.expressionList() != null) { exprs = visit(context.expressionList().expression(), Expr.class); } else { exprs = Collections.emptyList(); } return new ArrayExpr(type, exprs, pos); } @Override public ParseNode visitMapExpression(StarRocksParser.MapExpressionContext context) { ArrayList<Expr> row = Lists.newArrayList(); Expr key = (Expr) visit(context.key); Expr value = (Expr) visit(context.value); row.add(key); row.add(value); return new ValueList(row, createPos(context)); } @Override public ParseNode visitMapConstructor(StarRocksParser.MapConstructorContext context) { NodePosition pos = createPos(context); Type type = Type.ANY_MAP; if (context.mapType() != null) { type = getMapType(context.mapType()); } List<Expr> exprs; if (context.mapExpressionList() != null) { 
List<ValueList> rowValues = visit(context.mapExpressionList().mapExpression(), ValueList.class); List<List<Expr>> rows = rowValues.stream().map(ValueList::getRow).collect(toList()); exprs = rows.stream().flatMap(Collection::stream).collect(Collectors.toList()); int num = exprs.size(); if (num % 2 == 1) { throw new ParsingException(PARSER_ERROR_MSG.wrongNumOfArgs(num, "map()", "Arguments must be in key/value pairs"), pos); } } else { exprs = Collections.emptyList(); } return new MapExpr(type, exprs, pos); } @Override public ParseNode visitCollectionSubscript(StarRocksParser.CollectionSubscriptContext context) { Expr value = (Expr) visit(context.value); Expr index = (Expr) visit(context.index); return new CollectionElementExpr(value, index, false); } @Override public ParseNode visitArraySlice(StarRocksParser.ArraySliceContext context) { throw new ParsingException(PARSER_ERROR_MSG.unsupportedExpr("array slice"), createPos(context)); /* Expr expr = (Expr) visit(context.primaryExpression()); IntLiteral lowerBound; if (context.start != null) { lowerBound = new IntLiteral(Long.parseLong(context.start.getText())); } else { lowerBound = new IntLiteral(0); } IntLiteral upperBound; if (context.end != null) { upperBound = new IntLiteral(Long.parseLong(context.end.getText())); } else { upperBound = new IntLiteral(-1); } return new ArraySliceExpr(expr, lowerBound, upperBound); */ } @Override public ParseNode visitInterval(StarRocksParser.IntervalContext context) { return new IntervalLiteral((Expr) visit(context.value), (UnitIdentifier) visit(context.from), createPos(context)); } @Override public ParseNode visitUnitIdentifier(StarRocksParser.UnitIdentifierContext context) { return new UnitIdentifier(context.getText(), createPos(context)); } @Override public ParseNode visitUnitBoundary(StarRocksParser.UnitBoundaryContext context) { return new UnitBoundary(context.getText(), createPos(context)); } @Override public ParseNode visitDereference(StarRocksParser.DereferenceContext ctx) { 
Expr base = (Expr) visit(ctx.base); NodePosition pos = createPos(ctx); String fieldName; if (ctx.DOT_IDENTIFIER() != null) { fieldName = ctx.DOT_IDENTIFIER().getText().substring(1); } else { fieldName = ((Identifier) visit(ctx.fieldName)).getValue(); } if (base instanceof SlotRef) { SlotRef tmp = (SlotRef) base; List<String> parts = new ArrayList<>(tmp.getQualifiedName().getParts()); parts.add(fieldName); return new SlotRef(QualifiedName.of(parts, pos)); } else if (base instanceof SubfieldExpr) { SubfieldExpr subfieldExpr = (SubfieldExpr) base; ImmutableList.Builder<String> builder = new ImmutableList.Builder<>(); for (String tmpFieldName : subfieldExpr.getFieldNames()) { builder.add(tmpFieldName); } builder.add(fieldName); return new SubfieldExpr(subfieldExpr.getChild(0), builder.build(), pos); } else { return new SubfieldExpr(base, ImmutableList.of(fieldName), pos); } } @Override public ParseNode visitColumnReference(StarRocksParser.ColumnReferenceContext context) { Identifier identifier = (Identifier) visit(context.identifier()); List<String> parts = new ArrayList<>(); parts.add(identifier.getValue()); QualifiedName qualifiedName = QualifiedName.of(parts, createPos(context)); return new SlotRef(qualifiedName); } @Override public ParseNode visitArrowExpression(StarRocksParser.ArrowExpressionContext context) { Expr expr = (Expr) visit(context.primaryExpression()); StringLiteral stringLiteral = (StringLiteral) visit(context.string()); return new ArrowExpr(expr, stringLiteral, createPos(context)); } @Override public ParseNode visitLambdaFunctionExpr(StarRocksParser.LambdaFunctionExprContext context) { List<String> names = Lists.newLinkedList(); if (context.identifierList() != null) { final List<Identifier> identifierList = visit(context.identifierList().identifier(), Identifier.class); names = identifierList.stream().map(Identifier::getValue).collect(toList()); } else { names.add(((Identifier) visit(context.identifier())).getValue()); } List<Expr> arguments = 
Lists.newLinkedList(); Expr expr = null; if (context.expression() != null) { expr = (Expr) visit(context.expression()); } else if (context.expressionList() != null) { List<Expr> exprs = visit(context.expressionList().expression(), Expr.class); if (exprs.size() != 2) { throw new IllegalArgumentException("The right part of map lambda functions can accept at most 2 " + "expressions, but there are " + exprs.size()); } expr = new MapExpr(Type.ANY_MAP, exprs); } arguments.add(expr); for (int i = 0; i < names.size(); ++i) { arguments.add(new LambdaArgument(names.get(i))); } return new LambdaFunctionExpr(arguments); } @Override public ParseNode visitUserVariable(StarRocksParser.UserVariableContext context) { String variable = ((Identifier) visit(context.identifierOrString())).getValue(); return new VariableExpr(variable, SetType.USER, createPos(context)); } @Override public ParseNode visitSystemVariable(StarRocksParser.SystemVariableContext context) { SetType setType = getVariableType(context.varType()); return new VariableExpr(((Identifier) visit(context.identifier())).getValue(), setType, createPos(context)); } @Override public ParseNode visitCollate(StarRocksParser.CollateContext context) { return visit(context.primaryExpression()); } @Override public ParseNode visitParenthesizedExpression(StarRocksParser.ParenthesizedExpressionContext context) { return visit(context.expression()); } @Override public ParseNode visitUnquotedIdentifier(StarRocksParser.UnquotedIdentifierContext context) { return new Identifier(context.getText(), createPos(context)); } @Override public ParseNode visitBackQuotedIdentifier(StarRocksParser.BackQuotedIdentifierContext context) { return new Identifier(context.getText().replace("`", ""), createPos(context)); } @Override public ParseNode visitDigitIdentifier(StarRocksParser.DigitIdentifierContext context) { return new Identifier(context.getText(), createPos(context)); } @Override public ParseNode 
visitDictionaryGetExpr(StarRocksParser.DictionaryGetExprContext context) { List<Expr> params = visit(context.expressionList().expression(), Expr.class); return new DictionaryGetExpr(params); } private static StatementBase.ExplainLevel getExplainType(StarRocksParser.ExplainDescContext context) { StatementBase.ExplainLevel explainLevel = StatementBase.ExplainLevel.NORMAL; if (context.LOGICAL() != null) { explainLevel = StatementBase.ExplainLevel.LOGICAL; } else if (context.ANALYZE() != null) { explainLevel = StatementBase.ExplainLevel.ANALYZE; } else if (context.VERBOSE() != null) { explainLevel = StatementBase.ExplainLevel.VERBOSE; } else if (context.COSTS() != null) { explainLevel = StatementBase.ExplainLevel.COST; } else if (context.SCHEDULER() != null) { explainLevel = StatementBase.ExplainLevel.SCHEDULER; } return explainLevel; } public static SetType getVariableType(StarRocksParser.VarTypeContext context) { if (context == null) { return null; } if (context.GLOBAL() != null) { return SetType.GLOBAL; } else if (context.VERBOSE() != null) { return SetType.VERBOSE; } else { return SetType.SESSION; } } @Override public ParseNode visitAssignment(StarRocksParser.AssignmentContext context) { String column = ((Identifier) visit(context.identifier())).getValue(); Expr expr = (Expr) visit(context.expressionOrDefault()); return new ColumnAssignment(column, expr, createPos(context)); } @Override public ParseNode visitPartitionDesc(StarRocksParser.PartitionDescContext context) { List<PartitionDesc> partitionDescList = new ArrayList<>(); StarRocksParser.IdentifierListContext identifierListContext = context.identifierList(); if (context.functionCall() != null) { for (StarRocksParser.RangePartitionDescContext rangePartitionDescContext : context.rangePartitionDesc()) { final PartitionDesc rangePartitionDesc = (PartitionDesc) visit(rangePartitionDescContext); partitionDescList.add(rangePartitionDesc); } FunctionCallExpr functionCallExpr = (FunctionCallExpr) 
visit(context.functionCall()); List<String> columnList = AnalyzerUtils.checkAndExtractPartitionCol(functionCallExpr, null); RangePartitionDesc rangePartitionDesc = new RangePartitionDesc(columnList, partitionDescList); return new ExpressionPartitionDesc(rangePartitionDesc, functionCallExpr); } List<Identifier> identifierList = visit(identifierListContext.identifier(), Identifier.class); if (context.LIST() == null && context.RANGE() == null) { List<String> columnList = identifierList.stream().map(Identifier::getValue).collect(toList()); return new ListPartitionDesc(columnList, new ArrayList<>()); } else { List<PartitionDesc> partitionDesc = visit(context.rangePartitionDesc(), PartitionDesc.class); return new RangePartitionDesc( identifierList.stream().map(Identifier::getValue).collect(toList()), partitionDesc, createPos(context)); } } @Override public ParseNode visitSingleRangePartition(StarRocksParser.SingleRangePartitionContext context) { PartitionKeyDesc partitionKeyDesc = (PartitionKeyDesc) visit(context.partitionKeyDesc()); boolean ifNotExists = context.IF() != null; Map<String, String> properties = null; if (context.propertyList() != null) { properties = new HashMap<>(); List<Property> propertyList = visit(context.propertyList().property(), Property.class); for (Property property : propertyList) { properties.put(property.getKey(), property.getValue()); } } return new SingleRangePartitionDesc(ifNotExists, ((Identifier) visit(context.identifier())).getValue(), partitionKeyDesc, properties, createPos(context)); } @Override public ParseNode visitMultiRangePartition(StarRocksParser.MultiRangePartitionContext context) { NodePosition pos = createPos(context); if (context.interval() != null) { IntervalLiteral intervalLiteral = (IntervalLiteral) visit(context.interval()); Expr expr = intervalLiteral.getValue(); long intervalVal; if (expr instanceof IntLiteral) { intervalVal = ((IntLiteral) expr).getLongValue(); } else { throw new 
ParsingException(PARSER_ERROR_MSG.unsupportedExprWithInfo(expr.toSql(), "RANGE DESC"), expr.getPos()); } return new MultiRangePartitionDesc( ((StringLiteral) visit(context.string(0))).getStringValue(), ((StringLiteral) visit(context.string(1))).getStringValue(), intervalVal, intervalLiteral.getUnitIdentifier().getDescription(), pos); } else { return new MultiRangePartitionDesc( ((StringLiteral) visit(context.string(0))).getStringValue(), ((StringLiteral) visit(context.string(1))).getStringValue(), Long.parseLong(context.INTEGER_VALUE().getText()), null, pos); } } @Override public ParseNode visitPartitionRangeDesc(StarRocksParser.PartitionRangeDescContext context) { return new PartitionRangeDesc( ((StringLiteral) visit(context.string(0))).getStringValue(), ((StringLiteral) visit(context.string(1))).getStringValue(), createPos(context)); } @Override public ParseNode visitSingleItemListPartitionDesc(StarRocksParser.SingleItemListPartitionDescContext context) { List<String> values = context.stringList().string().stream().map(c -> ((StringLiteral) visit(c)).getStringValue()) .collect(toList()); boolean ifNotExists = context.IF() != null; Map<String, String> properties = null; if (context.propertyList() != null) { properties = new HashMap<>(); List<Property> propertyList = visit(context.propertyList().property(), Property.class); for (Property property : propertyList) { properties.put(property.getKey(), property.getValue()); } } return new SingleItemListPartitionDesc(ifNotExists, ((Identifier) visit(context.identifier())).getValue(), values, properties, createPos(context)); } @Override public ParseNode visitMultiItemListPartitionDesc(StarRocksParser.MultiItemListPartitionDescContext context) { boolean ifNotExists = context.IF() != null; List<List<String>> multiValues = new ArrayList<>(); for (StarRocksParser.StringListContext stringListContext : context.stringList()) { List<String> values = stringListContext.string().stream().map(c -> ((StringLiteral) 
visit(c)).getStringValue()) .collect(toList()); multiValues.add(values); } Map<String, String> properties = null; if (context.propertyList() != null) { properties = new HashMap<>(); List<Property> propertyList = visit(context.propertyList().property(), Property.class); for (Property property : propertyList) { properties.put(property.getKey(), property.getValue()); } } return new MultiItemListPartitionDesc(ifNotExists, ((Identifier) visit(context.identifier())).getValue(), multiValues, properties, createPos(context)); } @Override public ParseNode visitPartitionKeyDesc(StarRocksParser.PartitionKeyDescContext context) { PartitionKeyDesc partitionKeyDesc; NodePosition pos = createPos(context); if (context.LESS() != null) { if (context.MAXVALUE() != null) { return PartitionKeyDesc.createMaxKeyDesc(); } List<PartitionValue> partitionValueList = visit(context.partitionValueList().get(0).partitionValue(), PartitionValue.class); partitionKeyDesc = new PartitionKeyDesc(partitionValueList, pos); } else { List<PartitionValue> lowerPartitionValueList = visit(context.partitionValueList().get(0).partitionValue(), PartitionValue.class); List<PartitionValue> upperPartitionValueList = visit(context.partitionValueList().get(1).partitionValue(), PartitionValue.class); partitionKeyDesc = new PartitionKeyDesc(lowerPartitionValueList, upperPartitionValueList, pos); } return partitionKeyDesc; } @Override public ParseNode visitPartitionValue(StarRocksParser.PartitionValueContext context) { NodePosition pos = createPos(context); if (context.MAXVALUE() != null) { return PartitionValue.MAX_VALUE; } else { return new PartitionValue(((StringLiteral) visit(context.string())).getStringValue(), pos); } } @Override public ParseNode visitDistributionDesc(StarRocksParser.DistributionDescContext context) { int buckets = 0; NodePosition pos = createPos(context); if (context.INTEGER_VALUE() != null) { buckets = Integer.parseInt(context.INTEGER_VALUE().getText()); } if (context.HASH() != null) { 
List<Identifier> identifierList = visit(context.identifierList().identifier(), Identifier.class); return new HashDistributionDesc(buckets, identifierList.stream().map(Identifier::getValue).collect(toList()), pos); } else { return new RandomDistributionDesc(buckets, pos); } } @Override public ParseNode visitRefreshSchemeDesc(StarRocksParser.RefreshSchemeDescContext context) { LocalDateTime startTime = LocalDateTime.now(); IntervalLiteral intervalLiteral = null; NodePosition pos = createPos(context); MaterializedView.RefreshMoment refreshMoment = Config.default_mv_refresh_immediate ? MaterializedView.RefreshMoment.IMMEDIATE : MaterializedView.RefreshMoment.DEFERRED; if (context.DEFERRED() != null) { refreshMoment = MaterializedView.RefreshMoment.DEFERRED; } else if (context.IMMEDIATE() != null) { refreshMoment = MaterializedView.RefreshMoment.IMMEDIATE; } if (context.ASYNC() != null) { boolean defineStartTime = false; if (context.START() != null) { NodePosition timePos = createPos(context.string()); StringLiteral stringLiteral = (StringLiteral) visit(context.string()); DateTimeFormatter dateTimeFormatter = null; try { dateTimeFormatter = DateUtils.probeFormat(stringLiteral.getStringValue()); LocalDateTime tempStartTime = DateUtils. 
parseStringWithDefaultHSM(stringLiteral.getStringValue(), dateTimeFormatter); startTime = tempStartTime; defineStartTime = true; } catch (AnalysisException e) { throw new ParsingException(PARSER_ERROR_MSG.invalidDateFormat(stringLiteral.getStringValue()), timePos); } } if (context.interval() != null) { intervalLiteral = (IntervalLiteral) visit(context.interval()); if (!(intervalLiteral.getValue() instanceof IntLiteral)) { String exprSql = intervalLiteral.getValue().toSql(); throw new ParsingException(PARSER_ERROR_MSG.unsupportedExprWithInfo(exprSql, "INTERVAL"), createPos(context.interval())); } } return new AsyncRefreshSchemeDesc(defineStartTime, startTime, intervalLiteral, refreshMoment, pos); } else if (context.MANUAL() != null) { return new ManualRefreshSchemeDesc(refreshMoment, pos); } else if (context.INCREMENTAL() != null) { return new IncrementalRefreshSchemeDesc(refreshMoment, pos); } return null; } @Override public ParseNode visitProperty(StarRocksParser.PropertyContext context) { return new Property( ((StringLiteral) visit(context.key)).getStringValue(), ((StringLiteral) visit(context.value)).getStringValue(), createPos(context)); } @Override public ParseNode visitOutfile(StarRocksParser.OutfileContext context) { Map<String, String> properties = new HashMap<>(); if (context.properties() != null) { List<Property> propertyList = visit(context.properties().property(), Property.class); for (Property property : propertyList) { properties.put(property.getKey(), property.getValue()); } } String format = null; if (context.fileFormat() != null) { if (context.fileFormat().identifier() != null) { format = ((Identifier) visit(context.fileFormat().identifier())).getValue(); } else if (context.fileFormat().string() != null) { format = ((StringLiteral) visit(context.fileFormat().string())).getStringValue(); } } return new OutFileClause( ((StringLiteral) visit(context.file)).getStringValue(), format, properties, createPos(context)); } @Override public ParseNode 
visitColumnNameWithComment(StarRocksParser.ColumnNameWithCommentContext context) { String comment = null; if (context.comment() != null) { comment = ((StringLiteral) visit(context.comment())).getStringValue(); } return new ColWithComment(((Identifier) visit(context.identifier())).getValue(), comment, createPos(context)); } @Override public ParseNode visitIdentifierOrStringOrStar(StarRocksParser.IdentifierOrStringOrStarContext context) { String s = null; if (context.identifier() != null) { return visit(context.identifier()); } else if (context.string() != null) { s = ((StringLiteral) visit(context.string())).getStringValue(); } else if (context.ASTERISK_SYMBOL() != null) { s = "*"; } return new Identifier(s, createPos(context)); } @Override public ParseNode visitIdentifierOrString(StarRocksParser.IdentifierOrStringContext context) { String s = null; if (context.identifier() != null) { return visit(context.identifier()); } else if (context.string() != null) { s = ((StringLiteral) visit(context.string())).getStringValue(); } return new Identifier(s, createPos(context)); } @Override public ParseNode visitUserWithHostAndBlanket(StarRocksParser.UserWithHostAndBlanketContext context) { Identifier user = (Identifier) visit(context.identifierOrString(0)); Identifier host = (Identifier) visit(context.identifierOrString(1)); return new UserIdentity(user.getValue(), host.getValue(), true, createPos(context), false); } @Override public ParseNode visitUserWithHost(StarRocksParser.UserWithHostContext context) { Identifier user = (Identifier) visit(context.identifierOrString(0)); Identifier host = (Identifier) visit(context.identifierOrString(1)); return new UserIdentity(user.getValue(), host.getValue(), false, createPos(context), false); } @Override public ParseNode visitUserWithoutHost(StarRocksParser.UserWithoutHostContext context) { Identifier user = (Identifier) visit(context.identifierOrString()); return new UserIdentity(user.getValue(), "%", false, createPos(context), 
false); } @Override public ParseNode visitPrepareStatement(StarRocksParser.PrepareStatementContext context) { String stmtName = context.identifier().getText(); StatementBase statement = null; if (context.prepareSql().statement() != null) { statement = (StatementBase) visitStatement(context.prepareSql().statement()); return new PrepareStmt(stmtName, statement, parameters); } else if (context.prepareSql().SINGLE_QUOTED_TEXT() != null) { String sql = context.prepareSql().SINGLE_QUOTED_TEXT().getText(); statement = SqlParser.parseSingleStatement(sql.substring(1, sql.length() - 1), sqlMode); if (null != statement && statement instanceof PrepareStmt) { PrepareStmt prepareStmt = (PrepareStmt) statement; return new PrepareStmt(stmtName, prepareStmt.getInnerStmt(), prepareStmt.getParameters()); } else { return new PrepareStmt(stmtName, statement, ImmutableList.of()); } } throw new ParsingException("error prepare sql"); } @Override public ParseNode visitDeallocateStatement(StarRocksParser.DeallocateStatementContext ctx) { return new DeallocateStmt(ctx.identifier().getText()); } @Override public ParseNode visitExecuteStatement(StarRocksParser.ExecuteStatementContext context) { String stmtName = context.identifier().getText(); List<StarRocksParser.IdentifierOrStringContext> queryStatementContext = context.identifierOrString(); List<Expr> variableExprs = new ArrayList<>(); if (context.identifierOrString() != null) { queryStatementContext.forEach(varNameContext -> { Identifier identifier = (Identifier) visit(varNameContext); variableExprs.add(new VariableExpr(identifier.getValue(), SetType.USER)); }); } return new ExecuteStmt(stmtName, variableExprs); } @Override public ParseNode visitParameter(StarRocksParser.ParameterContext ctx) { if (parameters == null) { parameters = new ArrayList<>(); } Parameter parameter = new Parameter(placeHolderSlotId++); parameters.add(parameter); return parameter; } @Override public ParseNode 
visitDecommissionDiskClause(StarRocksParser.DecommissionDiskClauseContext context) { throw new SemanticException("not support"); } @Override public ParseNode visitCancelDecommissionDiskClause(StarRocksParser.CancelDecommissionDiskClauseContext context) { throw new SemanticException("not support"); } @Override public ParseNode visitDisableDiskClause(StarRocksParser.DisableDiskClauseContext context) { throw new SemanticException("not support"); } @Override public ParseNode visitCancelDisableDiskClause(StarRocksParser.CancelDisableDiskClauseContext context) { throw new SemanticException("not support"); } private <T> List<T> visit(List<? extends ParserRuleContext> contexts, Class<T> clazz) { return contexts.stream() .map(this::visit) .map(clazz::cast) .collect(toList()); } private <T> List<T> visitIfPresent(List<? extends ParserRuleContext> contexts, Class<T> clazz) { if (contexts != null && contexts.size() != 0) { return contexts.stream() .map(this::visit) .map(clazz::cast) .collect(toList()); } else { return null; } } private ParseNode visitIfPresent(ParserRuleContext context) { if (context != null) { return visit(context); } else { return null; } } private FunctionArgsDef getFunctionArgsDef(StarRocksParser.TypeListContext typeList) { List<TypeDef> typeDefList = new ArrayList<>(); for (StarRocksParser.TypeContext typeContext : typeList.type()) { typeDefList.add(new TypeDef(getType(typeContext))); } boolean isVariadic = typeList.DOTDOTDOT() != null; return new FunctionArgsDef(typeDefList, isVariadic); } private String getIdentifierName(StarRocksParser.IdentifierContext context) { return ((Identifier) visit(context)).getValue(); } private QualifiedName getQualifiedName(StarRocksParser.QualifiedNameContext context) { List<String> parts = new ArrayList<>(); NodePosition pos = createPos(context); for (ParseTree c : context.children) { if (c instanceof TerminalNode) { TerminalNode t = (TerminalNode) c; if (t.getSymbol().getType() == StarRocksParser.DOT_IDENTIFIER) { 
parts.add(t.getText().substring(1)); } } else if (c instanceof StarRocksParser.IdentifierContext) { StarRocksParser.IdentifierContext identifierContext = (StarRocksParser.IdentifierContext) c; Identifier identifier = (Identifier) visit(identifierContext); parts.add(identifier.getValue()); } } return QualifiedName.of(parts, pos); } private TaskName qualifiedNameToTaskName(QualifiedName qualifiedName) { List<String> parts = qualifiedName.getParts(); if (parts.size() == 2) { return new TaskName(parts.get(0), parts.get(1), qualifiedName.getPos()); } else if (parts.size() == 1) { return new TaskName(null, parts.get(0), qualifiedName.getPos()); } else { throw new ParsingException(PARSER_ERROR_MSG.invalidTaskFormat(qualifiedName.toString()), qualifiedName.getPos()); } } private TableName qualifiedNameToTableName(QualifiedName qualifiedName) { List<String> parts = qualifiedName.getParts(); if (parts.size() == 3) { return new TableName(parts.get(0), parts.get(1), parts.get(2), qualifiedName.getPos()); } else if (parts.size() == 2) { return new TableName(null, qualifiedName.getParts().get(0), qualifiedName.getParts().get(1), qualifiedName.getPos()); } else if (parts.size() == 1) { return new TableName(null, null, qualifiedName.getParts().get(0), qualifiedName.getPos()); } else { throw new ParsingException(PARSER_ERROR_MSG.invalidTableFormat(qualifiedName.toString())); } } public Type getType(StarRocksParser.TypeContext context) { if (context.baseType() != null) { return getBaseType(context.baseType()); } else if (context.decimalType() != null) { return getDecimalType(context.decimalType()); } else if (context.arrayType() != null) { return getArrayType(context.arrayType()); } else if (context.structType() != null) { return getStructType(context.structType()); } else { return getMapType(context.mapType()); } } private Type getBaseType(StarRocksParser.BaseTypeContext context) { int length = -1; if (context.typeParameter() != null) { length = 
Integer.parseInt(context.typeParameter().INTEGER_VALUE().toString()); } if (context.STRING() != null || context.TEXT() != null) { ScalarType type = ScalarType.createVarcharType(ScalarType.DEFAULT_STRING_LENGTH); return type; } else if (context.VARCHAR() != null) { ScalarType type = ScalarType.createVarcharType(length); return type; } else if (context.CHAR() != null) { ScalarType type = ScalarType.createCharType(length); return type; } else if (context.SIGNED() != null) { return Type.INT; } else if (context.HLL() != null) { ScalarType type = ScalarType.createHllType(); return type; } else if (context.BINARY() != null || context.VARBINARY() != null) { ScalarType type = ScalarType.createVarbinary(length); return type; } else { return ScalarType.createType(context.getChild(0).getText()); } } public ScalarType getDecimalType(StarRocksParser.DecimalTypeContext context) { Integer precision = null; Integer scale = null; if (context.precision != null) { precision = Integer.parseInt(context.precision.getText()); if (context.scale != null) { scale = Integer.parseInt(context.scale.getText()); } } if (context.DECIMAL() != null || context.NUMBER() != null || context.NUMERIC() != null) { if (precision != null) { if (scale != null) { return ScalarType.createUnifiedDecimalType(precision, scale); } return ScalarType.createUnifiedDecimalType(precision); } return ScalarType.createUnifiedDecimalType(10, 0); } else if (context.DECIMAL32() != null || context.DECIMAL64() != null || context.DECIMAL128() != null) { try { ScalarType.checkEnableDecimalV3(); } catch (AnalysisException e) { throw new SemanticException(e.getMessage()); } final PrimitiveType primitiveType = PrimitiveType.valueOf(context.children.get(0).getText().toUpperCase()); if (precision != null) { if (scale != null) { return ScalarType.createDecimalV3Type(primitiveType, precision, scale); } return ScalarType.createDecimalV3Type(primitiveType, precision); } return ScalarType.createDecimalV3Type(primitiveType); } else if 
(context.DECIMALV2() != null) { if (precision != null) { if (scale != null) { return ScalarType.createDecimalV2Type(precision, scale); } return ScalarType.createDecimalV2Type(precision); } return ScalarType.createDecimalV2Type(); } else { throw new IllegalArgumentException("Unsupported type " + context.getText()); } } public ArrayType getArrayType(StarRocksParser.ArrayTypeContext context) { return new ArrayType(getType(context.type())); } public StructType getStructType(StarRocksParser.StructTypeContext context) { ArrayList<StructField> fields = new ArrayList<>(); List<StarRocksParser.SubfieldDescContext> subfields = context.subfieldDescs().subfieldDesc(); for (StarRocksParser.SubfieldDescContext type : subfields) { Identifier fieldIdentifier = (Identifier) visit(type.identifier()); String fieldName = fieldIdentifier.getValue(); fields.add(new StructField(fieldName, getType(type.type()), null)); } return new StructType(fields); } public MapType getMapType(StarRocksParser.MapTypeContext context) { Type keyType = getType(context.type(0)); if (!keyType.isValidMapKeyType()) { throw new ParsingException(PARSER_ERROR_MSG.unsupportedType(keyType.toString(), "for map's key, which should be base types"), createPos(context.type(0))); } Type valueType = getType(context.type(1)); return new MapType(keyType, valueType); } private LabelName qualifiedNameToLabelName(QualifiedName qualifiedName) { List<String> parts = qualifiedName.getParts(); if (parts.size() == 2) { return new LabelName(parts.get(0), parts.get(1), qualifiedName.getPos()); } else if (parts.size() == 1) { return new LabelName(null, parts.get(0), qualifiedName.getPos()); } else { throw new ParsingException(PARSER_ERROR_MSG.invalidTableFormat(qualifiedName.toString()), qualifiedName.getPos()); } } private Map<String, String> getProperties(StarRocksParser.PropertiesContext context) { Map<String, String> properties = new HashMap<>(); if (context != null && context.property() != null) { List<Property> propertyList = 
visit(context.property(), Property.class); for (Property property : propertyList) { properties.put(property.getKey(), property.getValue()); } } return properties; } private Map<String, String> getPropertyList(StarRocksParser.PropertyListContext context) { Map<String, String> properties = new TreeMap<>(String.CASE_INSENSITIVE_ORDER); if (context != null && context.property() != null) { List<Property> propertyList = visit(context.property(), Property.class); for (Property property : propertyList) { properties.put(property.getKey(), property.getValue()); } } return properties; } private List<ParseNode> getLoadPropertyList(List<StarRocksParser.LoadPropertiesContext> loadPropertiesContexts) { List<ParseNode> loadPropertyList = new ArrayList<>(); Preconditions.checkNotNull(loadPropertiesContexts, "load properties is null"); for (StarRocksParser.LoadPropertiesContext loadPropertiesContext : loadPropertiesContexts) { if (loadPropertiesContext.colSeparatorProperty() != null) { StringLiteral literal = (StringLiteral) visit(loadPropertiesContext.colSeparatorProperty().string()); loadPropertyList.add(new ColumnSeparator(literal.getValue(), literal.getPos())); } if (loadPropertiesContext.rowDelimiterProperty() != null) { StringLiteral literal = (StringLiteral) visit(loadPropertiesContext.rowDelimiterProperty().string()); loadPropertyList.add(new RowDelimiter(literal.getValue(), literal.getPos())); } if (loadPropertiesContext.importColumns() != null) { ImportColumnsStmt importColumnsStmt = (ImportColumnsStmt) visit(loadPropertiesContext.importColumns()); loadPropertyList.add(importColumnsStmt); } if (loadPropertiesContext.expression() != null) { Expr where = (Expr) visit(loadPropertiesContext.expression()); loadPropertyList.add(new ImportWhereStmt(where, where.getPos())); } if (loadPropertiesContext.partitionNames() != null) { loadPropertyList.add(visit(loadPropertiesContext.partitionNames())); } } return loadPropertyList; } @Override public ParseNode 
visitImportColumns(StarRocksParser.ImportColumnsContext importColumnsContext) { List<ImportColumnDesc> columns = new ArrayList<>(); for (StarRocksParser.QualifiedNameContext qualifiedNameContext : importColumnsContext.columnProperties().qualifiedName()) { String column = ((Identifier) (visit(qualifiedNameContext))).getValue(); ImportColumnDesc columnDesc = new ImportColumnDesc(column, null, createPos(qualifiedNameContext)); columns.add(columnDesc); } for (StarRocksParser.AssignmentContext assignmentContext : importColumnsContext.columnProperties().assignment()) { ColumnAssignment columnAssignment = (ColumnAssignment) (visit(assignmentContext)); Expr expr = columnAssignment.getExpr(); ImportColumnDesc columnDesc = new ImportColumnDesc(columnAssignment.getColumn(), expr, createPos(assignmentContext)); columns.add(columnDesc); } return new ImportColumnsStmt(columns, createPos(importColumnsContext)); } private Map<String, String> getJobProperties(StarRocksParser.JobPropertiesContext jobPropertiesContext) { Map<String, String> jobProperties = new HashMap<>(); if (jobPropertiesContext != null) { List<Property> propertyList = visit(jobPropertiesContext.properties().property(), Property.class); for (Property property : propertyList) { jobProperties.put(property.getKey(), property.getValue()); } } return jobProperties; } private Map<String, String> getDataSourceProperties( StarRocksParser.DataSourcePropertiesContext dataSourcePropertiesContext) { Map<String, String> dataSourceProperties = new HashMap<>(); if (dataSourcePropertiesContext != null) { List<Property> propertyList = visit(dataSourcePropertiesContext.propertyList().property(), Property.class); for (Property property : propertyList) { dataSourceProperties.put(property.getKey(), property.getValue()); } } return dataSourceProperties; } public List<String> getColumnNames(StarRocksParser.ColumnAliasesContext context) { if (context == null) { return null; } List<Identifier> targetColumnNamesIdentifiers = 
visitIfPresent(context.identifier(), Identifier.class); if (targetColumnNamesIdentifiers != null) { return targetColumnNamesIdentifiers.stream() .map(Identifier::getValue).map(String::toLowerCase).collect(toList()); } else { return null; } } private NodePosition createPos(ParserRuleContext context) { return createPos(context.start, context.stop); } private NodePosition createPos(Token start, Token stop) { if (start == null) { return NodePosition.ZERO; } if (stop == null) { return new NodePosition(start.getLine(), start.getCharPositionInLine()); } return new NodePosition(start, stop); } private LabelName createLabelName(StarRocksParser.QualifiedNameContext dbCtx, StarRocksParser.IdentifierContext nameCtx) { Token start = null; Token stop = null; String name = null; if (nameCtx != null) { name = getIdentifierName(nameCtx); start = nameCtx.start; stop = nameCtx.stop; } String dbName = null; if (dbCtx != null) { dbName = getQualifiedName(dbCtx).toString(); start = dbCtx.start; } return new LabelName(dbName, name, createPos(start, stop)); } private List<HintNode> extractQueryScopeHintNode() { List<HintNode> res = Lists.newArrayList(); for (Map.Entry<ParserRuleContext, List<HintNode>> entry : hintMap.entrySet()) { for (HintNode hintNode : entry.getValue()) { if (hintNode.getScope() == HintNode.Scope.QUERY) { res.add(hintNode); } } } Collections.sort(res); return res; } }
Good suggestion. Fixed.
private ImportedModel convertToOnnxAndImport(String modelName, String modelDir) { Path tempDir = null; try { tempDir = Files.createTempDirectory("tf2onnx"); String convertedPath = tempDir.toString() + File.separatorChar + "converted.onnx"; for (int opset : onnxOpsetsToTry) { log.info("Converting TensorFlow model '" + modelDir + "' to ONNX with opset " + opset + "..."); Pair<Integer, String> res = convertToOnnx(modelDir, convertedPath, opset); if (res.getFirst() == 0) { log.info("Conversion to ONNX with opset " + opset + " successful."); return onnxImporter.importModel(modelName, convertedPath); } log.info("Conversion to ONNX with opset " + opset + " failed. Reason: " + res.getSecond()); } throw new IllegalArgumentException("Unable to convert TensorFlow model in '" + modelDir + "' to ONNX."); } catch (IOException e) { throw new IllegalArgumentException("Conversion from TensorFlow to ONNX failed for '" + modelDir + "'"); } finally { if (tempDir != null) { IOUtils.recursiveDeleteDir(tempDir.toFile()); } } }
throw new IllegalArgumentException("Unable to convert TensorFlow model in '" + modelDir + "' to ONNX.");
private ImportedModel convertToOnnxAndImport(String modelName, String modelDir) { Path tempDir = null; try { tempDir = Files.createTempDirectory("tf2onnx"); String convertedPath = tempDir.toString() + File.separatorChar + "converted.onnx"; String outputOfLastConversionAttempt = ""; for (int opset : onnxOpsetsToTry) { log.info("Converting TensorFlow model '" + modelDir + "' to ONNX with opset " + opset + "..."); Pair<Integer, String> res = convertToOnnx(modelDir, convertedPath, opset); if (res.getFirst() == 0) { log.info("Conversion to ONNX with opset " + opset + " successful."); return onnxImporter.importModel(modelName, convertedPath); } log.fine("Conversion to ONNX with opset " + opset + " failed. Reason: " + res.getSecond()); outputOfLastConversionAttempt = res.getSecond(); } throw new IllegalArgumentException("Unable to convert TensorFlow model in '" + modelDir + "' to ONNX. " + "Reason: " + outputOfLastConversionAttempt); } catch (IOException e) { throw new IllegalArgumentException("Conversion from TensorFlow to ONNX failed for '" + modelDir + "'"); } finally { if (tempDir != null) { IOUtils.recursiveDeleteDir(tempDir.toFile()); } } }
class TensorFlowImporter extends ModelImporter { private static final Logger log = Logger.getLogger(TensorFlowImporter.class.getName()); private final static int[] onnxOpsetsToTry = {8, 10, 12}; private final OnnxImporter onnxImporter = new OnnxImporter(); @Override public boolean canImport(String modelPath) { File modelDir = new File(modelPath); if ( ! modelDir.isDirectory()) return false; for (File file : modelDir.listFiles()) { if (file.toString().endsWith(".pbtxt")) return true; if (file.toString().endsWith(".pb")) return true; } return false; } /** * Imports a saved TensorFlow model from a directory. * The model should be saved as a .pbtxt or .pb file. * * @param modelName the name of the model to import, consisting of characters in [A-Za-z0-9_] * @param modelDir the directory containing the TensorFlow model files to import */ @Override public ImportedModel importModel(String modelName, String modelDir) { return convertToOnnxAndImport(modelName, modelDir); } /** Imports a TensorFlow model - DEPRECATED */ public ImportedModel importModel(String modelName, String modelDir, SavedModelBundle model) { try { IntermediateGraph graph = GraphImporter.importGraph(modelName, model); return convertIntermediateGraphToModel(graph, modelDir); } catch (IOException e) { throw new IllegalArgumentException("Could not import TensorFlow model '" + model + "'", e); } } private Pair<Integer, String> convertToOnnx(String savedModel, String output, int opset) throws IOException { ProcessExecuter executer = new ProcessExecuter(); String job = "vespa-convert-tf2onnx --saved-model " + savedModel + " --output " + output + " --opset " + opset; return executer.exec(job); } }
class TensorFlowImporter extends ModelImporter { private static final Logger log = Logger.getLogger(TensorFlowImporter.class.getName()); private final static int[] onnxOpsetsToTry = {8, 10, 12}; private final OnnxImporter onnxImporter = new OnnxImporter(); @Override public boolean canImport(String modelPath) { File modelDir = new File(modelPath); if ( ! modelDir.isDirectory()) return false; for (File file : modelDir.listFiles()) { if (file.toString().endsWith(".pbtxt")) return true; if (file.toString().endsWith(".pb")) return true; } return false; } /** * Imports a saved TensorFlow model from a directory. * The model should be saved as a .pbtxt or .pb file. * * @param modelName the name of the model to import, consisting of characters in [A-Za-z0-9_] * @param modelDir the directory containing the TensorFlow model files to import */ @Override public ImportedModel importModel(String modelName, String modelDir) { return convertToOnnxAndImport(modelName, modelDir); } /** Imports a TensorFlow model - DEPRECATED */ public ImportedModel importModel(String modelName, String modelDir, SavedModelBundle model) { try { IntermediateGraph graph = GraphImporter.importGraph(modelName, model); return convertIntermediateGraphToModel(graph, modelDir); } catch (IOException e) { throw new IllegalArgumentException("Could not import TensorFlow model '" + model + "'", e); } } private Pair<Integer, String> convertToOnnx(String savedModel, String output, int opset) throws IOException { ProcessExecuter executer = new ProcessExecuter(); String job = "vespa-convert-tf2onnx --saved-model " + savedModel + " --output " + output + " --opset " + opset; return executer.exec(job); } }
Fixed.
private ImportedModel convertToOnnxAndImport(String modelName, String modelDir) { Path tempDir = null; try { tempDir = Files.createTempDirectory("tf2onnx"); String convertedPath = tempDir.toString() + File.separatorChar + "converted.onnx"; for (int opset : onnxOpsetsToTry) { log.info("Converting TensorFlow model '" + modelDir + "' to ONNX with opset " + opset + "..."); Pair<Integer, String> res = convertToOnnx(modelDir, convertedPath, opset); if (res.getFirst() == 0) { log.info("Conversion to ONNX with opset " + opset + " successful."); return onnxImporter.importModel(modelName, convertedPath); } log.info("Conversion to ONNX with opset " + opset + " failed. Reason: " + res.getSecond()); } throw new IllegalArgumentException("Unable to convert TensorFlow model in '" + modelDir + "' to ONNX."); } catch (IOException e) { throw new IllegalArgumentException("Conversion from TensorFlow to ONNX failed for '" + modelDir + "'"); } finally { if (tempDir != null) { IOUtils.recursiveDeleteDir(tempDir.toFile()); } } }
log.info("Conversion to ONNX with opset " + opset + " failed. Reason: " + res.getSecond());
private ImportedModel convertToOnnxAndImport(String modelName, String modelDir) { Path tempDir = null; try { tempDir = Files.createTempDirectory("tf2onnx"); String convertedPath = tempDir.toString() + File.separatorChar + "converted.onnx"; String outputOfLastConversionAttempt = ""; for (int opset : onnxOpsetsToTry) { log.info("Converting TensorFlow model '" + modelDir + "' to ONNX with opset " + opset + "..."); Pair<Integer, String> res = convertToOnnx(modelDir, convertedPath, opset); if (res.getFirst() == 0) { log.info("Conversion to ONNX with opset " + opset + " successful."); return onnxImporter.importModel(modelName, convertedPath); } log.fine("Conversion to ONNX with opset " + opset + " failed. Reason: " + res.getSecond()); outputOfLastConversionAttempt = res.getSecond(); } throw new IllegalArgumentException("Unable to convert TensorFlow model in '" + modelDir + "' to ONNX. " + "Reason: " + outputOfLastConversionAttempt); } catch (IOException e) { throw new IllegalArgumentException("Conversion from TensorFlow to ONNX failed for '" + modelDir + "'"); } finally { if (tempDir != null) { IOUtils.recursiveDeleteDir(tempDir.toFile()); } } }
class TensorFlowImporter extends ModelImporter { private static final Logger log = Logger.getLogger(TensorFlowImporter.class.getName()); private final static int[] onnxOpsetsToTry = {8, 10, 12}; private final OnnxImporter onnxImporter = new OnnxImporter(); @Override public boolean canImport(String modelPath) { File modelDir = new File(modelPath); if ( ! modelDir.isDirectory()) return false; for (File file : modelDir.listFiles()) { if (file.toString().endsWith(".pbtxt")) return true; if (file.toString().endsWith(".pb")) return true; } return false; } /** * Imports a saved TensorFlow model from a directory. * The model should be saved as a .pbtxt or .pb file. * * @param modelName the name of the model to import, consisting of characters in [A-Za-z0-9_] * @param modelDir the directory containing the TensorFlow model files to import */ @Override public ImportedModel importModel(String modelName, String modelDir) { return convertToOnnxAndImport(modelName, modelDir); } /** Imports a TensorFlow model - DEPRECATED */ public ImportedModel importModel(String modelName, String modelDir, SavedModelBundle model) { try { IntermediateGraph graph = GraphImporter.importGraph(modelName, model); return convertIntermediateGraphToModel(graph, modelDir); } catch (IOException e) { throw new IllegalArgumentException("Could not import TensorFlow model '" + model + "'", e); } } private Pair<Integer, String> convertToOnnx(String savedModel, String output, int opset) throws IOException { ProcessExecuter executer = new ProcessExecuter(); String job = "vespa-convert-tf2onnx --saved-model " + savedModel + " --output " + output + " --opset " + opset; return executer.exec(job); } }
class TensorFlowImporter extends ModelImporter { private static final Logger log = Logger.getLogger(TensorFlowImporter.class.getName()); private final static int[] onnxOpsetsToTry = {8, 10, 12}; private final OnnxImporter onnxImporter = new OnnxImporter(); @Override public boolean canImport(String modelPath) { File modelDir = new File(modelPath); if ( ! modelDir.isDirectory()) return false; for (File file : modelDir.listFiles()) { if (file.toString().endsWith(".pbtxt")) return true; if (file.toString().endsWith(".pb")) return true; } return false; } /** * Imports a saved TensorFlow model from a directory. * The model should be saved as a .pbtxt or .pb file. * * @param modelName the name of the model to import, consisting of characters in [A-Za-z0-9_] * @param modelDir the directory containing the TensorFlow model files to import */ @Override public ImportedModel importModel(String modelName, String modelDir) { return convertToOnnxAndImport(modelName, modelDir); } /** Imports a TensorFlow model - DEPRECATED */ public ImportedModel importModel(String modelName, String modelDir, SavedModelBundle model) { try { IntermediateGraph graph = GraphImporter.importGraph(modelName, model); return convertIntermediateGraphToModel(graph, modelDir); } catch (IOException e) { throw new IllegalArgumentException("Could not import TensorFlow model '" + model + "'", e); } } private Pair<Integer, String> convertToOnnx(String savedModel, String output, int opset) throws IOException { ProcessExecuter executer = new ProcessExecuter(); String job = "vespa-convert-tf2onnx --saved-model " + savedModel + " --output " + output + " --opset " + opset; return executer.exec(job); } }
If we want to we could exit, but I'm not sure it's always the best thing to do since there could be a fix to the problem rolling out simultaneously, we could be feeding to multiple clusters etc. I think in general this client wants to keep trying until timeout, for better or worse.
private ConnectionState cycle(ConnectionState connectionState) { switch(connectionState) { case DISCONNECTED: try { if (! currentConnection.connect()) { log.log(Level.WARNING, "Could not connect to endpoint: '" + endpoint + "'. Will re-try."); drainFirstDocumentsInQueueIfOld(); return ConnectionState.DISCONNECTED; } return ConnectionState.CONNECTED; } catch (Throwable throwable1) { drainFirstDocumentsInQueueIfOld(); log.log(Level.INFO, "Failed connecting to endpoint: '" + endpoint + "'. Will re-try connecting.", throwable1); executeProblemsCounter.incrementAndGet(); return ConnectionState.DISCONNECTED; } case CONNECTED: try { if (isStale(currentConnection)) return refreshConnection(connectionState); currentConnection.handshake(); successfulHandshakes.getAndIncrement(); } catch (ServerResponseException ser) { int code = ser.getResponseCode(); if (code == 401 || code == 403) { drainDocumentQueueWhenFailingPermanently(new Exception("Denied access by endpoint:" + ser.getResponseString())); log.log(Level.SEVERE, "Failed authentication or authorization with '" + endpoint + "': " + Exceptions.toMessageString(ser)); return ConnectionState.CONNECTED; } executeProblemsCounter.incrementAndGet(); log.log(Level.INFO, "Failed talking to endpoint. Handshake with server endpoint '" + endpoint + "' failed -- will re-try handshake: " + Exceptions.toMessageString(ser)); drainFirstDocumentsInQueueIfOld(); resultQueue.onEndpointError(new FeedProtocolException(ser.getResponseCode(), ser.getResponseString(), ser, endpoint)); return ConnectionState.CONNECTED; } catch (Throwable throwable) { executeProblemsCounter.incrementAndGet(); resultQueue.onEndpointError(new FeedConnectException(throwable, endpoint)); log.log(Level.INFO, "Failed talking to endpoint. Handshake with server endpoint '" + endpoint + "' failed. 
Will re-try handshake.", throwable); drainFirstDocumentsInQueueIfOld(); currentConnection.close(); return ConnectionState.DISCONNECTED; } return ConnectionState.SESSION_SYNCED; case SESSION_SYNCED: try { if (isStale(currentConnection)) return refreshConnection(connectionState); ProcessResponse processResponse = pullAndProcessData(pollIntervalUS); gatewayThrottler.handleCall(processResponse.transitiveErrorCount); } catch (ServerResponseException ser) { log.log(Level.INFO, "Problems while handing data over to endpoint '" + endpoint + "'. Will re-try. Endpoint responded with an unexpected HTTP response code.", ser); return ConnectionState.CONNECTED; } catch (Throwable e) { log.log(Level.INFO, "Connection level error handing data over to endpoint '" + endpoint + "'. Will re-try.", e); currentConnection.close(); return ConnectionState.DISCONNECTED; } return ConnectionState.SESSION_SYNCED; default: { log.severe("Should never get here."); currentConnection.close(); return ConnectionState.DISCONNECTED; } } }
return ConnectionState.CONNECTED;
private ConnectionState cycle(ConnectionState connectionState) { switch(connectionState) { case DISCONNECTED: try { if (! currentConnection.connect()) { log.log(Level.WARNING, "Could not connect to endpoint: '" + endpoint + "'. Will re-try."); drainFirstDocumentsInQueueIfOld(); return ConnectionState.DISCONNECTED; } return ConnectionState.CONNECTED; } catch (Throwable throwable1) { drainFirstDocumentsInQueueIfOld(); log.log(Level.INFO, "Failed connecting to endpoint: '" + endpoint + "'. Will re-try connecting.", throwable1); executeProblemsCounter.incrementAndGet(); return ConnectionState.DISCONNECTED; } case CONNECTED: try { if (isStale(currentConnection)) return refreshConnection(connectionState); currentConnection.handshake(); successfulHandshakes.getAndIncrement(); } catch (ServerResponseException ser) { int code = ser.getResponseCode(); if (code == 401 || code == 403) { drainDocumentQueueWhenFailingPermanently(new Exception("Denied access by endpoint:" + ser.getResponseString())); log.log(Level.SEVERE, "Failed authentication or authorization with '" + endpoint + "': " + Exceptions.toMessageString(ser)); return ConnectionState.CONNECTED; } executeProblemsCounter.incrementAndGet(); log.log(Level.INFO, "Failed talking to endpoint. Handshake with server endpoint '" + endpoint + "' failed -- will re-try handshake: " + Exceptions.toMessageString(ser)); drainFirstDocumentsInQueueIfOld(); resultQueue.onEndpointError(new FeedProtocolException(ser.getResponseCode(), ser.getResponseString(), ser, endpoint)); return ConnectionState.CONNECTED; } catch (Throwable throwable) { executeProblemsCounter.incrementAndGet(); resultQueue.onEndpointError(new FeedConnectException(throwable, endpoint)); log.log(Level.INFO, "Failed talking to endpoint. Handshake with server endpoint '" + endpoint + "' failed. 
Will re-try handshake.", throwable); drainFirstDocumentsInQueueIfOld(); currentConnection.close(); return ConnectionState.DISCONNECTED; } return ConnectionState.SESSION_SYNCED; case SESSION_SYNCED: try { if (isStale(currentConnection)) return refreshConnection(connectionState); ProcessResponse processResponse = pullAndProcessData(pollIntervalUS); gatewayThrottler.handleCall(processResponse.transitiveErrorCount); } catch (ServerResponseException ser) { log.log(Level.INFO, "Problems while handing data over to endpoint '" + endpoint + "'. Will re-try. Endpoint responded with an unexpected HTTP response code.", ser); return ConnectionState.CONNECTED; } catch (Throwable e) { log.log(Level.INFO, "Connection level error handing data over to endpoint '" + endpoint + "'. Will re-try.", e); currentConnection.close(); return ConnectionState.DISCONNECTED; } return ConnectionState.SESSION_SYNCED; default: { log.severe("Should never get here."); currentConnection.close(); return ConnectionState.DISCONNECTED; } } }
class ProcessResponse { private final int transitiveErrorCount; private final int processResultsCount; ProcessResponse(int transitiveErrorCount, int processResultsCount) { this.transitiveErrorCount = transitiveErrorCount; this.processResultsCount = processResultsCount; } }
class ProcessResponse { private final int transitiveErrorCount; private final int processResultsCount; ProcessResponse(int transitiveErrorCount, int processResultsCount) { this.transitiveErrorCount = transitiveErrorCount; this.processResultsCount = processResultsCount; } }
Thanks for dissolving my math! I just wrote down the function I had in mind :stuck_out_tongue_closed_eyes:
private boolean timeToPoll(GatewayConnection connection) { if (connection.lastPollTime() == null) return true; double connectionEndOfLife = connection.connectionTime().plus(connectionTimeToLive).toEpochMilli(); double connectionLastPolled = connection.lastPollTime().toEpochMilli(); return clock.millis() - connectionEndOfLife > 2 * (connectionLastPolled - connectionEndOfLife); }
return clock.millis() - connectionEndOfLife > 2 * (connectionLastPolled - connectionEndOfLife);
private boolean timeToPoll(GatewayConnection connection) { if (connection.lastPollTime() == null) return true; double connectionEndOfLife = connection.connectionTime().plus(connectionTimeToLive).toEpochMilli(); double connectionLastPolled = connection.lastPollTime().toEpochMilli(); return clock.millis() - connectionEndOfLife > 2 * (connectionLastPolled - connectionEndOfLife); }
class OldConnectionsDrainer implements Runnable { private final Endpoint endpoint; private final int clusterId; private final long pollIntervalUS; private final Duration connectionTimeToLive; private final Duration localQueueTimeOut; private final AtomicInteger statusReceivedCounter; private final EndpointResultQueue resultQueue; private final CountDownLatch stopSignal; private final Clock clock; /** * Previous connections on which we may have sent operations and are still waiting for the results * All connections in this are in state SESSION_SYNCED. */ private final List<GatewayConnection> connections = new CopyOnWriteArrayList<>(); OldConnectionsDrainer(Endpoint endpoint, int clusterId, long pollIntervalUS, Duration connectionTimeToLive, Duration localQueueTimeOut, AtomicInteger statusReceivedCounter, EndpointResultQueue resultQueue, CountDownLatch stopSignal, Clock clock) { this.endpoint = endpoint; this.clusterId = clusterId; this.pollIntervalUS = pollIntervalUS; this.connectionTimeToLive = connectionTimeToLive; this.localQueueTimeOut = localQueueTimeOut; this.statusReceivedCounter = statusReceivedCounter; this.resultQueue = resultQueue; this.stopSignal = stopSignal; this.clock = clock; } /** Add another old connection to this for draining */ public void add(GatewayConnection connection) { connections.add(connection); } @Override public void run() { while (stopSignal.getCount() > 0) { checkOldConnections(); try { Thread.sleep(pollIntervalUS/1000); } catch (InterruptedException e) { } } } public void checkOldConnections() { List<GatewayConnection> toRemove = null; for (GatewayConnection connection : connections) { if (closingTime(connection).isBefore(clock.instant())) { try { IOThread.processResponse(connection.poll(), endpoint, clusterId, statusReceivedCounter, resultQueue); connection.close(); if (toRemove == null) toRemove = new ArrayList<>(1); toRemove.add(connection); } catch (Exception e) { } } else if (timeToPoll(connection)) { try { 
IOThread.processResponse(connection.poll(), endpoint, clusterId, statusReceivedCounter, resultQueue); } catch (Exception e) { } } } if (toRemove != null) connections.removeAll(toRemove); } private Instant closingTime(GatewayConnection connection) { return connection.connectionTime().plus(connectionTimeToLive).plus(localQueueTimeOut); } private void close() { int size = resultQueue.getPendingSize(); if (size > 0) { log.info("We have outstanding operations (" + size + ") , trying to fetch responses."); for (GatewayConnection connection : connections) { try { IOThread.processResponse(connection.poll(), endpoint, clusterId, statusReceivedCounter, resultQueue); } catch (Throwable e) { log.log(Level.SEVERE, "Some failures while trying to get latest responses from vespa.", e); } } } for (GatewayConnection oldConnection : connections) oldConnection.close(); } /** For testing. Returns the old connections of this. */ public List<GatewayConnection> connections() { return Collections.unmodifiableList(connections); } }
class OldConnectionsDrainer implements Runnable { private final Endpoint endpoint; private final int clusterId; private final long pollIntervalUS; private final Duration connectionTimeToLive; private final Duration localQueueTimeOut; private final AtomicInteger statusReceivedCounter; private final EndpointResultQueue resultQueue; private final CountDownLatch stopSignal; private final Clock clock; /** * Previous connections on which we may have sent operations and are still waiting for the results * All connections in this are in state SESSION_SYNCED. */ private final List<GatewayConnection> connections = new CopyOnWriteArrayList<>(); OldConnectionsDrainer(Endpoint endpoint, int clusterId, long pollIntervalUS, Duration connectionTimeToLive, Duration localQueueTimeOut, AtomicInteger statusReceivedCounter, EndpointResultQueue resultQueue, CountDownLatch stopSignal, Clock clock) { this.endpoint = endpoint; this.clusterId = clusterId; this.pollIntervalUS = pollIntervalUS; this.connectionTimeToLive = connectionTimeToLive; this.localQueueTimeOut = localQueueTimeOut; this.statusReceivedCounter = statusReceivedCounter; this.resultQueue = resultQueue; this.stopSignal = stopSignal; this.clock = clock; } /** Add another old connection to this for draining */ public void add(GatewayConnection connection) { connections.add(connection); } @Override public void run() { while (stopSignal.getCount() > 0) { checkOldConnections(); try { Thread.sleep(pollIntervalUS/1000); } catch (InterruptedException e) { } } } public void checkOldConnections() { List<GatewayConnection> toRemove = null; for (GatewayConnection connection : connections) { if (closingTime(connection).isBefore(clock.instant())) { try { IOThread.processResponse(connection.poll(), endpoint, clusterId, statusReceivedCounter, resultQueue); connection.close(); if (toRemove == null) toRemove = new ArrayList<>(1); toRemove.add(connection); } catch (Exception e) { } } else if (timeToPoll(connection)) { try { 
IOThread.processResponse(connection.poll(), endpoint, clusterId, statusReceivedCounter, resultQueue); } catch (Exception e) { } } } if (toRemove != null) connections.removeAll(toRemove); } private Instant closingTime(GatewayConnection connection) { return connection.connectionTime().plus(connectionTimeToLive).plus(localQueueTimeOut); } private void close() { int size = resultQueue.getPendingSize(); if (size > 0) { log.info("We have outstanding operations (" + size + ") , trying to fetch responses."); for (GatewayConnection connection : connections) { try { IOThread.processResponse(connection.poll(), endpoint, clusterId, statusReceivedCounter, resultQueue); } catch (Throwable e) { log.log(Level.SEVERE, "Some failures while trying to get latest responses from vespa.", e); } } } for (GatewayConnection oldConnection : connections) oldConnection.close(); } /** For testing. Returns the old connections of this. */ public List<GatewayConnection> connections() { return Collections.unmodifiableList(connections); } }
😜
private boolean timeToPoll(GatewayConnection connection) { if (connection.lastPollTime() == null) return true; double connectionEndOfLife = connection.connectionTime().plus(connectionTimeToLive).toEpochMilli(); double connectionLastPolled = connection.lastPollTime().toEpochMilli(); return clock.millis() - connectionEndOfLife > 2 * (connectionLastPolled - connectionEndOfLife); }
return clock.millis() - connectionEndOfLife > 2 * (connectionLastPolled - connectionEndOfLife);
private boolean timeToPoll(GatewayConnection connection) { if (connection.lastPollTime() == null) return true; double connectionEndOfLife = connection.connectionTime().plus(connectionTimeToLive).toEpochMilli(); double connectionLastPolled = connection.lastPollTime().toEpochMilli(); return clock.millis() - connectionEndOfLife > 2 * (connectionLastPolled - connectionEndOfLife); }
class OldConnectionsDrainer implements Runnable { private final Endpoint endpoint; private final int clusterId; private final long pollIntervalUS; private final Duration connectionTimeToLive; private final Duration localQueueTimeOut; private final AtomicInteger statusReceivedCounter; private final EndpointResultQueue resultQueue; private final CountDownLatch stopSignal; private final Clock clock; /** * Previous connections on which we may have sent operations and are still waiting for the results * All connections in this are in state SESSION_SYNCED. */ private final List<GatewayConnection> connections = new CopyOnWriteArrayList<>(); OldConnectionsDrainer(Endpoint endpoint, int clusterId, long pollIntervalUS, Duration connectionTimeToLive, Duration localQueueTimeOut, AtomicInteger statusReceivedCounter, EndpointResultQueue resultQueue, CountDownLatch stopSignal, Clock clock) { this.endpoint = endpoint; this.clusterId = clusterId; this.pollIntervalUS = pollIntervalUS; this.connectionTimeToLive = connectionTimeToLive; this.localQueueTimeOut = localQueueTimeOut; this.statusReceivedCounter = statusReceivedCounter; this.resultQueue = resultQueue; this.stopSignal = stopSignal; this.clock = clock; } /** Add another old connection to this for draining */ public void add(GatewayConnection connection) { connections.add(connection); } @Override public void run() { while (stopSignal.getCount() > 0) { checkOldConnections(); try { Thread.sleep(pollIntervalUS/1000); } catch (InterruptedException e) { } } } public void checkOldConnections() { List<GatewayConnection> toRemove = null; for (GatewayConnection connection : connections) { if (closingTime(connection).isBefore(clock.instant())) { try { IOThread.processResponse(connection.poll(), endpoint, clusterId, statusReceivedCounter, resultQueue); connection.close(); if (toRemove == null) toRemove = new ArrayList<>(1); toRemove.add(connection); } catch (Exception e) { } } else if (timeToPoll(connection)) { try { 
IOThread.processResponse(connection.poll(), endpoint, clusterId, statusReceivedCounter, resultQueue); } catch (Exception e) { } } } if (toRemove != null) connections.removeAll(toRemove); } private Instant closingTime(GatewayConnection connection) { return connection.connectionTime().plus(connectionTimeToLive).plus(localQueueTimeOut); } private void close() { int size = resultQueue.getPendingSize(); if (size > 0) { log.info("We have outstanding operations (" + size + ") , trying to fetch responses."); for (GatewayConnection connection : connections) { try { IOThread.processResponse(connection.poll(), endpoint, clusterId, statusReceivedCounter, resultQueue); } catch (Throwable e) { log.log(Level.SEVERE, "Some failures while trying to get latest responses from vespa.", e); } } } for (GatewayConnection oldConnection : connections) oldConnection.close(); } /** For testing. Returns the old connections of this. */ public List<GatewayConnection> connections() { return Collections.unmodifiableList(connections); } }
class OldConnectionsDrainer implements Runnable { private final Endpoint endpoint; private final int clusterId; private final long pollIntervalUS; private final Duration connectionTimeToLive; private final Duration localQueueTimeOut; private final AtomicInteger statusReceivedCounter; private final EndpointResultQueue resultQueue; private final CountDownLatch stopSignal; private final Clock clock; /** * Previous connections on which we may have sent operations and are still waiting for the results * All connections in this are in state SESSION_SYNCED. */ private final List<GatewayConnection> connections = new CopyOnWriteArrayList<>(); OldConnectionsDrainer(Endpoint endpoint, int clusterId, long pollIntervalUS, Duration connectionTimeToLive, Duration localQueueTimeOut, AtomicInteger statusReceivedCounter, EndpointResultQueue resultQueue, CountDownLatch stopSignal, Clock clock) { this.endpoint = endpoint; this.clusterId = clusterId; this.pollIntervalUS = pollIntervalUS; this.connectionTimeToLive = connectionTimeToLive; this.localQueueTimeOut = localQueueTimeOut; this.statusReceivedCounter = statusReceivedCounter; this.resultQueue = resultQueue; this.stopSignal = stopSignal; this.clock = clock; } /** Add another old connection to this for draining */ public void add(GatewayConnection connection) { connections.add(connection); } @Override public void run() { while (stopSignal.getCount() > 0) { checkOldConnections(); try { Thread.sleep(pollIntervalUS/1000); } catch (InterruptedException e) { } } } public void checkOldConnections() { List<GatewayConnection> toRemove = null; for (GatewayConnection connection : connections) { if (closingTime(connection).isBefore(clock.instant())) { try { IOThread.processResponse(connection.poll(), endpoint, clusterId, statusReceivedCounter, resultQueue); connection.close(); if (toRemove == null) toRemove = new ArrayList<>(1); toRemove.add(connection); } catch (Exception e) { } } else if (timeToPoll(connection)) { try { 
IOThread.processResponse(connection.poll(), endpoint, clusterId, statusReceivedCounter, resultQueue); } catch (Exception e) { } } } if (toRemove != null) connections.removeAll(toRemove); } private Instant closingTime(GatewayConnection connection) { return connection.connectionTime().plus(connectionTimeToLive).plus(localQueueTimeOut); } private void close() { int size = resultQueue.getPendingSize(); if (size > 0) { log.info("We have outstanding operations (" + size + ") , trying to fetch responses."); for (GatewayConnection connection : connections) { try { IOThread.processResponse(connection.poll(), endpoint, clusterId, statusReceivedCounter, resultQueue); } catch (Throwable e) { log.log(Level.SEVERE, "Some failures while trying to get latest responses from vespa.", e); } } } for (GatewayConnection oldConnection : connections) oldConnection.close(); } /** For testing. Returns the old connections of this. */ public List<GatewayConnection> connections() { return Collections.unmodifiableList(connections); } }
Yep.
private ConnectionState cycle(ConnectionState connectionState) { switch(connectionState) { case DISCONNECTED: try { if (! currentConnection.connect()) { log.log(Level.WARNING, "Could not connect to endpoint: '" + endpoint + "'. Will re-try."); drainFirstDocumentsInQueueIfOld(); return ConnectionState.DISCONNECTED; } return ConnectionState.CONNECTED; } catch (Throwable throwable1) { drainFirstDocumentsInQueueIfOld(); log.log(Level.INFO, "Failed connecting to endpoint: '" + endpoint + "'. Will re-try connecting.", throwable1); executeProblemsCounter.incrementAndGet(); return ConnectionState.DISCONNECTED; } case CONNECTED: try { if (isStale(currentConnection)) return refreshConnection(connectionState); currentConnection.handshake(); successfulHandshakes.getAndIncrement(); } catch (ServerResponseException ser) { int code = ser.getResponseCode(); if (code == 401 || code == 403) { drainDocumentQueueWhenFailingPermanently(new Exception("Denied access by endpoint:" + ser.getResponseString())); log.log(Level.SEVERE, "Failed authentication or authorization with '" + endpoint + "': " + Exceptions.toMessageString(ser)); return ConnectionState.CONNECTED; } executeProblemsCounter.incrementAndGet(); log.log(Level.INFO, "Failed talking to endpoint. Handshake with server endpoint '" + endpoint + "' failed -- will re-try handshake: " + Exceptions.toMessageString(ser)); drainFirstDocumentsInQueueIfOld(); resultQueue.onEndpointError(new FeedProtocolException(ser.getResponseCode(), ser.getResponseString(), ser, endpoint)); return ConnectionState.CONNECTED; } catch (Throwable throwable) { executeProblemsCounter.incrementAndGet(); resultQueue.onEndpointError(new FeedConnectException(throwable, endpoint)); log.log(Level.INFO, "Failed talking to endpoint. Handshake with server endpoint '" + endpoint + "' failed. 
Will re-try handshake.", throwable); drainFirstDocumentsInQueueIfOld(); currentConnection.close(); return ConnectionState.DISCONNECTED; } return ConnectionState.SESSION_SYNCED; case SESSION_SYNCED: try { if (isStale(currentConnection)) return refreshConnection(connectionState); ProcessResponse processResponse = pullAndProcessData(pollIntervalUS); gatewayThrottler.handleCall(processResponse.transitiveErrorCount); } catch (ServerResponseException ser) { log.log(Level.INFO, "Problems while handing data over to endpoint '" + endpoint + "'. Will re-try. Endpoint responded with an unexpected HTTP response code.", ser); return ConnectionState.CONNECTED; } catch (Throwable e) { log.log(Level.INFO, "Connection level error handing data over to endpoint '" + endpoint + "'. Will re-try.", e); currentConnection.close(); return ConnectionState.DISCONNECTED; } return ConnectionState.SESSION_SYNCED; default: { log.severe("Should never get here."); currentConnection.close(); return ConnectionState.DISCONNECTED; } } }
return ConnectionState.CONNECTED;
/**
 * Runs one step of the endpoint-connection state machine and returns the next state.
 *
 * States and transitions visible here:
 *   DISCONNECTED   -> connect();   success -> CONNECTED, failure -> DISCONNECTED (retry)
 *   CONNECTED      -> handshake(); success -> SESSION_SYNCED
 *   SESSION_SYNCED -> pullAndProcessData(); stays SESSION_SYNCED while healthy
 *
 * Any connection-level failure closes the current connection and drops back to
 * DISCONNECTED; protocol-level failures (ServerResponseException) retry from CONNECTED.
 *
 * @param connectionState the state reached by the previous cycle
 * @return the state to use for the next cycle
 */
private ConnectionState cycle(ConnectionState connectionState) {
    switch(connectionState) {
        case DISCONNECTED:
            try {
                if (! currentConnection.connect()) {
                    log.log(Level.WARNING, "Could not connect to endpoint: '" + endpoint + "'. Will re-try.");
                    // Connection failed: old documents in the queue may never be sent; time them out.
                    drainFirstDocumentsInQueueIfOld();
                    return ConnectionState.DISCONNECTED;
                }
                return ConnectionState.CONNECTED;
            } catch (Throwable throwable1) {
                drainFirstDocumentsInQueueIfOld();
                log.log(Level.INFO, "Failed connecting to endpoint: '" + endpoint + "'. Will re-try connecting.", throwable1);
                executeProblemsCounter.incrementAndGet();
                return ConnectionState.DISCONNECTED;
            }
        case CONNECTED:
            try {
                // Replace connections that have been open too long before handshaking on them.
                if (isStale(currentConnection))
                    return refreshConnection(connectionState);
                currentConnection.handshake();
                successfulHandshakes.getAndIncrement();
            } catch (ServerResponseException ser) {
                int code = ser.getResponseCode();
                if (code == 401 || code == 403) {
                    // Auth failure is treated as permanent for the queued documents: fail them all.
                    drainDocumentQueueWhenFailingPermanently(new Exception("Denied access by endpoint:" + ser.getResponseString()));
                    log.log(Level.SEVERE, "Failed authentication or authorization with '" + endpoint + "': " + Exceptions.toMessageString(ser));
                    return ConnectionState.CONNECTED;
                }
                // Other server responses: report the protocol error, age out old documents, retry handshake.
                executeProblemsCounter.incrementAndGet();
                log.log(Level.INFO, "Failed talking to endpoint. Handshake with server endpoint '" + endpoint + "' failed -- will re-try handshake: " + Exceptions.toMessageString(ser));
                drainFirstDocumentsInQueueIfOld();
                resultQueue.onEndpointError(new FeedProtocolException(ser.getResponseCode(), ser.getResponseString(), ser, endpoint));
                return ConnectionState.CONNECTED;
            } catch (Throwable throwable) {
                // Connection-level failure: close and reconnect from scratch.
                executeProblemsCounter.incrementAndGet();
                resultQueue.onEndpointError(new FeedConnectException(throwable, endpoint));
                log.log(Level.INFO, "Failed talking to endpoint. Handshake with server endpoint '" + endpoint + "' failed. Will re-try handshake.", throwable);
                drainFirstDocumentsInQueueIfOld();
                currentConnection.close();
                return ConnectionState.DISCONNECTED;
            }
            // Handshake succeeded.
            return ConnectionState.SESSION_SYNCED;
        case SESSION_SYNCED:
            try {
                if (isStale(currentConnection))
                    return refreshConnection(connectionState);
                ProcessResponse processResponse = pullAndProcessData(pollIntervalUS);
                // Feed the transient-error count into the throttler so it can back off.
                gatewayThrottler.handleCall(processResponse.transitiveErrorCount);
            } catch (ServerResponseException ser) {
                // Unexpected HTTP response: keep the connection but redo the handshake.
                log.log(Level.INFO, "Problems while handing data over to endpoint '" + endpoint + "'. Will re-try. Endpoint responded with an unexpected HTTP response code.", ser);
                return ConnectionState.CONNECTED;
            } catch (Throwable e) {
                log.log(Level.INFO, "Connection level error handing data over to endpoint '" + endpoint + "'. Will re-try.", e);
                currentConnection.close();
                return ConnectionState.DISCONNECTED;
            }
            return ConnectionState.SESSION_SYNCED;
        default: {
            log.severe("Should never get here.");
            currentConnection.close();
            return ConnectionState.DISCONNECTED;
        }
    }
}
/**
 * Immutable summary of one pull/process round against the endpoint:
 * how many transient (retryable) errors were seen and how many results were processed.
 * Fields are read directly by cycle() (transitiveErrorCount feeds the gateway throttler),
 * so this is presumably a nested class of the I/O thread — TODO confirm enclosing scope.
 */
class ProcessResponse {
    // Number of transient errors in this round; passed to gatewayThrottler.handleCall().
    private final int transitiveErrorCount;
    // Number of results processed in this round.
    private final int processResultsCount;

    ProcessResponse(int transitiveErrorCount, int processResultsCount) {
        this.transitiveErrorCount = transitiveErrorCount;
        this.processResultsCount = processResultsCount;
    }
}
/**
 * Immutable value holding the outcome of a single pull/process round:
 * the count of transient (retryable) errors and the count of processed results.
 */
class ProcessResponse {

    private final int transitiveErrorCount;
    private final int processResultsCount;

    ProcessResponse(int transientErrors, int resultsProcessed) {
        this.transitiveErrorCount = transientErrors;
        this.processResultsCount = resultsProcessed;
    }

}
Are you sure this shouldn't be synchronized — but at a finer granularity (e.g. per session) rather than locking the whole session repository shared by all sessions?
/**
 * Returns the application set for the given session, loading it first if needed.
 * On a load, the session in the remote-session cache is replaced by a copy that
 * carries the loaded application set, so later lookups hit the cached models.
 *
 * NOTE(review): this check-then-act is unsynchronized — two concurrent callers can
 * both see an empty applicationSet(), both call loadApplication(), and race on
 * putSession(); consider guarding with the per-session lock. TODO confirm callers'
 * threading before relying on single-threaded access.
 *
 * @param session the remote session whose application set is wanted
 * @return the (possibly freshly loaded) application set
 */
public ApplicationSet ensureApplicationLoaded(RemoteSession session) {
    Optional<ApplicationSet> applicationSet = session.applicationSet();
    if (applicationSet.isPresent()) {
        return applicationSet.get();
    }

    // Not loaded yet: build the application models for this session.
    ApplicationSet newApplicationSet = loadApplication(session);
    // Re-create the session with the loaded set and overwrite the cached entry.
    RemoteSession newSession = new RemoteSession(session.getTenantName(), session.getSessionId(),
                                                 session.getSessionZooKeeperClient(),
                                                 Optional.of(newApplicationSet));
    remoteSessionCache.putSession(newSession);
    return newApplicationSet;
}
remoteSessionCache.putSession(newSession);
/**
 * Returns the application set for the given session, loading and caching it under
 * the per-session lock if it is not already present.
 *
 * @param session the remote session whose application set is wanted
 * @return the (possibly freshly loaded) application set
 */
public ApplicationSet ensureApplicationLoaded(RemoteSession session) {
    // Per-session lock: only one thread loads models for a given session at a time.
    try (var lock = lock(session.sessionId)) {
        Optional<ApplicationSet> existing = session.applicationSet();
        if (existing.isPresent())
            return existing.get();

        ApplicationSet loaded = loadApplication(session);
        // Cache a copy of the session carrying the loaded application set.
        remoteSessionCache.putSession(session.activated(loaded));
        return loaded;
    }
}
/**
 * Repository of config sessions for one tenant. Tracks local (on-disk) sessions and
 * remote (ZooKeeper-backed) sessions, keeps them in sync via a Curator directory cache
 * on the tenant's sessions path, and owns per-session state watchers, session
 * creation/preparation/activation, and expiry of old sessions and locks.
 *
 * Thread-safety: addLocalSession and sessionsChanged are synchronized on this instance;
 * per-session mutations take the per-session ZooKeeper lock via lock(sessionId).
 */
class SessionRepository {

    private static final Logger log = Logger.getLogger(SessionRepository.class.getName());
    // Session directories are named by their numeric session id.
    private static final FilenameFilter sessionApplicationsFilter = (dir, name) -> name.matches("\\d+");
    // Sentinel meaning "no currently active session" when creating deploy data.
    private static final long nonExistingActiveSessionId = 0;

    private final SessionCache<LocalSession> localSessionCache = new SessionCache<>();
    private final SessionCache<RemoteSession> remoteSessionCache = new SessionCache<>();
    private final Map<Long, SessionStateWatcher> sessionStateWatchers = new HashMap<>();
    private final Duration sessionLifetime;
    private final Clock clock;
    private final Curator curator;
    private final Executor zkWatcherExecutor;
    private final TenantFileSystemDirs tenantFileSystemDirs;
    private final BooleanFlag distributeApplicationPackage;
    private final MetricUpdater metrics;
    private final Curator.DirectoryCache directoryCache;
    private final TenantApplications applicationRepo;
    private final SessionPreparer sessionPreparer;
    private final Path sessionsPath;
    private final TenantName tenantName;
    private final GlobalComponentRegistry componentRegistry;
    private final Path locksPath;

    public SessionRepository(TenantName tenantName, GlobalComponentRegistry componentRegistry,
                             TenantApplications applicationRepo, FlagSource flagSource,
                             SessionPreparer sessionPreparer) {
        this.tenantName = tenantName;
        this.componentRegistry = componentRegistry;
        this.sessionsPath = TenantRepository.getSessionsPath(tenantName);
        this.clock = componentRegistry.getClock();
        this.curator = componentRegistry.getCurator();
        this.sessionLifetime = Duration.ofSeconds(componentRegistry.getConfigserverConfig().sessionLifetime());
        this.zkWatcherExecutor = command -> componentRegistry.getZkWatcherExecutor().execute(tenantName, command);
        this.tenantFileSystemDirs = new TenantFileSystemDirs(componentRegistry.getConfigServerDB(), tenantName);
        this.applicationRepo = applicationRepo;
        this.sessionPreparer = sessionPreparer;
        this.distributeApplicationPackage = Flags.CONFIGSERVER_DISTRIBUTE_APPLICATION_PACKAGE.bindTo(flagSource);
        this.metrics = componentRegistry.getMetrics().getOrCreateMetricUpdater(Metrics.createDimensions(tenantName));
        this.locksPath = TenantRepository.getLocksPath(tenantName);
        // Load existing sessions before starting the directory cache, so change events
        // observed after start() are applied on top of a fully initialized view.
        loadSessions();
        this.directoryCache = curator.createDirectoryCache(sessionsPath.getAbsolute(), false, false, componentRegistry.getZkCacheExecutor());
        this.directoryCache.addListener(this::childEvent);
        this.directoryCache.start();
    }

    private void loadSessions() {
        loadLocalSessions();
        initializeRemoteSessions();
    }

    // Registers a local session, creates its remote counterpart and a state watcher for it.
    public synchronized void addLocalSession(LocalSession session) {
        localSessionCache.putSession(session);
        long sessionId = session.getSessionId();
        RemoteSession remoteSession = createRemoteSession(sessionId);
        addSessionStateWatcher(sessionId, remoteSession, Optional.of(session));
    }

    public LocalSession getLocalSession(long sessionId) {
        return localSessionCache.getSession(sessionId);
    }

    public List<LocalSession> getLocalSessions() {
        return localSessionCache.getSessions();
    }

    // Loads sessions found on disk; sessions that fail to load are logged and skipped.
    private void loadLocalSessions() {
        File[] sessions = tenantFileSystemDirs.sessionsPath().listFiles(sessionApplicationsFilter);
        if (sessions == null) return;
        for (File session : sessions) {
            try {
                addLocalSession(createSessionFromId(Long.parseLong(session.getName())));
            } catch (IllegalArgumentException e) {
                log.log(Level.WARNING, "Could not load session '" +
                        session.getAbsolutePath() + "':" + e.getMessage() + ", skipping it.");
            }
        }
    }

    /**
     * Prepares a local session: runs the session preparer, marks the session PREPARE,
     * and waits for other config servers to acknowledge via the prepare waiter.
     *
     * @return the config change actions resulting from preparation
     */
    public ConfigChangeActions prepareLocalSession(LocalSession session, DeployLogger logger, PrepareParams params,
                                                   Optional<ApplicationSet> currentActiveApplicationSet,
                                                   Path tenantPath, Instant now) {
        applicationRepo.createApplication(params.getApplicationId());
        logger.log(Level.FINE, "Created application " + params.getApplicationId());
        long sessionId = session.getSessionId();
        SessionZooKeeperClient sessionZooKeeperClient = createSessionZooKeeperClient(sessionId);
        Curator.CompletionWaiter waiter = sessionZooKeeperClient.createPrepareWaiter();
        ConfigChangeActions actions = sessionPreparer.prepare(applicationRepo.getHostValidator(), logger, params,
                                                              currentActiveApplicationSet, tenantPath, now,
                                                              getSessionAppDir(sessionId),
                                                              session.getApplicationPackage(), sessionZooKeeperClient)
                                                     .getConfigChangeActions();
        setPrepared(session);
        waiter.awaitCompletion(params.getTimeoutBudget().timeLeft());
        return actions;
    }

    /**
     * Deletes local sessions that have either expired (and are not active), or are older
     * than one day and are not the active session for their application.
     *
     * @param activeSessions map of application id to its currently active session id
     */
    public void deleteExpiredSessions(Map<ApplicationId, Long> activeSessions) {
        log.log(Level.FINE, () -> "Purging old sessions for tenant '" + tenantName + "'");
        try {
            for (LocalSession candidate : localSessionCache.getSessions()) {
                Instant createTime = candidate.getCreateTime();
                log.log(Level.FINE, () -> "Candidate session for deletion: " + candidate.getSessionId() + ", created: " + createTime);

                if (hasExpired(candidate) && !isActiveSession(candidate)) {
                    deleteLocalSession(candidate);
                } else if (createTime.plus(Duration.ofDays(1)).isBefore(clock.instant())) {
                    // More than a day old: delete unless it is the active session for its application.
                    Optional<ApplicationId> applicationId = candidate.getOptionalApplicationId();
                    if (applicationId.isEmpty()) continue;
                    Long activeSession = activeSessions.get(applicationId.get());
                    if (activeSession == null || activeSession != candidate.getSessionId()) {
                        deleteLocalSession(candidate);
                        log.log(Level.INFO, "Deleted inactive session " + candidate.getSessionId() + " created " +
                                            createTime + " for '" + applicationId + "'");
                    }
                }
            }
        } catch (Throwable e) {
            // Best-effort housekeeping: never let purging take down the caller.
            log.log(Level.WARNING, "Error when purging old sessions ", e);
        }
        log.log(Level.FINE, () -> "Done purging old sessions");
    }

    private boolean hasExpired(LocalSession candidate) {
        return (candidate.getCreateTime().plus(sessionLifetime).isBefore(clock.instant()));
    }

    private boolean isActiveSession(LocalSession candidate) {
        return candidate.getStatus() == Session.Status.ACTIVATE;
    }

    // Deletes a local session under the per-session lock: watcher, cache entry and persistent data.
    public void deleteLocalSession(LocalSession session) {
        long sessionId = session.getSessionId();
        try (Lock lock = lock(sessionId)) {
            log.log(Level.FINE, () -> "Deleting local session " + sessionId);
            SessionStateWatcher watcher = sessionStateWatchers.remove(sessionId);
            if (watcher != null) watcher.close();
            localSessionCache.removeSession(sessionId);
            deletePersistentData(sessionId);
        }
    }

    // Deletes the session's ZooKeeper data and its on-disk application directory in one transaction.
    private void deletePersistentData(long sessionId) {
        NestedTransaction transaction = new NestedTransaction();
        SessionZooKeeperClient sessionZooKeeperClient = createSessionZooKeeperClient(sessionId);
        transaction.add(sessionZooKeeperClient.deleteTransaction(), FileTransaction.class);
        transaction.add(FileTransaction.from(FileOperations.delete(getSessionAppDir(sessionId).getAbsolutePath())));
        transaction.commit();
    }

    public void close() {
        deleteAllSessions();
        tenantFileSystemDirs.delete();
        try {
            if (directoryCache != null) {
                directoryCache.close();
            }
        } catch (Exception e) {
            log.log(Level.WARNING, "Exception when closing path cache", e);
        } finally {
            // Empty list: treat every cached remote session as removed.
            checkForRemovedSessions(new ArrayList<>());
        }
    }

    private void deleteAllSessions() {
        // Copy to avoid mutating the cache while iterating it.
        List<LocalSession> sessions = new ArrayList<>(localSessionCache.getSessions());
        for (LocalSession session : sessions) {
            deleteLocalSession(session);
        }
    }

    public RemoteSession getRemoteSession(long sessionId) {
        return remoteSessionCache.getSession(sessionId);
    }

    // Session ids present in ZooKeeper (the authoritative list).
    public List<Long> getRemoteSessions() {
        return getSessionList(curator.getChildren(sessionsPath));
    }

    public void addRemoteSession(RemoteSession session) {
        remoteSessionCache.putSession(session);
        metrics.incAddedSessions();
    }

    /** Deletes remote sessions that are not ACTIVATE and have outlived the given expiry time; returns the count. */
    public int deleteExpiredRemoteSessions(Clock clock, Duration expiryTime) {
        int deleted = 0;
        for (long sessionId : getRemoteSessions()) {
            RemoteSession session = remoteSessionCache.getSession(sessionId);
            if (session == null) continue; // Not in cache, e.g. already removed
            if (session.getStatus() == Session.Status.ACTIVATE) continue;
            if (sessionHasExpired(session.getCreateTime(), expiryTime, clock)) {
                log.log(Level.FINE, () -> "Remote session " + sessionId + " for " + tenantName + " has expired, deleting it");
                deleteSession(session);
                deleted++;
            }
        }
        return deleted;
    }

    public void deactivate(RemoteSession remoteSession) {
        remoteSessionCache.putSession(remoteSession.deactivated());
    }

    public void deleteSession(RemoteSession session) {
        SessionZooKeeperClient sessionZooKeeperClient = createSessionZooKeeperClient(session.getSessionId());
        Transaction transaction = sessionZooKeeperClient.deleteTransaction();
        transaction.commit();
        transaction.close();
    }

    /** Deletes session lock nodes older than the given expiry time; returns the count. */
    public int deleteExpiredLocks(Clock clock, Duration expiryTime) {
        int deleted = 0;
        for (var lock : curator.getChildren(locksPath)) {
            Path path = locksPath.append(lock);
            // If the node's creation time is unknown, fall back to "now" so it is kept.
            if (zooKeeperNodeCreated(path).orElse(clock.instant()).isBefore(clock.instant().minus(expiryTime))) {
                log.log(Level.FINE, () -> "Lock " + path + " has expired, deleting it");
                curator.delete(path);
                deleted++;
            }
        }
        return deleted;
    }

    private Optional<Instant> zooKeeperNodeCreated(Path path) {
        return curator.getStat(path).map(s -> Instant.ofEpochMilli(s.getCtime()));
    }

    private boolean sessionHasExpired(Instant created, Duration expiryTime, Clock clock) {
        return (created.plus(expiryTime).isBefore(clock.instant()));
    }

    private List<Long> getSessionListFromDirectoryCache(List<ChildData> children) {
        return getSessionList(children.stream()
                                      .map(child -> Path.fromString(child.getPath()).getName())
                                      .collect(Collectors.toList()));
    }

    private List<Long> getSessionList(List<String> children) {
        return children.stream().map(Long::parseLong).collect(Collectors.toList());
    }

    private void initializeRemoteSessions() throws NumberFormatException {
        getRemoteSessions().forEach(this::sessionAdded);
    }

    // Reconciles the remote-session cache against the directory cache's current contents.
    private synchronized void sessionsChanged() throws NumberFormatException {
        List<Long> sessions = getSessionListFromDirectoryCache(directoryCache.getCurrentData());
        checkForRemovedSessions(sessions);
        checkForAddedSessions(sessions);
    }

    private void checkForRemovedSessions(List<Long> sessions) {
        for (RemoteSession session : remoteSessionCache.getSessions())
            if ( ! sessions.contains(session.getSessionId()))
                sessionRemoved(session.getSessionId());
    }

    private void checkForAddedSessions(List<Long> sessions) {
        for (Long sessionId : sessions)
            if (remoteSessionCache.getSession(sessionId) == null)
                sessionAdded(sessionId);
    }

    /**
     * A session for which we don't have a watcher, i.e. hitherto unknown to us.
     *
     * @param sessionId session id for the new session
     */
    public void sessionAdded(long sessionId) {
        SessionZooKeeperClient sessionZKClient = createSessionZooKeeperClient(sessionId);
        if (sessionZKClient.readStatus().equals(Session.Status.DELETE)) return;

        log.log(Level.FINE, () -> "Adding remote session to SessionRepository: " + sessionId);
        RemoteSession remoteSession = createRemoteSession(sessionId);
        loadSessionIfActive(remoteSession);
        addRemoteSession(remoteSession);
        Optional<LocalSession> localSession = Optional.empty();
        if (distributeApplicationPackage())
            localSession = createLocalSessionUsingDistributedApplicationPackage(sessionId);
        addSessionStateWatcher(sessionId, remoteSession, localSession);
    }

    // Activates a session: loads its application, reloads config, and notifies the active waiter.
    void activate(RemoteSession session) {
        long sessionId = session.getSessionId();
        Curator.CompletionWaiter waiter = createSessionZooKeeperClient(sessionId).getActiveWaiter();
        log.log(Level.FINE, () -> session.logPre() + "Getting session from repo: " + sessionId);
        ApplicationSet app = ensureApplicationLoaded(session);
        log.log(Level.FINE, () -> session.logPre() + "Reloading config for " + sessionId);
        applicationRepo.reloadConfig(app);
        log.log(Level.FINE, () -> session.logPre() + "Notifying " + waiter);
        notifyCompletion(waiter, session);
        log.log(Level.INFO, session.logPre() + "Session activated: " + sessionId);
    }

    void deleteSession(RemoteSession remoteSession, Optional<LocalSession> localSession) {
        localSession.ifPresent(this::deleteLocalSession);
        deactivate(remoteSession);
    }

    boolean distributeApplicationPackage() {
        return distributeApplicationPackage.value();
    }

    private void sessionRemoved(long sessionId) {
        SessionStateWatcher watcher = sessionStateWatchers.remove(sessionId);
        if (watcher != null) watcher.close();
        remoteSessionCache.removeSession(sessionId);
        metrics.incRemovedSessions();
    }

    // If this session is the active one for some application, load and reload its config.
    private void loadSessionIfActive(RemoteSession session) {
        for (ApplicationId applicationId : applicationRepo.activeApplications()) {
            if (applicationRepo.requireActiveSessionOf(applicationId) == session.getSessionId()) {
                log.log(Level.FINE, () -> "Found active application for session " + session.getSessionId() + " , loading it");
                applicationRepo.reloadConfig(ensureApplicationLoaded(session));
                log.log(Level.INFO, session.logPre() + "Application activated successfully: " + applicationId +
                                    " (generation " + session.getSessionId() + ")");
                return;
            }
        }
    }

    void prepareRemoteSession(RemoteSession session) {
        SessionZooKeeperClient sessionZooKeeperClient = createSessionZooKeeperClient(session.getSessionId());
        Curator.CompletionWaiter waiter = sessionZooKeeperClient.getPrepareWaiter();
        ensureApplicationLoaded(session);
        notifyCompletion(waiter, session);
    }

    void confirmUpload(RemoteSession session) {
        Curator.CompletionWaiter waiter = session.getSessionZooKeeperClient().getUploadWaiter();
        long sessionId = session.getSessionId();
        log.log(Level.FINE, "Notifying upload waiter for session " + sessionId);
        notifyCompletion(waiter, session);
        log.log(Level.FINE, "Done notifying upload for session " + sessionId);
    }

    // Notifies a completion waiter, tolerating the node being already deleted or already existing.
    void notifyCompletion(Curator.CompletionWaiter completionWaiter, RemoteSession session) {
        try {
            completionWaiter.notifyCompletion();
        } catch (RuntimeException e) {
            // These two exceptions are benign races and are only logged; anything else is rethrown.
            Set<Class<? extends KeeperException>> acceptedExceptions = Set.of(KeeperException.NoNodeException.class,
                                                                             KeeperException.NodeExistsException.class);
            Class<? extends Throwable> exceptionClass = e.getCause().getClass();
            if (acceptedExceptions.contains(exceptionClass))
                log.log(Level.FINE, "Not able to notify completion for session " + session.getSessionId() +
                                    " (" + completionWaiter + ")," +
                                    " node " + (exceptionClass.equals(KeeperException.NoNodeException.class)
                        ? "has been deleted"
                        : "already exists"));
            else
                throw e;
        }
    }

    // Builds the application models for a session from its application package in ZooKeeper.
    private ApplicationSet loadApplication(RemoteSession session) {
        SessionZooKeeperClient sessionZooKeeperClient = createSessionZooKeeperClient(session.getSessionId());
        ApplicationPackage applicationPackage = sessionZooKeeperClient.loadApplicationPackage();
        ActivatedModelsBuilder builder = new ActivatedModelsBuilder(session.getTenantName(),
                                                                    session.getSessionId(),
                                                                    sessionZooKeeperClient,
                                                                    componentRegistry);
        Optional<AllocatedHosts> allocatedHosts = applicationPackage.getAllocatedHosts();
        return ApplicationSet.fromList(builder.buildModels(session.getApplicationId(),
                                                           sessionZooKeeperClient.readDockerImageRepository(),
                                                           sessionZooKeeperClient.readVespaVersion(),
                                                           applicationPackage,
                                                           new SettableOptional<>(allocatedHosts),
                                                           clock.instant()));
    }

    // Recomputes per-status session-count metrics; runs on the ZK watcher executor.
    private void nodeChanged() {
        zkWatcherExecutor.execute(() -> {
            Multiset<Session.Status> sessionMetrics = HashMultiset.create();
            for (RemoteSession session : remoteSessionCache.getSessions()) {
                sessionMetrics.add(session.getStatus());
            }
            metrics.setNewSessions(sessionMetrics.count(Session.Status.NEW));
            metrics.setPreparedSessions(sessionMetrics.count(Session.Status.PREPARE));
            metrics.setActivatedSessions(sessionMetrics.count(Session.Status.ACTIVATE));
            metrics.setDeactivatedSessions(sessionMetrics.count(Session.Status.DEACTIVATE));
        });
    }

    // Directory-cache callback; work is handed off to the ZK watcher executor.
    @SuppressWarnings("unused")
    private void childEvent(CuratorFramework ignored, PathChildrenCacheEvent event) {
        zkWatcherExecutor.execute(() -> {
            log.log(Level.FINE, () -> "Got child event: " + event);
            switch (event.getType()) {
                case CHILD_ADDED:
                    sessionsChanged();
                    synchronizeOnNew(getSessionListFromDirectoryCache(Collections.singletonList(event.getData())));
                    break;
                case CHILD_REMOVED:
                case CONNECTION_RECONNECTED:
                    sessionsChanged();
                    break;
            }
        });
    }

    private void synchronizeOnNew(List<Long> sessionList) {
        for (long sessionId : sessionList) {
            RemoteSession session = remoteSessionCache.getSession(sessionId);
            if (session == null) continue; // session might have been deleted after getting it from the cache
            log.log(Level.FINE, () -> session.logPre() + "Confirming upload for session " + sessionId);
            confirmUpload(session);
        }
    }

    /**
     * Creates a new deployment session from an application package.
     *
     * @param applicationDirectory a File pointing to an application.
     * @param applicationId application id for this new session.
     * @param timeoutBudget Timeout for creating session and waiting for other servers.
     * @return a new session
     */
    public LocalSession createSession(File applicationDirectory, ApplicationId applicationId,
                                      TimeoutBudget timeoutBudget, Optional<Long> activeSessionId) {
        return create(applicationDirectory, applicationId, activeSessionId, false, timeoutBudget);
    }

    public RemoteSession createRemoteSession(long sessionId) {
        SessionZooKeeperClient sessionZKClient = createSessionZooKeeperClient(sessionId);
        return new RemoteSession(tenantName, sessionId, sessionZKClient);
    }

    private void ensureSessionPathDoesNotExist(long sessionId) {
        Path sessionPath = getSessionPath(sessionId);
        if (componentRegistry.getConfigCurator().exists(sessionPath.getAbsolute())) {
            throw new IllegalArgumentException("Path " + sessionPath.getAbsolute() + " already exists in ZooKeeper");
        }
    }

    // Builds an application package with deploy metadata (user, timestamp, session ids).
    private ApplicationPackage createApplication(File userDir,
                                                 File configApplicationDir,
                                                 ApplicationId applicationId,
                                                 long sessionId,
                                                 Optional<Long> currentlyActiveSessionId,
                                                 boolean internalRedeploy) {
        long deployTimestamp = System.currentTimeMillis();
        String user = System.getenv("USER");
        if (user == null) {
            user = "unknown";
        }
        DeployData deployData = new DeployData(user, userDir.getAbsolutePath(), applicationId, deployTimestamp,
                                               internalRedeploy, sessionId,
                                               currentlyActiveSessionId.orElse(nonExistingActiveSessionId));
        return FilesApplicationPackage.fromFileWithDeployData(configApplicationDir, deployData);
    }

    // Creates the session in ZooKeeper and waits for the upload to be acknowledged.
    private LocalSession createSessionFromApplication(ApplicationPackage applicationPackage,
                                                      long sessionId,
                                                      TimeoutBudget timeoutBudget,
                                                      Clock clock) {
        log.log(Level.FINE, () -> TenantRepository.logPre(tenantName) + "Creating session " + sessionId + " in ZooKeeper");
        SessionZooKeeperClient sessionZKClient = createSessionZooKeeperClient(sessionId);
        sessionZKClient.createNewSession(clock.instant());
        Curator.CompletionWaiter waiter = sessionZKClient.getUploadWaiter();
        LocalSession session = new LocalSession(tenantName, sessionId, applicationPackage, sessionZKClient);
        waiter.awaitCompletion(timeoutBudget.timeLeft());
        return session;
    }

    /**
     * Creates a new deployment session from an already existing session.
     *
     * @param existingSession the session to use as base
     * @param logger a deploy logger where the deploy log will be written.
     * @param internalRedeploy whether this session is for a system internal redeploy — not an application package change
     * @param timeoutBudget timeout for creating session and waiting for other servers.
     * @return a new session
     */
    public LocalSession createSessionFromExisting(Session existingSession,
                                                  DeployLogger logger,
                                                  boolean internalRedeploy,
                                                  TimeoutBudget timeoutBudget) {
        File existingApp = getSessionAppDir(existingSession.getSessionId());
        ApplicationId existingApplicationId = existingSession.getApplicationId();

        Optional<Long> activeSessionId = getActiveSessionId(existingApplicationId);
        logger.log(Level.FINE, "Create new session for application id '" + existingApplicationId + "' from existing active session " + activeSessionId);
        LocalSession session = create(existingApp, existingApplicationId, activeSessionId, internalRedeploy, timeoutBudget);
        // Copy settings from the session this one is based on.
        session.setApplicationId(existingApplicationId);
        if (distributeApplicationPackage() && existingSession.getApplicationPackageReference() != null) {
            session.setApplicationPackageReference(existingSession.getApplicationPackageReference());
        }
        session.setVespaVersion(existingSession.getVespaVersion());
        session.setDockerImageRepository(existingSession.getDockerImageRepository());
        session.setAthenzDomain(existingSession.getAthenzDomain());
        return session;
    }

    private LocalSession create(File applicationFile, ApplicationId applicationId,
                                Optional<Long> currentlyActiveSessionId, boolean internalRedeploy,
                                TimeoutBudget timeoutBudget) {
        long sessionId = getNextSessionId();
        try {
            ensureSessionPathDoesNotExist(sessionId);
            ApplicationPackage app = createApplicationPackage(applicationFile, applicationId, sessionId, currentlyActiveSessionId, internalRedeploy);
            return createSessionFromApplication(app, sessionId, timeoutBudget, clock);
        } catch (Exception e) {
            throw new RuntimeException("Error creating session " + sessionId, e);
        }
    }

    /**
     * This method is used when creating a session based on a remote session and the distributed application package
     * It does not wait for session being created on other servers
     */
    private LocalSession createLocalSession(File applicationFile, ApplicationId applicationId, long sessionId) {
        try {
            Optional<Long> currentlyActiveSessionId = getActiveSessionId(applicationId);
            ApplicationPackage applicationPackage = createApplicationPackage(applicationFile, applicationId, sessionId, currentlyActiveSessionId, false);
            SessionZooKeeperClient sessionZooKeeperClient = createSessionZooKeeperClient(sessionId);
            return new LocalSession(tenantName, sessionId, applicationPackage, sessionZooKeeperClient);
        } catch (Exception e) {
            throw new RuntimeException("Error creating session " + sessionId, e);
        }
    }

    private ApplicationPackage createApplicationPackage(File applicationFile, ApplicationId applicationId,
                                                        long sessionId, Optional<Long> currentlyActiveSessionId,
                                                        boolean internalRedeploy) throws IOException {
        File userApplicationDir = getSessionAppDir(sessionId);
        copyApp(applicationFile, userApplicationDir);
        ApplicationPackage applicationPackage = createApplication(applicationFile,
                                                                  userApplicationDir,
                                                                  applicationId,
                                                                  sessionId,
                                                                  currentlyActiveSessionId,
                                                                  internalRedeploy);
        applicationPackage.writeMetaData();
        return applicationPackage;
    }

    // Copies the application via a temp directory and an atomic move, so the destination
    // never exists in a half-copied state; the temp dir is always cleaned up.
    private void copyApp(File sourceDir, File destinationDir) throws IOException {
        if (destinationDir.exists())
            throw new RuntimeException("Destination dir " + destinationDir + " already exists");
        if (! sourceDir.isDirectory())
            throw new IllegalArgumentException(sourceDir.getAbsolutePath() + " is not a directory");

        java.nio.file.Path tempDestinationDir = null;
        try {
            tempDestinationDir = Files.createTempDirectory(destinationDir.getParentFile().toPath(), "app-package");
            log.log(Level.FINE, "Copying dir " + sourceDir.getAbsolutePath() + " to " + tempDestinationDir.toFile().getAbsolutePath());
            IOUtils.copyDirectory(sourceDir, tempDestinationDir.toFile());
            log.log(Level.FINE, "Moving " + tempDestinationDir + " to " + destinationDir.getAbsolutePath());
            Files.move(tempDestinationDir, destinationDir.toPath(), StandardCopyOption.ATOMIC_MOVE);
        } finally {
            // No-op after a successful move (the temp dir no longer exists at that path).
            if (tempDestinationDir != null)
                IOUtils.recursiveDeleteDir(tempDestinationDir.toFile());
        }
    }

    /**
     * Returns a new session instance for the given session id.
     */
    LocalSession createSessionFromId(long sessionId) {
        File sessionDir = getAndValidateExistingSessionAppDir(sessionId);
        ApplicationPackage applicationPackage = FilesApplicationPackage.fromFile(sessionDir);
        SessionZooKeeperClient sessionZKClient = createSessionZooKeeperClient(sessionId);
        return new LocalSession(tenantName, sessionId, applicationPackage, sessionZKClient);
    }

    /**
     * Returns a new local session for the given session id if it does not already exist.
     * Will also add the session to the local session cache if necessary
     */
    public Optional<LocalSession> createLocalSessionUsingDistributedApplicationPackage(long sessionId) {
        if (applicationRepo.hasLocalSession(sessionId)) {
            log.log(Level.FINE, () -> "Local session for session id " + sessionId + " already exists");
            return Optional.of(createSessionFromId(sessionId));
        }

        SessionZooKeeperClient sessionZKClient = createSessionZooKeeperClient(sessionId);
        FileReference fileReference = sessionZKClient.readApplicationPackageReference();
        log.log(Level.FINE, () -> "File reference for session id " + sessionId + ": " + fileReference);
        if (fileReference != null) {
            File rootDir = new File(Defaults.getDefaults().underVespaHome(componentRegistry.getConfigserverConfig().fileReferencesDir()));
            File sessionDir;
            FileDirectory fileDirectory = new FileDirectory(rootDir);
            try {
                sessionDir = fileDirectory.getFile(fileReference);
            } catch (IllegalArgumentException e) {
                // The distributed package has not arrived on this server yet.
                log.log(Level.INFO, "File reference for session id " + sessionId + ": " + fileReference + " not found in " + fileDirectory);
                return Optional.empty();
            }
            ApplicationId applicationId = sessionZKClient.readApplicationId()
                    .orElseThrow(() -> new RuntimeException("Could not find application id for session " + sessionId));
            log.log(Level.FINE, () -> "Creating local session for tenant '" + tenantName + "' with session id " + sessionId);
            LocalSession localSession = createLocalSession(sessionDir, applicationId, sessionId);
            addLocalSession(localSession);
            return Optional.of(localSession);
        }
        return Optional.empty();
    }

    private Optional<Long> getActiveSessionId(ApplicationId applicationId) {
        List<ApplicationId> applicationIds = applicationRepo.activeApplications();
        return applicationIds.contains(applicationId)
                ? Optional.of(applicationRepo.requireActiveSessionOf(applicationId))
                : Optional.empty();
    }

    private long getNextSessionId() {
        return new SessionCounter(componentRegistry.getConfigCurator(), tenantName).nextSessionId();
    }

    public Path getSessionPath(long sessionId) {
        return sessionsPath.append(String.valueOf(sessionId));
    }

    Path getSessionStatePath(long sessionId) {
        return getSessionPath(sessionId).append(ConfigCurator.SESSIONSTATE_ZK_SUBPATH);
    }

    private SessionZooKeeperClient createSessionZooKeeperClient(long sessionId) {
        String serverId = componentRegistry.getConfigserverConfig().serverId();
        return new SessionZooKeeperClient(curator, componentRegistry.getConfigCurator(), tenantName, sessionId, serverId);
    }

    private File getAndValidateExistingSessionAppDir(long sessionId) {
        File appDir = getSessionAppDir(sessionId);
        if (!appDir.exists() || !appDir.isDirectory()) {
            throw new IllegalArgumentException("Unable to find correct application directory for session " + sessionId);
        }
        return appDir;
    }

    private File getSessionAppDir(long sessionId) {
        return new TenantFileSystemDirs(componentRegistry.getConfigServerDB(), tenantName).getUserApplicationDir(sessionId);
    }

    // Reuses an existing watcher for the session id if present, otherwise creates one
    // backed by a file cache on the session's state path.
    private void addSessionStateWatcher(long sessionId, RemoteSession remoteSession, Optional<LocalSession> localSession) {
        if (sessionStateWatchers.containsKey(sessionId)) {
            localSession.ifPresent(session -> sessionStateWatchers.get(sessionId).addLocalSession(session));
        } else {
            Curator.FileCache fileCache = curator.createFileCache(getSessionStatePath(sessionId).getAbsolute(), false);
            fileCache.addListener(this::nodeChanged);
            sessionStateWatchers.put(sessionId, new SessionStateWatcher(fileCache, remoteSession, localSession, metrics, zkWatcherExecutor, this));
        }
    }

    @Override
    public String toString() {
        return getLocalSessions().toString();
    }

    /** Returns the lock for session operations for the given session id. */
    public Lock lock(long sessionId) {
        return curator.lock(lockPath(sessionId), Duration.ofMinutes(1));
    }

    public Clock clock() { return clock; }

    private Path lockPath(long sessionId) {
        return locksPath.append(String.valueOf(sessionId));
    }

    public Transaction createActivateTransaction(Session session) {
        Transaction transaction = createSetStatusTransaction(session, Session.Status.ACTIVATE);
        transaction.add(applicationRepo.createPutTransaction(session.getApplicationId(), session.getSessionId()).operations());
        return transaction;
    }

    private Transaction createSetStatusTransaction(Session session, Session.Status status) {
        return session.sessionZooKeeperClient.createWriteStatusTransaction(status);
    }

    void setPrepared(Session session) {
        session.setStatus(Session.Status.PREPARE);
    }

    /** A transaction wrapping file operations so they can be committed together with ZooKeeper operations. */
    private static class FileTransaction extends AbstractTransaction {

        public static FileTransaction from(FileOperation operation) {
            FileTransaction transaction = new FileTransaction();
            transaction.add(operation);
            return transaction;
        }

        @Override
        public void prepare() { }

        @Override
        public void commit() {
            for (Operation operation : operations())
                ((FileOperation)operation).commit();
        }

    }

    /** Factory for file operations */
    private static class FileOperations {

        /** Creates an operation which recursively deletes the given path */
        public static DeleteOperation delete(String pathToDelete) {
            return new DeleteOperation(pathToDelete);
        }

    }

    private interface FileOperation extends Transaction.Operation {
        void commit();
    }

    /**
     * Recursively deletes this path and everything below.
     * Succeeds with no action if the path does not exist.
     */
    private static class DeleteOperation implements FileOperation {

        private final String pathToDelete;

        DeleteOperation(String pathToDelete) {
            this.pathToDelete = pathToDelete;
        }

        @Override
        public void commit() {
            IOUtils.recursiveDeleteDir(new File(pathToDelete));
        }

    }

}
class SessionRepository { private static final Logger log = Logger.getLogger(SessionRepository.class.getName()); private static final FilenameFilter sessionApplicationsFilter = (dir, name) -> name.matches("\\d+"); private static final long nonExistingActiveSessionId = 0; private final SessionCache<LocalSession> localSessionCache = new SessionCache<>(); private final SessionCache<RemoteSession> remoteSessionCache = new SessionCache<>(); private final Map<Long, SessionStateWatcher> sessionStateWatchers = new HashMap<>(); private final Duration sessionLifetime; private final Clock clock; private final Curator curator; private final Executor zkWatcherExecutor; private final TenantFileSystemDirs tenantFileSystemDirs; private final BooleanFlag distributeApplicationPackage; private final MetricUpdater metrics; private final Curator.DirectoryCache directoryCache; private final TenantApplications applicationRepo; private final SessionPreparer sessionPreparer; private final Path sessionsPath; private final TenantName tenantName; private final GlobalComponentRegistry componentRegistry; private final Path locksPath; public SessionRepository(TenantName tenantName, GlobalComponentRegistry componentRegistry, TenantApplications applicationRepo, FlagSource flagSource, SessionPreparer sessionPreparer) { this.tenantName = tenantName; this.componentRegistry = componentRegistry; this.sessionsPath = TenantRepository.getSessionsPath(tenantName); this.clock = componentRegistry.getClock(); this.curator = componentRegistry.getCurator(); this.sessionLifetime = Duration.ofSeconds(componentRegistry.getConfigserverConfig().sessionLifetime()); this.zkWatcherExecutor = command -> componentRegistry.getZkWatcherExecutor().execute(tenantName, command); this.tenantFileSystemDirs = new TenantFileSystemDirs(componentRegistry.getConfigServerDB(), tenantName); this.applicationRepo = applicationRepo; this.sessionPreparer = sessionPreparer; this.distributeApplicationPackage = 
Flags.CONFIGSERVER_DISTRIBUTE_APPLICATION_PACKAGE.bindTo(flagSource); this.metrics = componentRegistry.getMetrics().getOrCreateMetricUpdater(Metrics.createDimensions(tenantName)); this.locksPath = TenantRepository.getLocksPath(tenantName); loadSessions(); this.directoryCache = curator.createDirectoryCache(sessionsPath.getAbsolute(), false, false, componentRegistry.getZkCacheExecutor()); this.directoryCache.addListener(this::childEvent); this.directoryCache.start(); } private void loadSessions() { loadLocalSessions(); initializeRemoteSessions(); } public synchronized void addLocalSession(LocalSession session) { localSessionCache.putSession(session); long sessionId = session.getSessionId(); RemoteSession remoteSession = createRemoteSession(sessionId); addSessionStateWatcher(sessionId, remoteSession, Optional.of(session)); } public LocalSession getLocalSession(long sessionId) { return localSessionCache.getSession(sessionId); } public List<LocalSession> getLocalSessions() { return localSessionCache.getSessions(); } private void loadLocalSessions() { File[] sessions = tenantFileSystemDirs.sessionsPath().listFiles(sessionApplicationsFilter); if (sessions == null) return; for (File session : sessions) { try { addLocalSession(createSessionFromId(Long.parseLong(session.getName()))); } catch (IllegalArgumentException e) { log.log(Level.WARNING, "Could not load session '" + session.getAbsolutePath() + "':" + e.getMessage() + ", skipping it."); } } } public ConfigChangeActions prepareLocalSession(LocalSession session, DeployLogger logger, PrepareParams params, Optional<ApplicationSet> currentActiveApplicationSet, Path tenantPath, Instant now) { applicationRepo.createApplication(params.getApplicationId()); logger.log(Level.FINE, "Created application " + params.getApplicationId()); long sessionId = session.getSessionId(); SessionZooKeeperClient sessionZooKeeperClient = createSessionZooKeeperClient(sessionId); Curator.CompletionWaiter waiter = 
sessionZooKeeperClient.createPrepareWaiter(); ConfigChangeActions actions = sessionPreparer.prepare(applicationRepo.getHostValidator(), logger, params, currentActiveApplicationSet, tenantPath, now, getSessionAppDir(sessionId), session.getApplicationPackage(), sessionZooKeeperClient) .getConfigChangeActions(); setPrepared(session); waiter.awaitCompletion(params.getTimeoutBudget().timeLeft()); return actions; } public void deleteExpiredSessions(Map<ApplicationId, Long> activeSessions) { log.log(Level.FINE, () -> "Purging old sessions for tenant '" + tenantName + "'"); try { for (LocalSession candidate : localSessionCache.getSessions()) { Instant createTime = candidate.getCreateTime(); log.log(Level.FINE, () -> "Candidate session for deletion: " + candidate.getSessionId() + ", created: " + createTime); if (hasExpired(candidate) && !isActiveSession(candidate)) { deleteLocalSession(candidate); } else if (createTime.plus(Duration.ofDays(1)).isBefore(clock.instant())) { Optional<ApplicationId> applicationId = candidate.getOptionalApplicationId(); if (applicationId.isEmpty()) continue; Long activeSession = activeSessions.get(applicationId.get()); if (activeSession == null || activeSession != candidate.getSessionId()) { deleteLocalSession(candidate); log.log(Level.INFO, "Deleted inactive session " + candidate.getSessionId() + " created " + createTime + " for '" + applicationId + "'"); } } } } catch (Throwable e) { log.log(Level.WARNING, "Error when purging old sessions ", e); } log.log(Level.FINE, () -> "Done purging old sessions"); } private boolean hasExpired(LocalSession candidate) { return (candidate.getCreateTime().plus(sessionLifetime).isBefore(clock.instant())); } private boolean isActiveSession(LocalSession candidate) { return candidate.getStatus() == Session.Status.ACTIVATE; } public void deleteLocalSession(LocalSession session) { long sessionId = session.getSessionId(); try (Lock lock = lock(sessionId)) { log.log(Level.FINE, () -> "Deleting local session " + 
sessionId); SessionStateWatcher watcher = sessionStateWatchers.remove(sessionId); if (watcher != null) watcher.close(); localSessionCache.removeSession(sessionId); deletePersistentData(sessionId); } } private void deletePersistentData(long sessionId) { NestedTransaction transaction = new NestedTransaction(); SessionZooKeeperClient sessionZooKeeperClient = createSessionZooKeeperClient(sessionId); transaction.add(sessionZooKeeperClient.deleteTransaction(), FileTransaction.class); transaction.add(FileTransaction.from(FileOperations.delete(getSessionAppDir(sessionId).getAbsolutePath()))); transaction.commit(); } public void close() { deleteAllSessions(); tenantFileSystemDirs.delete(); try { if (directoryCache != null) { directoryCache.close(); } } catch (Exception e) { log.log(Level.WARNING, "Exception when closing path cache", e); } finally { checkForRemovedSessions(new ArrayList<>()); } } private void deleteAllSessions() { List<LocalSession> sessions = new ArrayList<>(localSessionCache.getSessions()); for (LocalSession session : sessions) { deleteLocalSession(session); } } public RemoteSession getRemoteSession(long sessionId) { return remoteSessionCache.getSession(sessionId); } public List<Long> getRemoteSessions() { return getSessionList(curator.getChildren(sessionsPath)); } public void addRemoteSession(RemoteSession session) { remoteSessionCache.putSession(session); metrics.incAddedSessions(); } public int deleteExpiredRemoteSessions(Clock clock, Duration expiryTime) { int deleted = 0; for (long sessionId : getRemoteSessions()) { RemoteSession session = remoteSessionCache.getSession(sessionId); if (session == null) continue; if (session.getStatus() == Session.Status.ACTIVATE) continue; if (sessionHasExpired(session.getCreateTime(), expiryTime, clock)) { log.log(Level.FINE, () -> "Remote session " + sessionId + " for " + tenantName + " has expired, deleting it"); deleteSession(session); deleted++; } } return deleted; } public void deactivate(RemoteSession 
remoteSession) { remoteSessionCache.putSession(remoteSession.deactivated()); } public void deleteSession(RemoteSession session) { SessionZooKeeperClient sessionZooKeeperClient = createSessionZooKeeperClient(session.getSessionId()); Transaction transaction = sessionZooKeeperClient.deleteTransaction(); transaction.commit(); transaction.close(); } public int deleteExpiredLocks(Clock clock, Duration expiryTime) { int deleted = 0; for (var lock : curator.getChildren(locksPath)) { Path path = locksPath.append(lock); if (zooKeeperNodeCreated(path).orElse(clock.instant()).isBefore(clock.instant().minus(expiryTime))) { log.log(Level.FINE, () -> "Lock " + path + " has expired, deleting it"); curator.delete(path); deleted++; } } return deleted; } private Optional<Instant> zooKeeperNodeCreated(Path path) { return curator.getStat(path).map(s -> Instant.ofEpochMilli(s.getCtime())); } private boolean sessionHasExpired(Instant created, Duration expiryTime, Clock clock) { return (created.plus(expiryTime).isBefore(clock.instant())); } private List<Long> getSessionListFromDirectoryCache(List<ChildData> children) { return getSessionList(children.stream() .map(child -> Path.fromString(child.getPath()).getName()) .collect(Collectors.toList())); } private List<Long> getSessionList(List<String> children) { return children.stream().map(Long::parseLong).collect(Collectors.toList()); } private void initializeRemoteSessions() throws NumberFormatException { getRemoteSessions().forEach(this::sessionAdded); } private synchronized void sessionsChanged() throws NumberFormatException { List<Long> sessions = getSessionListFromDirectoryCache(directoryCache.getCurrentData()); checkForRemovedSessions(sessions); checkForAddedSessions(sessions); } private void checkForRemovedSessions(List<Long> sessions) { for (RemoteSession session : remoteSessionCache.getSessions()) if ( ! 
sessions.contains(session.getSessionId())) sessionRemoved(session.getSessionId()); } private void checkForAddedSessions(List<Long> sessions) { for (Long sessionId : sessions) if (remoteSessionCache.getSession(sessionId) == null) sessionAdded(sessionId); } /** * A session for which we don't have a watcher, i.e. hitherto unknown to us. * * @param sessionId session id for the new session */ public void sessionAdded(long sessionId) { SessionZooKeeperClient sessionZKClient = createSessionZooKeeperClient(sessionId); if (sessionZKClient.readStatus().equals(Session.Status.DELETE)) return; log.log(Level.FINE, () -> "Adding remote session to SessionRepository: " + sessionId); RemoteSession remoteSession = createRemoteSession(sessionId); loadSessionIfActive(remoteSession); addRemoteSession(remoteSession); Optional<LocalSession> localSession = Optional.empty(); if (distributeApplicationPackage()) localSession = createLocalSessionUsingDistributedApplicationPackage(sessionId); addSessionStateWatcher(sessionId, remoteSession, localSession); } void activate(RemoteSession session) { long sessionId = session.getSessionId(); Curator.CompletionWaiter waiter = createSessionZooKeeperClient(sessionId).getActiveWaiter(); log.log(Level.FINE, () -> session.logPre() + "Getting session from repo: " + sessionId); ApplicationSet app = ensureApplicationLoaded(session); log.log(Level.FINE, () -> session.logPre() + "Reloading config for " + sessionId); applicationRepo.reloadConfig(app); log.log(Level.FINE, () -> session.logPre() + "Notifying " + waiter); notifyCompletion(waiter, session); log.log(Level.INFO, session.logPre() + "Session activated: " + sessionId); } void deleteSession(RemoteSession remoteSession, Optional<LocalSession> localSession) { localSession.ifPresent(this::deleteLocalSession); deactivate(remoteSession); } boolean distributeApplicationPackage() { return distributeApplicationPackage.value(); } private void sessionRemoved(long sessionId) { SessionStateWatcher watcher = 
sessionStateWatchers.remove(sessionId); if (watcher != null) watcher.close(); remoteSessionCache.removeSession(sessionId); metrics.incRemovedSessions(); } private void loadSessionIfActive(RemoteSession session) { for (ApplicationId applicationId : applicationRepo.activeApplications()) { if (applicationRepo.requireActiveSessionOf(applicationId) == session.getSessionId()) { log.log(Level.FINE, () -> "Found active application for session " + session.getSessionId() + " , loading it"); applicationRepo.reloadConfig(ensureApplicationLoaded(session)); log.log(Level.INFO, session.logPre() + "Application activated successfully: " + applicationId + " (generation " + session.getSessionId() + ")"); return; } } } void prepareRemoteSession(RemoteSession session) { SessionZooKeeperClient sessionZooKeeperClient = createSessionZooKeeperClient(session.getSessionId()); Curator.CompletionWaiter waiter = sessionZooKeeperClient.getPrepareWaiter(); ensureApplicationLoaded(session); notifyCompletion(waiter, session); } void confirmUpload(RemoteSession session) { Curator.CompletionWaiter waiter = session.getSessionZooKeeperClient().getUploadWaiter(); long sessionId = session.getSessionId(); log.log(Level.FINE, "Notifying upload waiter for session " + sessionId); notifyCompletion(waiter, session); log.log(Level.FINE, "Done notifying upload for session " + sessionId); } void notifyCompletion(Curator.CompletionWaiter completionWaiter, RemoteSession session) { try { completionWaiter.notifyCompletion(); } catch (RuntimeException e) { Set<Class<? extends KeeperException>> acceptedExceptions = Set.of(KeeperException.NoNodeException.class, KeeperException.NodeExistsException.class); Class<? extends Throwable> exceptionClass = e.getCause().getClass(); if (acceptedExceptions.contains(exceptionClass)) log.log(Level.FINE, "Not able to notify completion for session " + session.getSessionId() + " (" + completionWaiter + ")," + " node " + (exceptionClass.equals(KeeperException.NoNodeException.class) ? 
"has been deleted" : "already exists")); else throw e; } } private ApplicationSet loadApplication(RemoteSession session) { SessionZooKeeperClient sessionZooKeeperClient = createSessionZooKeeperClient(session.getSessionId()); ApplicationPackage applicationPackage = sessionZooKeeperClient.loadApplicationPackage(); ActivatedModelsBuilder builder = new ActivatedModelsBuilder(session.getTenantName(), session.getSessionId(), sessionZooKeeperClient, componentRegistry); Optional<AllocatedHosts> allocatedHosts = applicationPackage.getAllocatedHosts(); return ApplicationSet.fromList(builder.buildModels(session.getApplicationId(), sessionZooKeeperClient.readDockerImageRepository(), sessionZooKeeperClient.readVespaVersion(), applicationPackage, new SettableOptional<>(allocatedHosts), clock.instant())); } private void nodeChanged() { zkWatcherExecutor.execute(() -> { Multiset<Session.Status> sessionMetrics = HashMultiset.create(); for (RemoteSession session : remoteSessionCache.getSessions()) { sessionMetrics.add(session.getStatus()); } metrics.setNewSessions(sessionMetrics.count(Session.Status.NEW)); metrics.setPreparedSessions(sessionMetrics.count(Session.Status.PREPARE)); metrics.setActivatedSessions(sessionMetrics.count(Session.Status.ACTIVATE)); metrics.setDeactivatedSessions(sessionMetrics.count(Session.Status.DEACTIVATE)); }); } @SuppressWarnings("unused") private void childEvent(CuratorFramework ignored, PathChildrenCacheEvent event) { zkWatcherExecutor.execute(() -> { log.log(Level.FINE, () -> "Got child event: " + event); switch (event.getType()) { case CHILD_ADDED: sessionsChanged(); synchronizeOnNew(getSessionListFromDirectoryCache(Collections.singletonList(event.getData()))); break; case CHILD_REMOVED: case CONNECTION_RECONNECTED: sessionsChanged(); break; } }); } private void synchronizeOnNew(List<Long> sessionList) { for (long sessionId : sessionList) { RemoteSession session = remoteSessionCache.getSession(sessionId); if (session == null) continue; 
log.log(Level.FINE, () -> session.logPre() + "Confirming upload for session " + sessionId); confirmUpload(session); } } /** * Creates a new deployment session from an application package. * * @param applicationDirectory a File pointing to an application. * @param applicationId application id for this new session. * @param timeoutBudget Timeout for creating session and waiting for other servers. * @return a new session */ public LocalSession createSession(File applicationDirectory, ApplicationId applicationId, TimeoutBudget timeoutBudget, Optional<Long> activeSessionId) { return create(applicationDirectory, applicationId, activeSessionId, false, timeoutBudget); } public RemoteSession createRemoteSession(long sessionId) { SessionZooKeeperClient sessionZKClient = createSessionZooKeeperClient(sessionId); return new RemoteSession(tenantName, sessionId, sessionZKClient); } private void ensureSessionPathDoesNotExist(long sessionId) { Path sessionPath = getSessionPath(sessionId); if (componentRegistry.getConfigCurator().exists(sessionPath.getAbsolute())) { throw new IllegalArgumentException("Path " + sessionPath.getAbsolute() + " already exists in ZooKeeper"); } } private ApplicationPackage createApplication(File userDir, File configApplicationDir, ApplicationId applicationId, long sessionId, Optional<Long> currentlyActiveSessionId, boolean internalRedeploy) { long deployTimestamp = System.currentTimeMillis(); String user = System.getenv("USER"); if (user == null) { user = "unknown"; } DeployData deployData = new DeployData(user, userDir.getAbsolutePath(), applicationId, deployTimestamp, internalRedeploy, sessionId, currentlyActiveSessionId.orElse(nonExistingActiveSessionId)); return FilesApplicationPackage.fromFileWithDeployData(configApplicationDir, deployData); } private LocalSession createSessionFromApplication(ApplicationPackage applicationPackage, long sessionId, TimeoutBudget timeoutBudget, Clock clock) { log.log(Level.FINE, () -> TenantRepository.logPre(tenantName) 
+ "Creating session " + sessionId + " in ZooKeeper"); SessionZooKeeperClient sessionZKClient = createSessionZooKeeperClient(sessionId); sessionZKClient.createNewSession(clock.instant()); Curator.CompletionWaiter waiter = sessionZKClient.getUploadWaiter(); LocalSession session = new LocalSession(tenantName, sessionId, applicationPackage, sessionZKClient); waiter.awaitCompletion(timeoutBudget.timeLeft()); return session; } /** * Creates a new deployment session from an already existing session. * * @param existingSession the session to use as base * @param logger a deploy logger where the deploy log will be written. * @param internalRedeploy whether this session is for a system internal redeploy — not an application package change * @param timeoutBudget timeout for creating session and waiting for other servers. * @return a new session */ public LocalSession createSessionFromExisting(Session existingSession, DeployLogger logger, boolean internalRedeploy, TimeoutBudget timeoutBudget) { File existingApp = getSessionAppDir(existingSession.getSessionId()); ApplicationId existingApplicationId = existingSession.getApplicationId(); Optional<Long> activeSessionId = getActiveSessionId(existingApplicationId); logger.log(Level.FINE, "Create new session for application id '" + existingApplicationId + "' from existing active session " + activeSessionId); LocalSession session = create(existingApp, existingApplicationId, activeSessionId, internalRedeploy, timeoutBudget); session.setApplicationId(existingApplicationId); if (distributeApplicationPackage() && existingSession.getApplicationPackageReference() != null) { session.setApplicationPackageReference(existingSession.getApplicationPackageReference()); } session.setVespaVersion(existingSession.getVespaVersion()); session.setDockerImageRepository(existingSession.getDockerImageRepository()); session.setAthenzDomain(existingSession.getAthenzDomain()); return session; } private LocalSession create(File applicationFile, ApplicationId 
applicationId, Optional<Long> currentlyActiveSessionId, boolean internalRedeploy, TimeoutBudget timeoutBudget) { long sessionId = getNextSessionId(); try { ensureSessionPathDoesNotExist(sessionId); ApplicationPackage app = createApplicationPackage(applicationFile, applicationId, sessionId, currentlyActiveSessionId, internalRedeploy); return createSessionFromApplication(app, sessionId, timeoutBudget, clock); } catch (Exception e) { throw new RuntimeException("Error creating session " + sessionId, e); } } /** * This method is used when creating a session based on a remote session and the distributed application package * It does not wait for session being created on other servers */ private LocalSession createLocalSession(File applicationFile, ApplicationId applicationId, long sessionId) { try { Optional<Long> currentlyActiveSessionId = getActiveSessionId(applicationId); ApplicationPackage applicationPackage = createApplicationPackage(applicationFile, applicationId, sessionId, currentlyActiveSessionId, false); SessionZooKeeperClient sessionZooKeeperClient = createSessionZooKeeperClient(sessionId); return new LocalSession(tenantName, sessionId, applicationPackage, sessionZooKeeperClient); } catch (Exception e) { throw new RuntimeException("Error creating session " + sessionId, e); } } private ApplicationPackage createApplicationPackage(File applicationFile, ApplicationId applicationId, long sessionId, Optional<Long> currentlyActiveSessionId, boolean internalRedeploy) throws IOException { File userApplicationDir = getSessionAppDir(sessionId); copyApp(applicationFile, userApplicationDir); ApplicationPackage applicationPackage = createApplication(applicationFile, userApplicationDir, applicationId, sessionId, currentlyActiveSessionId, internalRedeploy); applicationPackage.writeMetaData(); return applicationPackage; } private void copyApp(File sourceDir, File destinationDir) throws IOException { if (destinationDir.exists()) throw new RuntimeException("Destination dir " + 
destinationDir + " already exists"); if (! sourceDir.isDirectory()) throw new IllegalArgumentException(sourceDir.getAbsolutePath() + " is not a directory"); java.nio.file.Path tempDestinationDir = null; try { tempDestinationDir = Files.createTempDirectory(destinationDir.getParentFile().toPath(), "app-package"); log.log(Level.FINE, "Copying dir " + sourceDir.getAbsolutePath() + " to " + tempDestinationDir.toFile().getAbsolutePath()); IOUtils.copyDirectory(sourceDir, tempDestinationDir.toFile()); log.log(Level.FINE, "Moving " + tempDestinationDir + " to " + destinationDir.getAbsolutePath()); Files.move(tempDestinationDir, destinationDir.toPath(), StandardCopyOption.ATOMIC_MOVE); } finally { if (tempDestinationDir != null) IOUtils.recursiveDeleteDir(tempDestinationDir.toFile()); } } /** * Returns a new session instance for the given session id. */ LocalSession createSessionFromId(long sessionId) { File sessionDir = getAndValidateExistingSessionAppDir(sessionId); ApplicationPackage applicationPackage = FilesApplicationPackage.fromFile(sessionDir); SessionZooKeeperClient sessionZKClient = createSessionZooKeeperClient(sessionId); return new LocalSession(tenantName, sessionId, applicationPackage, sessionZKClient); } /** * Returns a new local session for the given session id if it does not already exist. 
* Will also add the session to the local session cache if necessary */ public Optional<LocalSession> createLocalSessionUsingDistributedApplicationPackage(long sessionId) { if (applicationRepo.hasLocalSession(sessionId)) { log.log(Level.FINE, () -> "Local session for session id " + sessionId + " already exists"); return Optional.of(createSessionFromId(sessionId)); } SessionZooKeeperClient sessionZKClient = createSessionZooKeeperClient(sessionId); FileReference fileReference = sessionZKClient.readApplicationPackageReference(); log.log(Level.FINE, () -> "File reference for session id " + sessionId + ": " + fileReference); if (fileReference != null) { File rootDir = new File(Defaults.getDefaults().underVespaHome(componentRegistry.getConfigserverConfig().fileReferencesDir())); File sessionDir; FileDirectory fileDirectory = new FileDirectory(rootDir); try { sessionDir = fileDirectory.getFile(fileReference); } catch (IllegalArgumentException e) { log.log(Level.FINE, () -> "File reference for session id " + sessionId + ": " + fileReference + " not found in " + fileDirectory); return Optional.empty(); } ApplicationId applicationId = sessionZKClient.readApplicationId() .orElseThrow(() -> new RuntimeException("Could not find application id for session " + sessionId)); log.log(Level.FINE, () -> "Creating local session for tenant '" + tenantName + "' with session id " + sessionId); LocalSession localSession = createLocalSession(sessionDir, applicationId, sessionId); addLocalSession(localSession); return Optional.of(localSession); } return Optional.empty(); } private Optional<Long> getActiveSessionId(ApplicationId applicationId) { List<ApplicationId> applicationIds = applicationRepo.activeApplications(); return applicationIds.contains(applicationId) ? 
Optional.of(applicationRepo.requireActiveSessionOf(applicationId)) : Optional.empty(); } private long getNextSessionId() { return new SessionCounter(componentRegistry.getConfigCurator(), tenantName).nextSessionId(); } public Path getSessionPath(long sessionId) { return sessionsPath.append(String.valueOf(sessionId)); } Path getSessionStatePath(long sessionId) { return getSessionPath(sessionId).append(ConfigCurator.SESSIONSTATE_ZK_SUBPATH); } private SessionZooKeeperClient createSessionZooKeeperClient(long sessionId) { String serverId = componentRegistry.getConfigserverConfig().serverId(); return new SessionZooKeeperClient(curator, componentRegistry.getConfigCurator(), tenantName, sessionId, serverId); } private File getAndValidateExistingSessionAppDir(long sessionId) { File appDir = getSessionAppDir(sessionId); if (!appDir.exists() || !appDir.isDirectory()) { throw new IllegalArgumentException("Unable to find correct application directory for session " + sessionId); } return appDir; } private File getSessionAppDir(long sessionId) { return new TenantFileSystemDirs(componentRegistry.getConfigServerDB(), tenantName).getUserApplicationDir(sessionId); } private void addSessionStateWatcher(long sessionId, RemoteSession remoteSession, Optional<LocalSession> localSession) { if (sessionStateWatchers.containsKey(sessionId)) { localSession.ifPresent(session -> sessionStateWatchers.get(sessionId).addLocalSession(session)); } else { Curator.FileCache fileCache = curator.createFileCache(getSessionStatePath(sessionId).getAbsolute(), false); fileCache.addListener(this::nodeChanged); sessionStateWatchers.put(sessionId, new SessionStateWatcher(fileCache, remoteSession, localSession, metrics, zkWatcherExecutor, this)); } } @Override public String toString() { return getLocalSessions().toString(); } /** Returns the lock for session operations for the given session id. 
*/ public Lock lock(long sessionId) { return curator.lock(lockPath(sessionId), Duration.ofMinutes(1)); } public Clock clock() { return clock; } private Path lockPath(long sessionId) { return locksPath.append(String.valueOf(sessionId)); } public Transaction createActivateTransaction(Session session) { Transaction transaction = createSetStatusTransaction(session, Session.Status.ACTIVATE); transaction.add(applicationRepo.createPutTransaction(session.getApplicationId(), session.getSessionId()).operations()); return transaction; } private Transaction createSetStatusTransaction(Session session, Session.Status status) { return session.sessionZooKeeperClient.createWriteStatusTransaction(status); } void setPrepared(Session session) { session.setStatus(Session.Status.PREPARE); } private static class FileTransaction extends AbstractTransaction { public static FileTransaction from(FileOperation operation) { FileTransaction transaction = new FileTransaction(); transaction.add(operation); return transaction; } @Override public void prepare() { } @Override public void commit() { for (Operation operation : operations()) ((FileOperation)operation).commit(); } } /** Factory for file operations */ private static class FileOperations { /** Creates an operation which recursively deletes the given path */ public static DeleteOperation delete(String pathToDelete) { return new DeleteOperation(pathToDelete); } } private interface FileOperation extends Transaction.Operation { void commit(); } /** * Recursively deletes this path and everything below. * Succeeds with no action if the path does not exist. */ private static class DeleteOperation implements FileOperation { private final String pathToDelete; DeleteOperation(String pathToDelete) { this.pathToDelete = pathToDelete; } @Override public void commit() { IOUtils.recursiveDeleteDir(new File(pathToDelete)); } } }
Yes, I think you are right. Need to look into that.
public ApplicationSet ensureApplicationLoaded(RemoteSession session) { Optional<ApplicationSet> applicationSet = session.applicationSet(); if (applicationSet.isPresent()) { return applicationSet.get(); } ApplicationSet newApplicationSet = loadApplication(session); RemoteSession newSession = new RemoteSession(session.getTenantName(), session.getSessionId(), session.getSessionZooKeeperClient(), Optional.of(newApplicationSet)); remoteSessionCache.putSession(newSession); return newApplicationSet; }
remoteSessionCache.putSession(newSession);
public ApplicationSet ensureApplicationLoaded(RemoteSession session) { try (var lock = lock(session.sessionId)) { if (session.applicationSet().isPresent()) { return session.applicationSet().get(); } ApplicationSet applicationSet = loadApplication(session); remoteSessionCache.putSession(session.activated(applicationSet)); return applicationSet; } }
/**
 * Manages deployment sessions for a single tenant: keeps in-memory caches of local
 * (file-system backed) and remote (ZooKeeper backed) sessions, and keeps them in sync
 * with ZooKeeper through a directory cache plus one {@link SessionStateWatcher} per session.
 *
 * NOTE(review): thread-safety is mixed — a few methods are {@code synchronized}, others
 * take a per-session Curator lock via {@link #lock(long)}, and some mutate the caches with
 * neither; confirm the intended invariants before changing locking here.
 */
class SessionRepository {

    private static final Logger log = Logger.getLogger(SessionRepository.class.getName());
    // Session app dirs are named by their numeric session id; accept only those.
    private static final FilenameFilter sessionApplicationsFilter = (dir, name) -> name.matches("\\d+");
    // Sentinel used when an application has no currently active session (see createApplication).
    private static final long nonExistingActiveSessionId = 0;

    private final SessionCache<LocalSession> localSessionCache = new SessionCache<>();
    private final SessionCache<RemoteSession> remoteSessionCache = new SessionCache<>();
    // One state watcher per known session id; see addSessionStateWatcher / sessionRemoved.
    private final Map<Long, SessionStateWatcher> sessionStateWatchers = new HashMap<>();
    private final Duration sessionLifetime;
    private final Clock clock;
    private final Curator curator;
    private final Executor zkWatcherExecutor;
    private final TenantFileSystemDirs tenantFileSystemDirs;
    private final BooleanFlag distributeApplicationPackage;
    private final MetricUpdater metrics;
    private final Curator.DirectoryCache directoryCache;
    private final TenantApplications applicationRepo;
    private final SessionPreparer sessionPreparer;
    private final Path sessionsPath;
    private final TenantName tenantName;
    private final GlobalComponentRegistry componentRegistry;
    private final Path locksPath;

    public SessionRepository(TenantName tenantName, GlobalComponentRegistry componentRegistry,
                             TenantApplications applicationRepo, FlagSource flagSource,
                             SessionPreparer sessionPreparer) {
        this.tenantName = tenantName;
        this.componentRegistry = componentRegistry;
        this.sessionsPath = TenantRepository.getSessionsPath(tenantName);
        this.clock = componentRegistry.getClock();
        this.curator = componentRegistry.getCurator();
        this.sessionLifetime = Duration.ofSeconds(componentRegistry.getConfigserverConfig().sessionLifetime());
        this.zkWatcherExecutor = command -> componentRegistry.getZkWatcherExecutor().execute(tenantName, command);
        this.tenantFileSystemDirs = new TenantFileSystemDirs(componentRegistry.getConfigServerDB(), tenantName);
        this.applicationRepo = applicationRepo;
        this.sessionPreparer = sessionPreparer;
        this.distributeApplicationPackage = Flags.CONFIGSERVER_DISTRIBUTE_APPLICATION_PACKAGE.bindTo(flagSource);
        this.metrics = componentRegistry.getMetrics().getOrCreateMetricUpdater(Metrics.createDimensions(tenantName));
        this.locksPath = TenantRepository.getLocksPath(tenantName);
        // Existing sessions are loaded before the ZK directory cache is created and started —
        // presumably so change events only arrive once the caches are populated; TODO confirm.
        loadSessions();
        this.directoryCache = curator.createDirectoryCache(sessionsPath.getAbsolute(), false, false, componentRegistry.getZkCacheExecutor());
        this.directoryCache.addListener(this::childEvent);
        this.directoryCache.start();
    }

    /** Loads local sessions from disk, then remote sessions from ZooKeeper. */
    private void loadSessions() {
        loadLocalSessions();
        initializeRemoteSessions();
    }

    /** Caches the local session and registers its remote counterpart plus a state watcher. */
    public synchronized void addLocalSession(LocalSession session) {
        localSessionCache.putSession(session);
        long sessionId = session.getSessionId();
        RemoteSession remoteSession = createRemoteSession(sessionId);
        addSessionStateWatcher(sessionId, remoteSession, Optional.of(session));
    }

    public LocalSession getLocalSession(long sessionId) {
        return localSessionCache.getSession(sessionId);
    }

    public List<LocalSession> getLocalSessions() {
        return localSessionCache.getSessions();
    }

    /** Loads every numeric session directory on disk; broken sessions are logged and skipped. */
    private void loadLocalSessions() {
        File[] sessions = tenantFileSystemDirs.sessionsPath().listFiles(sessionApplicationsFilter);
        if (sessions == null) return;
        for (File session : sessions) {
            try {
                addLocalSession(createSessionFromId(Long.parseLong(session.getName())));
            } catch (IllegalArgumentException e) {
                log.log(Level.WARNING, "Could not load session '" + session.getAbsolutePath() + "':" + e.getMessage() + ", skipping it.");
            }
        }
    }

    /**
     * Prepares the session: creates the application, runs the session preparer, marks the
     * session PREPARE and waits (within the params' timeout budget) for other servers.
     *
     * @return the config change actions produced by preparation
     */
    public ConfigChangeActions prepareLocalSession(LocalSession session, DeployLogger logger, PrepareParams params,
                                                   Optional<ApplicationSet> currentActiveApplicationSet,
                                                   Path tenantPath, Instant now) {
        applicationRepo.createApplication(params.getApplicationId());
        logger.log(Level.FINE, "Created application " + params.getApplicationId());
        long sessionId = session.getSessionId();
        SessionZooKeeperClient sessionZooKeeperClient = createSessionZooKeeperClient(sessionId);
        // Waiter is created before prepare so completions are not missed; TODO confirm.
        Curator.CompletionWaiter waiter = sessionZooKeeperClient.createPrepareWaiter();
        ConfigChangeActions actions = sessionPreparer.prepare(applicationRepo.getHostValidator(), logger, params,
                                                              currentActiveApplicationSet, tenantPath, now,
                                                              getSessionAppDir(sessionId),
                                                              session.getApplicationPackage(), sessionZooKeeperClient)
                                                     .getConfigChangeActions();
        setPrepared(session);
        waiter.awaitCompletion(params.getTimeoutBudget().timeLeft());
        return actions;
    }

    /**
     * Deletes local sessions on two criteria: (1) past sessionLifetime and not ACTIVATE,
     * or (2) older than one day and not the application's currently active session.
     * Any error aborts the sweep but is only logged.
     */
    public void deleteExpiredSessions(Map<ApplicationId, Long> activeSessions) {
        log.log(Level.FINE, () -> "Purging old sessions for tenant '" + tenantName + "'");
        try {
            for (LocalSession candidate : localSessionCache.getSessions()) {
                Instant createTime = candidate.getCreateTime();
                log.log(Level.FINE, () -> "Candidate session for deletion: " + candidate.getSessionId() + ", created: " + createTime);
                if (hasExpired(candidate) && !isActiveSession(candidate)) {
                    deleteLocalSession(candidate);
                } else if (createTime.plus(Duration.ofDays(1)).isBefore(clock.instant())) {
                    Optional<ApplicationId> applicationId = candidate.getOptionalApplicationId();
                    if (applicationId.isEmpty()) continue;
                    Long activeSession = activeSessions.get(applicationId.get());
                    if (activeSession == null || activeSession != candidate.getSessionId()) {
                        deleteLocalSession(candidate);
                        // NOTE(review): this logs the Optional wrapper (Optional[...]), not its value.
                        log.log(Level.INFO, "Deleted inactive session " + candidate.getSessionId() + " created " +
                                            createTime + " for '" + applicationId + "'");
                    }
                }
            }
        } catch (Throwable e) {
            log.log(Level.WARNING, "Error when purging old sessions ", e);
        }
        log.log(Level.FINE, () -> "Done purging old sessions");
    }

    private boolean hasExpired(LocalSession candidate) {
        return (candidate.getCreateTime().plus(sessionLifetime).isBefore(clock.instant()));
    }

    private boolean isActiveSession(LocalSession candidate) {
        return candidate.getStatus() == Session.Status.ACTIVATE;
    }

    /** Deletes the session's watcher, cache entry and persistent data, under the per-session lock. */
    public void deleteLocalSession(LocalSession session) {
        long sessionId = session.getSessionId();
        try (Lock lock = lock(sessionId)) {
            log.log(Level.FINE, () -> "Deleting local session " + sessionId);
            SessionStateWatcher watcher = sessionStateWatchers.remove(sessionId);
            if (watcher != null) watcher.close();
            localSessionCache.removeSession(sessionId);
            deletePersistentData(sessionId);
        }
    }

    /** Deletes the session's ZooKeeper state and its on-disk app dir in one nested transaction. */
    private void deletePersistentData(long sessionId) {
        NestedTransaction transaction = new NestedTransaction();
        SessionZooKeeperClient sessionZooKeeperClient = createSessionZooKeeperClient(sessionId);
        transaction.add(sessionZooKeeperClient.deleteTransaction(), FileTransaction.class);
        transaction.add(FileTransaction.from(FileOperations.delete(getSessionAppDir(sessionId).getAbsolutePath())));
        transaction.commit();
    }

    /** Deletes all sessions and tenant dirs, closes the directory cache, and clears remote state. */
    public void close() {
        deleteAllSessions();
        tenantFileSystemDirs.delete();
        try {
            if (directoryCache != null) {
                directoryCache.close();
            }
        } catch (Exception e) {
            log.log(Level.WARNING, "Exception when closing path cache", e);
        } finally {
            // Empty list => every cached remote session is treated as removed.
            checkForRemovedSessions(new ArrayList<>());
        }
    }

    private void deleteAllSessions() {
        // Copy first: deleteLocalSession mutates the cache we are iterating over.
        List<LocalSession> sessions = new ArrayList<>(localSessionCache.getSessions());
        for (LocalSession session : sessions) {
            deleteLocalSession(session);
        }
    }

    public RemoteSession getRemoteSession(long sessionId) {
        return remoteSessionCache.getSession(sessionId);
    }

    /** Returns the session ids present under the tenant's sessions path in ZooKeeper. */
    public List<Long> getRemoteSessions() {
        return getSessionList(curator.getChildren(sessionsPath));
    }

    public void addRemoteSession(RemoteSession session) {
        remoteSessionCache.putSession(session);
        metrics.incAddedSessions();
    }

    /** Deletes expired, non-active remote sessions; returns how many were deleted. */
    public int deleteExpiredRemoteSessions(Clock clock, Duration expiryTime) {
        int deleted = 0;
        for (long sessionId : getRemoteSessions()) {
            RemoteSession session = remoteSessionCache.getSession(sessionId);
            if (session == null) continue;
            if (session.getStatus() == Session.Status.ACTIVATE) continue;
            if (sessionHasExpired(session.getCreateTime(), expiryTime, clock)) {
                log.log(Level.FINE, () -> "Remote session " + sessionId + " for " + tenantName + " has expired, deleting it");
                deleteSession(session);
                deleted++;
            }
        }
        return deleted;
    }

    /** Replaces the cached session with its deactivated copy. */
    public void deactivate(RemoteSession remoteSession) {
        remoteSessionCache.putSession(remoteSession.deactivated());
    }

    /** Deletes the session's ZooKeeper state. */
    public void deleteSession(RemoteSession session) {
        SessionZooKeeperClient sessionZooKeeperClient = createSessionZooKeeperClient(session.getSessionId());
        Transaction transaction = sessionZooKeeperClient.deleteTransaction();
        transaction.commit();
        transaction.close();
    }

    /** Deletes lock nodes older than expiryTime; nodes without a ctime are treated as fresh. */
    public int deleteExpiredLocks(Clock clock, Duration expiryTime) {
        int deleted = 0;
        for (var lock : curator.getChildren(locksPath)) {
            Path path = locksPath.append(lock);
            if (zooKeeperNodeCreated(path).orElse(clock.instant()).isBefore(clock.instant().minus(expiryTime))) {
                log.log(Level.FINE, () -> "Lock " + path + " has expired, deleting it");
                curator.delete(path);
                deleted++;
            }
        }
        return deleted;
    }

    /** Returns the node's creation time from its ZooKeeper stat, if the node exists. */
    private Optional<Instant> zooKeeperNodeCreated(Path path) {
        return curator.getStat(path).map(s -> Instant.ofEpochMilli(s.getCtime()));
    }

    private boolean sessionHasExpired(Instant created, Duration expiryTime, Clock clock) {
        return (created.plus(expiryTime).isBefore(clock.instant()));
    }

    private List<Long> getSessionListFromDirectoryCache(List<ChildData> children) {
        return getSessionList(children.stream()
                                      .map(child -> Path.fromString(child.getPath()).getName())
                                      .collect(Collectors.toList()));
    }

    private List<Long> getSessionList(List<String> children) {
        return children.stream().map(Long::parseLong).collect(Collectors.toList());
    }

    private void initializeRemoteSessions() throws NumberFormatException {
        getRemoteSessions().forEach(this::sessionAdded);
    }

    /** Reconciles the remote session cache against the directory cache's current children. */
    private synchronized void sessionsChanged() throws NumberFormatException {
        List<Long> sessions = getSessionListFromDirectoryCache(directoryCache.getCurrentData());
        checkForRemovedSessions(sessions);
        checkForAddedSessions(sessions);
    }

    private void checkForRemovedSessions(List<Long> sessions) {
        for (RemoteSession session : remoteSessionCache.getSessions())
            if ( ! sessions.contains(session.getSessionId()))
                sessionRemoved(session.getSessionId());
    }

    private void checkForAddedSessions(List<Long> sessions) {
        for (Long sessionId : sessions)
            if (remoteSessionCache.getSession(sessionId) == null)
                sessionAdded(sessionId);
    }

    /**
     * A session for which we don't have a watcher, i.e. hitherto unknown to us.
     * Sessions already marked DELETE in ZooKeeper are ignored.
     *
     * @param sessionId session id for the new session
     */
    public void sessionAdded(long sessionId) {
        SessionZooKeeperClient sessionZKClient = createSessionZooKeeperClient(sessionId);
        if (sessionZKClient.readStatus().equals(Session.Status.DELETE)) return;
        log.log(Level.FINE, () -> "Adding remote session to SessionRepository: " + sessionId);
        RemoteSession remoteSession = createRemoteSession(sessionId);
        loadSessionIfActive(remoteSession);
        addRemoteSession(remoteSession);
        Optional<LocalSession> localSession = Optional.empty();
        if (distributeApplicationPackage())
            localSession = createLocalSessionUsingDistributedApplicationPackage(sessionId);
        addSessionStateWatcher(sessionId, remoteSession, localSession);
    }

    /** Loads the session's application, reloads config, and notifies the activation waiter. */
    void activate(RemoteSession session) {
        long sessionId = session.getSessionId();
        Curator.CompletionWaiter waiter = createSessionZooKeeperClient(sessionId).getActiveWaiter();
        log.log(Level.FINE, () -> session.logPre() + "Getting session from repo: " + sessionId);
        ApplicationSet app = ensureApplicationLoaded(session);
        log.log(Level.FINE, () -> session.logPre() + "Reloading config for " + sessionId);
        applicationRepo.reloadConfig(app);
        log.log(Level.FINE, () -> session.logPre() + "Notifying " + waiter);
        notifyCompletion(waiter, session);
        log.log(Level.INFO, session.logPre() + "Session activated: " + sessionId);
    }

    void deleteSession(RemoteSession remoteSession, Optional<LocalSession> localSession) {
        localSession.ifPresent(this::deleteLocalSession);
        deactivate(remoteSession);
    }

    boolean distributeApplicationPackage() {
        return distributeApplicationPackage.value();
    }

    /** Drops the watcher and cache entry for a session that disappeared from ZooKeeper. */
    private void sessionRemoved(long sessionId) {
        SessionStateWatcher watcher = sessionStateWatchers.remove(sessionId);
        if (watcher != null) watcher.close();
        remoteSessionCache.removeSession(sessionId);
        metrics.incRemovedSessions();
    }

    /** If this session is some application's active session, loads it and reloads config. */
    private void loadSessionIfActive(RemoteSession session) {
        for (ApplicationId applicationId : applicationRepo.activeApplications()) {
            if (applicationRepo.requireActiveSessionOf(applicationId) == session.getSessionId()) {
                log.log(Level.FINE, () -> "Found active application for session " + session.getSessionId() + " , loading it");
                applicationRepo.reloadConfig(ensureApplicationLoaded(session));
                log.log(Level.INFO, session.logPre() + "Application activated successfully: " + applicationId +
                                    " (generation " + session.getSessionId() + ")");
                return;
            }
        }
    }

    void prepareRemoteSession(RemoteSession session) {
        SessionZooKeeperClient sessionZooKeeperClient = createSessionZooKeeperClient(session.getSessionId());
        Curator.CompletionWaiter waiter = sessionZooKeeperClient.getPrepareWaiter();
        ensureApplicationLoaded(session);
        notifyCompletion(waiter, session);
    }

    void confirmUpload(RemoteSession session) {
        Curator.CompletionWaiter waiter = session.getSessionZooKeeperClient().getUploadWaiter();
        long sessionId = session.getSessionId();
        log.log(Level.FINE, "Notifying upload waiter for session " + sessionId);
        notifyCompletion(waiter, session);
        log.log(Level.FINE, "Done notifying upload for session " + sessionId);
    }

    /**
     * Notifies the waiter; NoNode/NodeExists failures are expected (node deleted or already
     * created by another server) and only logged, anything else is rethrown.
     */
    void notifyCompletion(Curator.CompletionWaiter completionWaiter, RemoteSession session) {
        try {
            completionWaiter.notifyCompletion();
        } catch (RuntimeException e) {
            Set<Class<? extends KeeperException>> acceptedExceptions = Set.of(KeeperException.NoNodeException.class,
                                                                              KeeperException.NodeExistsException.class);
            // NOTE(review): assumes the cause is always non-null — a cause-less RuntimeException would NPE here.
            Class<? extends Throwable> exceptionClass = e.getCause().getClass();
            if (acceptedExceptions.contains(exceptionClass))
                log.log(Level.FINE, "Not able to notify completion for session " + session.getSessionId() +
                        " (" + completionWaiter + ")," +
                        " node " + (exceptionClass.equals(KeeperException.NoNodeException.class) ? "has been deleted" : "already exists"));
            else
                throw e;
        }
    }

    /** Builds the application models for the session from its ZooKeeper-stored package. */
    private ApplicationSet loadApplication(RemoteSession session) {
        SessionZooKeeperClient sessionZooKeeperClient = createSessionZooKeeperClient(session.getSessionId());
        ApplicationPackage applicationPackage = sessionZooKeeperClient.loadApplicationPackage();
        ActivatedModelsBuilder builder = new ActivatedModelsBuilder(session.getTenantName(), session.getSessionId(),
                                                                    sessionZooKeeperClient, componentRegistry);
        Optional<AllocatedHosts> allocatedHosts = applicationPackage.getAllocatedHosts();
        return ApplicationSet.fromList(builder.buildModels(session.getApplicationId(),
                                                           sessionZooKeeperClient.readDockerImageRepository(),
                                                           sessionZooKeeperClient.readVespaVersion(),
                                                           applicationPackage,
                                                           new SettableOptional<>(allocatedHosts),
                                                           clock.instant()));
    }

    /** Recomputes per-status session count metrics; runs on the ZK watcher executor. */
    private void nodeChanged() {
        zkWatcherExecutor.execute(() -> {
            Multiset<Session.Status> sessionMetrics = HashMultiset.create();
            for (RemoteSession session : remoteSessionCache.getSessions()) {
                sessionMetrics.add(session.getStatus());
            }
            metrics.setNewSessions(sessionMetrics.count(Session.Status.NEW));
            metrics.setPreparedSessions(sessionMetrics.count(Session.Status.PREPARE));
            metrics.setActivatedSessions(sessionMetrics.count(Session.Status.ACTIVATE));
            metrics.setDeactivatedSessions(sessionMetrics.count(Session.Status.DEACTIVATE));
        });
    }

    /** Directory cache callback: reconciles sessions on add/remove/reconnect events. */
    @SuppressWarnings("unused")
    private void childEvent(CuratorFramework ignored, PathChildrenCacheEvent event) {
        zkWatcherExecutor.execute(() -> {
            log.log(Level.FINE, () -> "Got child event: " + event);
            switch (event.getType()) {
                case CHILD_ADDED:
                    sessionsChanged();
                    synchronizeOnNew(getSessionListFromDirectoryCache(Collections.singletonList(event.getData())));
                    break;
                case CHILD_REMOVED:
                case CONNECTION_RECONNECTED:
                    sessionsChanged();
                    break;
            }
        });
    }

    /** Confirms upload for each newly seen session that is present in the remote cache. */
    private void synchronizeOnNew(List<Long> sessionList) {
        for (long sessionId : sessionList) {
            RemoteSession session = remoteSessionCache.getSession(sessionId);
            if (session == null) continue;
            log.log(Level.FINE, () -> session.logPre() + "Confirming upload for session " + sessionId);
            confirmUpload(session);
        }
    }

    /**
     * Creates a new deployment session from an application package.
     *
     * @param applicationDirectory a File pointing to an application.
     * @param applicationId application id for this new session.
     * @param timeoutBudget Timeout for creating session and waiting for other servers.
     * @return a new session
     */
    public LocalSession createSession(File applicationDirectory, ApplicationId applicationId,
                                      TimeoutBudget timeoutBudget, Optional<Long> activeSessionId) {
        return create(applicationDirectory, applicationId, activeSessionId, false, timeoutBudget);
    }

    public RemoteSession createRemoteSession(long sessionId) {
        SessionZooKeeperClient sessionZKClient = createSessionZooKeeperClient(sessionId);
        return new RemoteSession(tenantName, sessionId, sessionZKClient);
    }

    private void ensureSessionPathDoesNotExist(long sessionId) {
        Path sessionPath = getSessionPath(sessionId);
        if (componentRegistry.getConfigCurator().exists(sessionPath.getAbsolute())) {
            throw new IllegalArgumentException("Path " + sessionPath.getAbsolute() + " already exists in ZooKeeper");
        }
    }

    /** Builds a FilesApplicationPackage with deploy metadata (user, timestamp, active session). */
    private ApplicationPackage createApplication(File userDir, File configApplicationDir, ApplicationId applicationId,
                                                 long sessionId, Optional<Long> currentlyActiveSessionId,
                                                 boolean internalRedeploy) {
        long deployTimestamp = System.currentTimeMillis();
        String user = System.getenv("USER");
        if (user == null) {
            user = "unknown";
        }
        DeployData deployData = new DeployData(user, userDir.getAbsolutePath(), applicationId, deployTimestamp,
                                               internalRedeploy, sessionId,
                                               currentlyActiveSessionId.orElse(nonExistingActiveSessionId));
        return FilesApplicationPackage.fromFileWithDeployData(configApplicationDir, deployData);
    }

    /** Creates the session in ZooKeeper and waits for the upload waiter within the budget. */
    private LocalSession createSessionFromApplication(ApplicationPackage applicationPackage, long sessionId,
                                                      TimeoutBudget timeoutBudget, Clock clock) {
        log.log(Level.FINE, () -> TenantRepository.logPre(tenantName) + "Creating session " + sessionId + " in ZooKeeper");
        SessionZooKeeperClient sessionZKClient = createSessionZooKeeperClient(sessionId);
        sessionZKClient.createNewSession(clock.instant());
        Curator.CompletionWaiter waiter = sessionZKClient.getUploadWaiter();
        LocalSession session = new LocalSession(tenantName, sessionId, applicationPackage, sessionZKClient);
        waiter.awaitCompletion(timeoutBudget.timeLeft());
        return session;
    }

    /**
     * Creates a new deployment session from an already existing session.
     *
     * @param existingSession the session to use as base
     * @param logger a deploy logger where the deploy log will be written.
     * @param internalRedeploy whether this session is for a system internal redeploy — not an application package change
     * @param timeoutBudget timeout for creating session and waiting for other servers.
     * @return a new session
     */
    public LocalSession createSessionFromExisting(Session existingSession, DeployLogger logger,
                                                  boolean internalRedeploy, TimeoutBudget timeoutBudget) {
        File existingApp = getSessionAppDir(existingSession.getSessionId());
        ApplicationId existingApplicationId = existingSession.getApplicationId();
        Optional<Long> activeSessionId = getActiveSessionId(existingApplicationId);
        logger.log(Level.FINE, "Create new session for application id '" + existingApplicationId +
                               "' from existing active session " + activeSessionId);
        LocalSession session = create(existingApp, existingApplicationId, activeSessionId, internalRedeploy, timeoutBudget);
        // Carry over settings from the base session.
        session.setApplicationId(existingApplicationId);
        if (distributeApplicationPackage() && existingSession.getApplicationPackageReference() != null) {
            session.setApplicationPackageReference(existingSession.getApplicationPackageReference());
        }
        session.setVespaVersion(existingSession.getVespaVersion());
        session.setDockerImageRepository(existingSession.getDockerImageRepository());
        session.setAthenzDomain(existingSession.getAthenzDomain());
        return session;
    }

    /** Allocates the next session id and creates the session; any failure becomes a RuntimeException. */
    private LocalSession create(File applicationFile, ApplicationId applicationId,
                                Optional<Long> currentlyActiveSessionId, boolean internalRedeploy,
                                TimeoutBudget timeoutBudget) {
        long sessionId = getNextSessionId();
        try {
            ensureSessionPathDoesNotExist(sessionId);
            ApplicationPackage app = createApplicationPackage(applicationFile, applicationId, sessionId,
                                                              currentlyActiveSessionId, internalRedeploy);
            return createSessionFromApplication(app, sessionId, timeoutBudget, clock);
        } catch (Exception e) {
            throw new RuntimeException("Error creating session " + sessionId, e);
        }
    }

    /**
     * This method is used when creating a session based on a remote session and the distributed application package
     * It does not wait for session being created on other servers
     */
    private LocalSession createLocalSession(File applicationFile, ApplicationId applicationId, long sessionId) {
        try {
            Optional<Long> currentlyActiveSessionId = getActiveSessionId(applicationId);
            ApplicationPackage applicationPackage = createApplicationPackage(applicationFile, applicationId, sessionId,
                                                                             currentlyActiveSessionId, false);
            SessionZooKeeperClient sessionZooKeeperClient = createSessionZooKeeperClient(sessionId);
            return new LocalSession(tenantName, sessionId, applicationPackage, sessionZooKeeperClient);
        } catch (Exception e) {
            throw new RuntimeException("Error creating session " + sessionId, e);
        }
    }

    /** Copies the application into the session dir and writes its deploy metadata. */
    private ApplicationPackage createApplicationPackage(File applicationFile, ApplicationId applicationId, long sessionId,
                                                        Optional<Long> currentlyActiveSessionId,
                                                        boolean internalRedeploy) throws IOException {
        File userApplicationDir = getSessionAppDir(sessionId);
        copyApp(applicationFile, userApplicationDir);
        ApplicationPackage applicationPackage = createApplication(applicationFile, userApplicationDir, applicationId,
                                                                  sessionId, currentlyActiveSessionId, internalRedeploy);
        applicationPackage.writeMetaData();
        return applicationPackage;
    }

    /**
     * Copies the app into a temp sibling dir first and then moves it into place atomically,
     * so a partially copied application dir is never observable; the temp dir is cleaned up
     * whether or not the move succeeded.
     */
    private void copyApp(File sourceDir, File destinationDir) throws IOException {
        if (destinationDir.exists())
            throw new RuntimeException("Destination dir " + destinationDir + " already exists");
        if (! sourceDir.isDirectory())
            throw new IllegalArgumentException(sourceDir.getAbsolutePath() + " is not a directory");
        java.nio.file.Path tempDestinationDir = null;
        try {
            tempDestinationDir = Files.createTempDirectory(destinationDir.getParentFile().toPath(), "app-package");
            log.log(Level.FINE, "Copying dir " + sourceDir.getAbsolutePath() + " to " + tempDestinationDir.toFile().getAbsolutePath());
            IOUtils.copyDirectory(sourceDir, tempDestinationDir.toFile());
            log.log(Level.FINE, "Moving " + tempDestinationDir + " to " + destinationDir.getAbsolutePath());
            Files.move(tempDestinationDir, destinationDir.toPath(), StandardCopyOption.ATOMIC_MOVE);
        } finally {
            // After a successful move this path no longer exists, so the delete is a no-op.
            if (tempDestinationDir != null)
                IOUtils.recursiveDeleteDir(tempDestinationDir.toFile());
        }
    }

    /**
     * Returns a new session instance for the given session id, backed by the existing app dir on disk.
     */
    LocalSession createSessionFromId(long sessionId) {
        File sessionDir = getAndValidateExistingSessionAppDir(sessionId);
        ApplicationPackage applicationPackage = FilesApplicationPackage.fromFile(sessionDir);
        SessionZooKeeperClient sessionZKClient = createSessionZooKeeperClient(sessionId);
        return new LocalSession(tenantName, sessionId, applicationPackage, sessionZKClient);
    }

    /**
     * Returns a new local session for the given session id if it does not already exist.
     * Will also add the session to the local session cache if necessary.
     * Returns empty if there is no distributed application package reference, or the
     * referenced file cannot be found locally.
     */
    public Optional<LocalSession> createLocalSessionUsingDistributedApplicationPackage(long sessionId) {
        if (applicationRepo.hasLocalSession(sessionId)) {
            log.log(Level.FINE, () -> "Local session for session id " + sessionId + " already exists");
            return Optional.of(createSessionFromId(sessionId));
        }
        SessionZooKeeperClient sessionZKClient = createSessionZooKeeperClient(sessionId);
        FileReference fileReference = sessionZKClient.readApplicationPackageReference();
        log.log(Level.FINE, () -> "File reference for session id " + sessionId + ": " + fileReference);
        if (fileReference != null) {
            File rootDir = new File(Defaults.getDefaults().underVespaHome(componentRegistry.getConfigserverConfig().fileReferencesDir()));
            File sessionDir;
            FileDirectory fileDirectory = new FileDirectory(rootDir);
            try {
                sessionDir = fileDirectory.getFile(fileReference);
            } catch (IllegalArgumentException e) {
                log.log(Level.INFO, "File reference for session id " + sessionId + ": " + fileReference + " not found in " + fileDirectory);
                return Optional.empty();
            }
            ApplicationId applicationId = sessionZKClient.readApplicationId()
                    .orElseThrow(() -> new RuntimeException("Could not find application id for session " + sessionId));
            log.log(Level.FINE, () -> "Creating local session for tenant '" + tenantName + "' with session id " + sessionId);
            LocalSession localSession = createLocalSession(sessionDir, applicationId, sessionId);
            addLocalSession(localSession);
            return Optional.of(localSession);
        }
        return Optional.empty();
    }

    private Optional<Long> getActiveSessionId(ApplicationId applicationId) {
        List<ApplicationId> applicationIds = applicationRepo.activeApplications();
        return applicationIds.contains(applicationId)
                ? Optional.of(applicationRepo.requireActiveSessionOf(applicationId))
                : Optional.empty();
    }

    private long getNextSessionId() {
        return new SessionCounter(componentRegistry.getConfigCurator(), tenantName).nextSessionId();
    }

    public Path getSessionPath(long sessionId) {
        return sessionsPath.append(String.valueOf(sessionId));
    }

    Path getSessionStatePath(long sessionId) {
        return getSessionPath(sessionId).append(ConfigCurator.SESSIONSTATE_ZK_SUBPATH);
    }

    private SessionZooKeeperClient createSessionZooKeeperClient(long sessionId) {
        String serverId = componentRegistry.getConfigserverConfig().serverId();
        return new SessionZooKeeperClient(curator, componentRegistry.getConfigCurator(), tenantName, sessionId, serverId);
    }

    private File getAndValidateExistingSessionAppDir(long sessionId) {
        File appDir = getSessionAppDir(sessionId);
        if (!appDir.exists() || !appDir.isDirectory()) {
            throw new IllegalArgumentException("Unable to find correct application directory for session " + sessionId);
        }
        return appDir;
    }

    private File getSessionAppDir(long sessionId) {
        return new TenantFileSystemDirs(componentRegistry.getConfigServerDB(), tenantName).getUserApplicationDir(sessionId);
    }

    /** Registers a state watcher for the session, or attaches the local session to an existing one. */
    private void addSessionStateWatcher(long sessionId, RemoteSession remoteSession, Optional<LocalSession> localSession) {
        if (sessionStateWatchers.containsKey(sessionId)) {
            localSession.ifPresent(session -> sessionStateWatchers.get(sessionId).addLocalSession(session));
        } else {
            Curator.FileCache fileCache = curator.createFileCache(getSessionStatePath(sessionId).getAbsolute(), false);
            fileCache.addListener(this::nodeChanged);
            sessionStateWatchers.put(sessionId, new SessionStateWatcher(fileCache, remoteSession, localSession,
                                                                        metrics, zkWatcherExecutor, this));
        }
    }

    @Override
    public String toString() {
        return getLocalSessions().toString();
    }

    /** Returns the lock for session operations for the given session id. */
    public Lock lock(long sessionId) {
        return curator.lock(lockPath(sessionId), Duration.ofMinutes(1));
    }

    public Clock clock() { return clock; }

    private Path lockPath(long sessionId) {
        return locksPath.append(String.valueOf(sessionId));
    }

    /** Creates a transaction that marks the session ACTIVATE and records it for the application. */
    public Transaction createActivateTransaction(Session session) {
        Transaction transaction = createSetStatusTransaction(session, Session.Status.ACTIVATE);
        transaction.add(applicationRepo.createPutTransaction(session.getApplicationId(), session.getSessionId()).operations());
        return transaction;
    }

    private Transaction createSetStatusTransaction(Session session, Session.Status status) {
        return session.sessionZooKeeperClient.createWriteStatusTransaction(status);
    }

    void setPrepared(Session session) {
        session.setStatus(Session.Status.PREPARE);
    }

    /** Transaction whose operations are file-system operations, committed in order. */
    private static class FileTransaction extends AbstractTransaction {

        public static FileTransaction from(FileOperation operation) {
            FileTransaction transaction = new FileTransaction();
            transaction.add(operation);
            return transaction;
        }

        @Override
        public void prepare() { }

        @Override
        public void commit() {
            for (Operation operation : operations())
                ((FileOperation)operation).commit();
        }

    }

    /** Factory for file operations */
    private static class FileOperations {

        /** Creates an operation which recursively deletes the given path */
        public static DeleteOperation delete(String pathToDelete) {
            return new DeleteOperation(pathToDelete);
        }

    }

    private interface FileOperation extends Transaction.Operation {
        void commit();
    }

    /**
     * Recursively deletes this path and everything below.
     * Succeeds with no action if the path does not exist.
     */
    private static class DeleteOperation implements FileOperation {

        private final String pathToDelete;

        DeleteOperation(String pathToDelete) {
            this.pathToDelete = pathToDelete;
        }

        @Override
        public void commit() {
            IOUtils.recursiveDeleteDir(new File(pathToDelete));
        }

    }

}
class SessionRepository { private static final Logger log = Logger.getLogger(SessionRepository.class.getName()); private static final FilenameFilter sessionApplicationsFilter = (dir, name) -> name.matches("\\d+"); private static final long nonExistingActiveSessionId = 0; private final SessionCache<LocalSession> localSessionCache = new SessionCache<>(); private final SessionCache<RemoteSession> remoteSessionCache = new SessionCache<>(); private final Map<Long, SessionStateWatcher> sessionStateWatchers = new HashMap<>(); private final Duration sessionLifetime; private final Clock clock; private final Curator curator; private final Executor zkWatcherExecutor; private final TenantFileSystemDirs tenantFileSystemDirs; private final BooleanFlag distributeApplicationPackage; private final MetricUpdater metrics; private final Curator.DirectoryCache directoryCache; private final TenantApplications applicationRepo; private final SessionPreparer sessionPreparer; private final Path sessionsPath; private final TenantName tenantName; private final GlobalComponentRegistry componentRegistry; private final Path locksPath; public SessionRepository(TenantName tenantName, GlobalComponentRegistry componentRegistry, TenantApplications applicationRepo, FlagSource flagSource, SessionPreparer sessionPreparer) { this.tenantName = tenantName; this.componentRegistry = componentRegistry; this.sessionsPath = TenantRepository.getSessionsPath(tenantName); this.clock = componentRegistry.getClock(); this.curator = componentRegistry.getCurator(); this.sessionLifetime = Duration.ofSeconds(componentRegistry.getConfigserverConfig().sessionLifetime()); this.zkWatcherExecutor = command -> componentRegistry.getZkWatcherExecutor().execute(tenantName, command); this.tenantFileSystemDirs = new TenantFileSystemDirs(componentRegistry.getConfigServerDB(), tenantName); this.applicationRepo = applicationRepo; this.sessionPreparer = sessionPreparer; this.distributeApplicationPackage = 
Flags.CONFIGSERVER_DISTRIBUTE_APPLICATION_PACKAGE.bindTo(flagSource); this.metrics = componentRegistry.getMetrics().getOrCreateMetricUpdater(Metrics.createDimensions(tenantName)); this.locksPath = TenantRepository.getLocksPath(tenantName); loadSessions(); this.directoryCache = curator.createDirectoryCache(sessionsPath.getAbsolute(), false, false, componentRegistry.getZkCacheExecutor()); this.directoryCache.addListener(this::childEvent); this.directoryCache.start(); } private void loadSessions() { loadLocalSessions(); initializeRemoteSessions(); } public synchronized void addLocalSession(LocalSession session) { localSessionCache.putSession(session); long sessionId = session.getSessionId(); RemoteSession remoteSession = createRemoteSession(sessionId); addSessionStateWatcher(sessionId, remoteSession, Optional.of(session)); } public LocalSession getLocalSession(long sessionId) { return localSessionCache.getSession(sessionId); } public List<LocalSession> getLocalSessions() { return localSessionCache.getSessions(); } private void loadLocalSessions() { File[] sessions = tenantFileSystemDirs.sessionsPath().listFiles(sessionApplicationsFilter); if (sessions == null) return; for (File session : sessions) { try { addLocalSession(createSessionFromId(Long.parseLong(session.getName()))); } catch (IllegalArgumentException e) { log.log(Level.WARNING, "Could not load session '" + session.getAbsolutePath() + "':" + e.getMessage() + ", skipping it."); } } } public ConfigChangeActions prepareLocalSession(LocalSession session, DeployLogger logger, PrepareParams params, Optional<ApplicationSet> currentActiveApplicationSet, Path tenantPath, Instant now) { applicationRepo.createApplication(params.getApplicationId()); logger.log(Level.FINE, "Created application " + params.getApplicationId()); long sessionId = session.getSessionId(); SessionZooKeeperClient sessionZooKeeperClient = createSessionZooKeeperClient(sessionId); Curator.CompletionWaiter waiter = 
sessionZooKeeperClient.createPrepareWaiter(); ConfigChangeActions actions = sessionPreparer.prepare(applicationRepo.getHostValidator(), logger, params, currentActiveApplicationSet, tenantPath, now, getSessionAppDir(sessionId), session.getApplicationPackage(), sessionZooKeeperClient) .getConfigChangeActions(); setPrepared(session); waiter.awaitCompletion(params.getTimeoutBudget().timeLeft()); return actions; } public void deleteExpiredSessions(Map<ApplicationId, Long> activeSessions) { log.log(Level.FINE, () -> "Purging old sessions for tenant '" + tenantName + "'"); try { for (LocalSession candidate : localSessionCache.getSessions()) { Instant createTime = candidate.getCreateTime(); log.log(Level.FINE, () -> "Candidate session for deletion: " + candidate.getSessionId() + ", created: " + createTime); if (hasExpired(candidate) && !isActiveSession(candidate)) { deleteLocalSession(candidate); } else if (createTime.plus(Duration.ofDays(1)).isBefore(clock.instant())) { Optional<ApplicationId> applicationId = candidate.getOptionalApplicationId(); if (applicationId.isEmpty()) continue; Long activeSession = activeSessions.get(applicationId.get()); if (activeSession == null || activeSession != candidate.getSessionId()) { deleteLocalSession(candidate); log.log(Level.INFO, "Deleted inactive session " + candidate.getSessionId() + " created " + createTime + " for '" + applicationId + "'"); } } } } catch (Throwable e) { log.log(Level.WARNING, "Error when purging old sessions ", e); } log.log(Level.FINE, () -> "Done purging old sessions"); } private boolean hasExpired(LocalSession candidate) { return (candidate.getCreateTime().plus(sessionLifetime).isBefore(clock.instant())); } private boolean isActiveSession(LocalSession candidate) { return candidate.getStatus() == Session.Status.ACTIVATE; } public void deleteLocalSession(LocalSession session) { long sessionId = session.getSessionId(); try (Lock lock = lock(sessionId)) { log.log(Level.FINE, () -> "Deleting local session " + 
sessionId); SessionStateWatcher watcher = sessionStateWatchers.remove(sessionId); if (watcher != null) watcher.close(); localSessionCache.removeSession(sessionId); deletePersistentData(sessionId); } } private void deletePersistentData(long sessionId) { NestedTransaction transaction = new NestedTransaction(); SessionZooKeeperClient sessionZooKeeperClient = createSessionZooKeeperClient(sessionId); transaction.add(sessionZooKeeperClient.deleteTransaction(), FileTransaction.class); transaction.add(FileTransaction.from(FileOperations.delete(getSessionAppDir(sessionId).getAbsolutePath()))); transaction.commit(); } public void close() { deleteAllSessions(); tenantFileSystemDirs.delete(); try { if (directoryCache != null) { directoryCache.close(); } } catch (Exception e) { log.log(Level.WARNING, "Exception when closing path cache", e); } finally { checkForRemovedSessions(new ArrayList<>()); } } private void deleteAllSessions() { List<LocalSession> sessions = new ArrayList<>(localSessionCache.getSessions()); for (LocalSession session : sessions) { deleteLocalSession(session); } } public RemoteSession getRemoteSession(long sessionId) { return remoteSessionCache.getSession(sessionId); } public List<Long> getRemoteSessions() { return getSessionList(curator.getChildren(sessionsPath)); } public void addRemoteSession(RemoteSession session) { remoteSessionCache.putSession(session); metrics.incAddedSessions(); } public int deleteExpiredRemoteSessions(Clock clock, Duration expiryTime) { int deleted = 0; for (long sessionId : getRemoteSessions()) { RemoteSession session = remoteSessionCache.getSession(sessionId); if (session == null) continue; if (session.getStatus() == Session.Status.ACTIVATE) continue; if (sessionHasExpired(session.getCreateTime(), expiryTime, clock)) { log.log(Level.FINE, () -> "Remote session " + sessionId + " for " + tenantName + " has expired, deleting it"); deleteSession(session); deleted++; } } return deleted; } public void deactivate(RemoteSession 
remoteSession) { remoteSessionCache.putSession(remoteSession.deactivated()); } public void deleteSession(RemoteSession session) { SessionZooKeeperClient sessionZooKeeperClient = createSessionZooKeeperClient(session.getSessionId()); Transaction transaction = sessionZooKeeperClient.deleteTransaction(); transaction.commit(); transaction.close(); } public int deleteExpiredLocks(Clock clock, Duration expiryTime) { int deleted = 0; for (var lock : curator.getChildren(locksPath)) { Path path = locksPath.append(lock); if (zooKeeperNodeCreated(path).orElse(clock.instant()).isBefore(clock.instant().minus(expiryTime))) { log.log(Level.FINE, () -> "Lock " + path + " has expired, deleting it"); curator.delete(path); deleted++; } } return deleted; } private Optional<Instant> zooKeeperNodeCreated(Path path) { return curator.getStat(path).map(s -> Instant.ofEpochMilli(s.getCtime())); } private boolean sessionHasExpired(Instant created, Duration expiryTime, Clock clock) { return (created.plus(expiryTime).isBefore(clock.instant())); } private List<Long> getSessionListFromDirectoryCache(List<ChildData> children) { return getSessionList(children.stream() .map(child -> Path.fromString(child.getPath()).getName()) .collect(Collectors.toList())); } private List<Long> getSessionList(List<String> children) { return children.stream().map(Long::parseLong).collect(Collectors.toList()); } private void initializeRemoteSessions() throws NumberFormatException { getRemoteSessions().forEach(this::sessionAdded); } private synchronized void sessionsChanged() throws NumberFormatException { List<Long> sessions = getSessionListFromDirectoryCache(directoryCache.getCurrentData()); checkForRemovedSessions(sessions); checkForAddedSessions(sessions); } private void checkForRemovedSessions(List<Long> sessions) { for (RemoteSession session : remoteSessionCache.getSessions()) if ( ! 
sessions.contains(session.getSessionId())) sessionRemoved(session.getSessionId()); } private void checkForAddedSessions(List<Long> sessions) { for (Long sessionId : sessions) if (remoteSessionCache.getSession(sessionId) == null) sessionAdded(sessionId); } /** * A session for which we don't have a watcher, i.e. hitherto unknown to us. * * @param sessionId session id for the new session */ public void sessionAdded(long sessionId) { SessionZooKeeperClient sessionZKClient = createSessionZooKeeperClient(sessionId); if (sessionZKClient.readStatus().equals(Session.Status.DELETE)) return; log.log(Level.FINE, () -> "Adding remote session to SessionRepository: " + sessionId); RemoteSession remoteSession = createRemoteSession(sessionId); loadSessionIfActive(remoteSession); addRemoteSession(remoteSession); Optional<LocalSession> localSession = Optional.empty(); if (distributeApplicationPackage()) localSession = createLocalSessionUsingDistributedApplicationPackage(sessionId); addSessionStateWatcher(sessionId, remoteSession, localSession); } void activate(RemoteSession session) { long sessionId = session.getSessionId(); Curator.CompletionWaiter waiter = createSessionZooKeeperClient(sessionId).getActiveWaiter(); log.log(Level.FINE, () -> session.logPre() + "Getting session from repo: " + sessionId); ApplicationSet app = ensureApplicationLoaded(session); log.log(Level.FINE, () -> session.logPre() + "Reloading config for " + sessionId); applicationRepo.reloadConfig(app); log.log(Level.FINE, () -> session.logPre() + "Notifying " + waiter); notifyCompletion(waiter, session); log.log(Level.INFO, session.logPre() + "Session activated: " + sessionId); } void deleteSession(RemoteSession remoteSession, Optional<LocalSession> localSession) { localSession.ifPresent(this::deleteLocalSession); deactivate(remoteSession); } boolean distributeApplicationPackage() { return distributeApplicationPackage.value(); } private void sessionRemoved(long sessionId) { SessionStateWatcher watcher = 
sessionStateWatchers.remove(sessionId); if (watcher != null) watcher.close(); remoteSessionCache.removeSession(sessionId); metrics.incRemovedSessions(); } private void loadSessionIfActive(RemoteSession session) { for (ApplicationId applicationId : applicationRepo.activeApplications()) { if (applicationRepo.requireActiveSessionOf(applicationId) == session.getSessionId()) { log.log(Level.FINE, () -> "Found active application for session " + session.getSessionId() + " , loading it"); applicationRepo.reloadConfig(ensureApplicationLoaded(session)); log.log(Level.INFO, session.logPre() + "Application activated successfully: " + applicationId + " (generation " + session.getSessionId() + ")"); return; } } } void prepareRemoteSession(RemoteSession session) { SessionZooKeeperClient sessionZooKeeperClient = createSessionZooKeeperClient(session.getSessionId()); Curator.CompletionWaiter waiter = sessionZooKeeperClient.getPrepareWaiter(); ensureApplicationLoaded(session); notifyCompletion(waiter, session); } void confirmUpload(RemoteSession session) { Curator.CompletionWaiter waiter = session.getSessionZooKeeperClient().getUploadWaiter(); long sessionId = session.getSessionId(); log.log(Level.FINE, "Notifying upload waiter for session " + sessionId); notifyCompletion(waiter, session); log.log(Level.FINE, "Done notifying upload for session " + sessionId); } void notifyCompletion(Curator.CompletionWaiter completionWaiter, RemoteSession session) { try { completionWaiter.notifyCompletion(); } catch (RuntimeException e) { Set<Class<? extends KeeperException>> acceptedExceptions = Set.of(KeeperException.NoNodeException.class, KeeperException.NodeExistsException.class); Class<? extends Throwable> exceptionClass = e.getCause().getClass(); if (acceptedExceptions.contains(exceptionClass)) log.log(Level.FINE, "Not able to notify completion for session " + session.getSessionId() + " (" + completionWaiter + ")," + " node " + (exceptionClass.equals(KeeperException.NoNodeException.class) ? 
"has been deleted" : "already exists")); else throw e; } } private ApplicationSet loadApplication(RemoteSession session) { SessionZooKeeperClient sessionZooKeeperClient = createSessionZooKeeperClient(session.getSessionId()); ApplicationPackage applicationPackage = sessionZooKeeperClient.loadApplicationPackage(); ActivatedModelsBuilder builder = new ActivatedModelsBuilder(session.getTenantName(), session.getSessionId(), sessionZooKeeperClient, componentRegistry); Optional<AllocatedHosts> allocatedHosts = applicationPackage.getAllocatedHosts(); return ApplicationSet.fromList(builder.buildModels(session.getApplicationId(), sessionZooKeeperClient.readDockerImageRepository(), sessionZooKeeperClient.readVespaVersion(), applicationPackage, new SettableOptional<>(allocatedHosts), clock.instant())); } private void nodeChanged() { zkWatcherExecutor.execute(() -> { Multiset<Session.Status> sessionMetrics = HashMultiset.create(); for (RemoteSession session : remoteSessionCache.getSessions()) { sessionMetrics.add(session.getStatus()); } metrics.setNewSessions(sessionMetrics.count(Session.Status.NEW)); metrics.setPreparedSessions(sessionMetrics.count(Session.Status.PREPARE)); metrics.setActivatedSessions(sessionMetrics.count(Session.Status.ACTIVATE)); metrics.setDeactivatedSessions(sessionMetrics.count(Session.Status.DEACTIVATE)); }); } @SuppressWarnings("unused") private void childEvent(CuratorFramework ignored, PathChildrenCacheEvent event) { zkWatcherExecutor.execute(() -> { log.log(Level.FINE, () -> "Got child event: " + event); switch (event.getType()) { case CHILD_ADDED: sessionsChanged(); synchronizeOnNew(getSessionListFromDirectoryCache(Collections.singletonList(event.getData()))); break; case CHILD_REMOVED: case CONNECTION_RECONNECTED: sessionsChanged(); break; } }); } private void synchronizeOnNew(List<Long> sessionList) { for (long sessionId : sessionList) { RemoteSession session = remoteSessionCache.getSession(sessionId); if (session == null) continue; 
log.log(Level.FINE, () -> session.logPre() + "Confirming upload for session " + sessionId); confirmUpload(session); } } /** * Creates a new deployment session from an application package. * * @param applicationDirectory a File pointing to an application. * @param applicationId application id for this new session. * @param timeoutBudget Timeout for creating session and waiting for other servers. * @return a new session */ public LocalSession createSession(File applicationDirectory, ApplicationId applicationId, TimeoutBudget timeoutBudget, Optional<Long> activeSessionId) { return create(applicationDirectory, applicationId, activeSessionId, false, timeoutBudget); } public RemoteSession createRemoteSession(long sessionId) { SessionZooKeeperClient sessionZKClient = createSessionZooKeeperClient(sessionId); return new RemoteSession(tenantName, sessionId, sessionZKClient); } private void ensureSessionPathDoesNotExist(long sessionId) { Path sessionPath = getSessionPath(sessionId); if (componentRegistry.getConfigCurator().exists(sessionPath.getAbsolute())) { throw new IllegalArgumentException("Path " + sessionPath.getAbsolute() + " already exists in ZooKeeper"); } } private ApplicationPackage createApplication(File userDir, File configApplicationDir, ApplicationId applicationId, long sessionId, Optional<Long> currentlyActiveSessionId, boolean internalRedeploy) { long deployTimestamp = System.currentTimeMillis(); String user = System.getenv("USER"); if (user == null) { user = "unknown"; } DeployData deployData = new DeployData(user, userDir.getAbsolutePath(), applicationId, deployTimestamp, internalRedeploy, sessionId, currentlyActiveSessionId.orElse(nonExistingActiveSessionId)); return FilesApplicationPackage.fromFileWithDeployData(configApplicationDir, deployData); } private LocalSession createSessionFromApplication(ApplicationPackage applicationPackage, long sessionId, TimeoutBudget timeoutBudget, Clock clock) { log.log(Level.FINE, () -> TenantRepository.logPre(tenantName) 
+ "Creating session " + sessionId + " in ZooKeeper"); SessionZooKeeperClient sessionZKClient = createSessionZooKeeperClient(sessionId); sessionZKClient.createNewSession(clock.instant()); Curator.CompletionWaiter waiter = sessionZKClient.getUploadWaiter(); LocalSession session = new LocalSession(tenantName, sessionId, applicationPackage, sessionZKClient); waiter.awaitCompletion(timeoutBudget.timeLeft()); return session; } /** * Creates a new deployment session from an already existing session. * * @param existingSession the session to use as base * @param logger a deploy logger where the deploy log will be written. * @param internalRedeploy whether this session is for a system internal redeploy — not an application package change * @param timeoutBudget timeout for creating session and waiting for other servers. * @return a new session */ public LocalSession createSessionFromExisting(Session existingSession, DeployLogger logger, boolean internalRedeploy, TimeoutBudget timeoutBudget) { File existingApp = getSessionAppDir(existingSession.getSessionId()); ApplicationId existingApplicationId = existingSession.getApplicationId(); Optional<Long> activeSessionId = getActiveSessionId(existingApplicationId); logger.log(Level.FINE, "Create new session for application id '" + existingApplicationId + "' from existing active session " + activeSessionId); LocalSession session = create(existingApp, existingApplicationId, activeSessionId, internalRedeploy, timeoutBudget); session.setApplicationId(existingApplicationId); if (distributeApplicationPackage() && existingSession.getApplicationPackageReference() != null) { session.setApplicationPackageReference(existingSession.getApplicationPackageReference()); } session.setVespaVersion(existingSession.getVespaVersion()); session.setDockerImageRepository(existingSession.getDockerImageRepository()); session.setAthenzDomain(existingSession.getAthenzDomain()); return session; } private LocalSession create(File applicationFile, ApplicationId 
applicationId, Optional<Long> currentlyActiveSessionId, boolean internalRedeploy, TimeoutBudget timeoutBudget) { long sessionId = getNextSessionId(); try { ensureSessionPathDoesNotExist(sessionId); ApplicationPackage app = createApplicationPackage(applicationFile, applicationId, sessionId, currentlyActiveSessionId, internalRedeploy); return createSessionFromApplication(app, sessionId, timeoutBudget, clock); } catch (Exception e) { throw new RuntimeException("Error creating session " + sessionId, e); } } /** * This method is used when creating a session based on a remote session and the distributed application package * It does not wait for session being created on other servers */ private LocalSession createLocalSession(File applicationFile, ApplicationId applicationId, long sessionId) { try { Optional<Long> currentlyActiveSessionId = getActiveSessionId(applicationId); ApplicationPackage applicationPackage = createApplicationPackage(applicationFile, applicationId, sessionId, currentlyActiveSessionId, false); SessionZooKeeperClient sessionZooKeeperClient = createSessionZooKeeperClient(sessionId); return new LocalSession(tenantName, sessionId, applicationPackage, sessionZooKeeperClient); } catch (Exception e) { throw new RuntimeException("Error creating session " + sessionId, e); } } private ApplicationPackage createApplicationPackage(File applicationFile, ApplicationId applicationId, long sessionId, Optional<Long> currentlyActiveSessionId, boolean internalRedeploy) throws IOException { File userApplicationDir = getSessionAppDir(sessionId); copyApp(applicationFile, userApplicationDir); ApplicationPackage applicationPackage = createApplication(applicationFile, userApplicationDir, applicationId, sessionId, currentlyActiveSessionId, internalRedeploy); applicationPackage.writeMetaData(); return applicationPackage; } private void copyApp(File sourceDir, File destinationDir) throws IOException { if (destinationDir.exists()) throw new RuntimeException("Destination dir " + 
destinationDir + " already exists"); if (! sourceDir.isDirectory()) throw new IllegalArgumentException(sourceDir.getAbsolutePath() + " is not a directory"); java.nio.file.Path tempDestinationDir = null; try { tempDestinationDir = Files.createTempDirectory(destinationDir.getParentFile().toPath(), "app-package"); log.log(Level.FINE, "Copying dir " + sourceDir.getAbsolutePath() + " to " + tempDestinationDir.toFile().getAbsolutePath()); IOUtils.copyDirectory(sourceDir, tempDestinationDir.toFile()); log.log(Level.FINE, "Moving " + tempDestinationDir + " to " + destinationDir.getAbsolutePath()); Files.move(tempDestinationDir, destinationDir.toPath(), StandardCopyOption.ATOMIC_MOVE); } finally { if (tempDestinationDir != null) IOUtils.recursiveDeleteDir(tempDestinationDir.toFile()); } } /** * Returns a new session instance for the given session id. */ LocalSession createSessionFromId(long sessionId) { File sessionDir = getAndValidateExistingSessionAppDir(sessionId); ApplicationPackage applicationPackage = FilesApplicationPackage.fromFile(sessionDir); SessionZooKeeperClient sessionZKClient = createSessionZooKeeperClient(sessionId); return new LocalSession(tenantName, sessionId, applicationPackage, sessionZKClient); } /** * Returns a new local session for the given session id if it does not already exist. 
* Will also add the session to the local session cache if necessary */ public Optional<LocalSession> createLocalSessionUsingDistributedApplicationPackage(long sessionId) { if (applicationRepo.hasLocalSession(sessionId)) { log.log(Level.FINE, () -> "Local session for session id " + sessionId + " already exists"); return Optional.of(createSessionFromId(sessionId)); } SessionZooKeeperClient sessionZKClient = createSessionZooKeeperClient(sessionId); FileReference fileReference = sessionZKClient.readApplicationPackageReference(); log.log(Level.FINE, () -> "File reference for session id " + sessionId + ": " + fileReference); if (fileReference != null) { File rootDir = new File(Defaults.getDefaults().underVespaHome(componentRegistry.getConfigserverConfig().fileReferencesDir())); File sessionDir; FileDirectory fileDirectory = new FileDirectory(rootDir); try { sessionDir = fileDirectory.getFile(fileReference); } catch (IllegalArgumentException e) { log.log(Level.FINE, () -> "File reference for session id " + sessionId + ": " + fileReference + " not found in " + fileDirectory); return Optional.empty(); } ApplicationId applicationId = sessionZKClient.readApplicationId() .orElseThrow(() -> new RuntimeException("Could not find application id for session " + sessionId)); log.log(Level.FINE, () -> "Creating local session for tenant '" + tenantName + "' with session id " + sessionId); LocalSession localSession = createLocalSession(sessionDir, applicationId, sessionId); addLocalSession(localSession); return Optional.of(localSession); } return Optional.empty(); } private Optional<Long> getActiveSessionId(ApplicationId applicationId) { List<ApplicationId> applicationIds = applicationRepo.activeApplications(); return applicationIds.contains(applicationId) ? 
Optional.of(applicationRepo.requireActiveSessionOf(applicationId)) : Optional.empty(); } private long getNextSessionId() { return new SessionCounter(componentRegistry.getConfigCurator(), tenantName).nextSessionId(); } public Path getSessionPath(long sessionId) { return sessionsPath.append(String.valueOf(sessionId)); } Path getSessionStatePath(long sessionId) { return getSessionPath(sessionId).append(ConfigCurator.SESSIONSTATE_ZK_SUBPATH); } private SessionZooKeeperClient createSessionZooKeeperClient(long sessionId) { String serverId = componentRegistry.getConfigserverConfig().serverId(); return new SessionZooKeeperClient(curator, componentRegistry.getConfigCurator(), tenantName, sessionId, serverId); } private File getAndValidateExistingSessionAppDir(long sessionId) { File appDir = getSessionAppDir(sessionId); if (!appDir.exists() || !appDir.isDirectory()) { throw new IllegalArgumentException("Unable to find correct application directory for session " + sessionId); } return appDir; } private File getSessionAppDir(long sessionId) { return new TenantFileSystemDirs(componentRegistry.getConfigServerDB(), tenantName).getUserApplicationDir(sessionId); } private void addSessionStateWatcher(long sessionId, RemoteSession remoteSession, Optional<LocalSession> localSession) { if (sessionStateWatchers.containsKey(sessionId)) { localSession.ifPresent(session -> sessionStateWatchers.get(sessionId).addLocalSession(session)); } else { Curator.FileCache fileCache = curator.createFileCache(getSessionStatePath(sessionId).getAbsolute(), false); fileCache.addListener(this::nodeChanged); sessionStateWatchers.put(sessionId, new SessionStateWatcher(fileCache, remoteSession, localSession, metrics, zkWatcherExecutor, this)); } } @Override public String toString() { return getLocalSessions().toString(); } /** Returns the lock for session operations for the given session id. 
*/ public Lock lock(long sessionId) { return curator.lock(lockPath(sessionId), Duration.ofMinutes(1)); } public Clock clock() { return clock; } private Path lockPath(long sessionId) { return locksPath.append(String.valueOf(sessionId)); } public Transaction createActivateTransaction(Session session) { Transaction transaction = createSetStatusTransaction(session, Session.Status.ACTIVATE); transaction.add(applicationRepo.createPutTransaction(session.getApplicationId(), session.getSessionId()).operations()); return transaction; } private Transaction createSetStatusTransaction(Session session, Session.Status status) { return session.sessionZooKeeperClient.createWriteStatusTransaction(status); } void setPrepared(Session session) { session.setStatus(Session.Status.PREPARE); } private static class FileTransaction extends AbstractTransaction { public static FileTransaction from(FileOperation operation) { FileTransaction transaction = new FileTransaction(); transaction.add(operation); return transaction; } @Override public void prepare() { } @Override public void commit() { for (Operation operation : operations()) ((FileOperation)operation).commit(); } } /** Factory for file operations */ private static class FileOperations { /** Creates an operation which recursively deletes the given path */ public static DeleteOperation delete(String pathToDelete) { return new DeleteOperation(pathToDelete); } } private interface FileOperation extends Transaction.Operation { void commit(); } /** * Recursively deletes this path and everything below. * Succeeds with no action if the path does not exist. */ private static class DeleteOperation implements FileOperation { private final String pathToDelete; DeleteOperation(String pathToDelete) { this.pathToDelete = pathToDelete; } @Override public void commit() { IOUtils.recursiveDeleteDir(new File(pathToDelete)); } } }
The second part of this reads more like we don't require having any measurements from all nodes, instead of not requiring at least `minimumMeasurementsPerNode` for all nodes
private Optional<Double> averageLoad(Resource resource, List<Node> clusterNodes, ClusterSpec.Type clusterType) { NodeMetricsDb.Window window = metricsDb.getWindow(nodeRepository.clock().instant().minus(scalingWindow(clusterType)), resource, clusterNodes.stream().map(Node::hostname).collect(Collectors.toList())); if (window.measurementCount()/clusterNodes.size() < minimumMeasurementsPerNode) return Optional.empty(); if (window.hostnames() != clusterNodes.size()) return Optional.empty(); return Optional.of(window.average()); }
private Optional<Double> averageLoad(Resource resource, List<Node> clusterNodes, ClusterSpec.Type clusterType) { NodeMetricsDb.Window window = metricsDb.getWindow(nodeRepository.clock().instant().minus(scalingWindow(clusterType)), resource, clusterNodes.stream().map(Node::hostname).collect(Collectors.toList())); if (window.measurementCount()/clusterNodes.size() < minimumMeasurementsPerNode) return Optional.empty(); if (window.hostnames() != clusterNodes.size()) return Optional.empty(); return Optional.of(window.average()); }
class Autoscaler { private static final int minimumMeasurementsPerNode = 60; /** What cost difference factor is worth a reallocation? */ private static final double costDifferenceWorthReallocation = 0.1; /** What difference factor for a resource is worth a reallocation? */ private static final double resourceDifferenceWorthReallocation = 0.1; private final NodeMetricsDb metricsDb; private final NodeRepository nodeRepository; private final AllocationOptimizer allocationOptimizer; public Autoscaler(NodeMetricsDb metricsDb, NodeRepository nodeRepository) { this.metricsDb = metricsDb; this.nodeRepository = nodeRepository; this.allocationOptimizer = new AllocationOptimizer(nodeRepository); } /** * Suggest a scaling of a cluster. This returns a better allocation (if found) * without taking min and max limits into account. * * @param clusterNodes the list of all the active nodes in a cluster * @return a new suggested allocation for this cluster, or empty if it should not be rescaled at this time */ public Optional<ClusterResources> suggest(Cluster cluster, List<Node> clusterNodes) { return autoscale(clusterNodes, Limits.empty(), cluster.exclusive()) .map(AllocatableClusterResources::toAdvertisedClusterResources); } /** * Autoscale a cluster by load. This returns a better allocation (if found) inside the min and max limits. 
* * @param clusterNodes the list of all the active nodes in a cluster * @return a new suggested allocation for this cluster, or empty if it should not be rescaled at this time */ public Optional<ClusterResources> autoscale(Cluster cluster, List<Node> clusterNodes) { if (cluster.minResources().equals(cluster.maxResources())) return Optional.empty(); return autoscale(clusterNodes, Limits.of(cluster), cluster.exclusive()) .map(AllocatableClusterResources::toAdvertisedClusterResources); } private Optional<AllocatableClusterResources> autoscale(List<Node> clusterNodes, Limits limits, boolean exclusive) { if (unstable(clusterNodes)) return Optional.empty(); ClusterSpec.Type clusterType = clusterNodes.get(0).allocation().get().membership().cluster().type(); AllocatableClusterResources currentAllocation = new AllocatableClusterResources(clusterNodes, nodeRepository); Optional<Double> cpuLoad = averageLoad(Resource.cpu, clusterNodes, clusterType); Optional<Double> memoryLoad = averageLoad(Resource.memory, clusterNodes, clusterType); Optional<Double> diskLoad = averageLoad(Resource.disk, clusterNodes, clusterType); if (cpuLoad.isEmpty() || memoryLoad.isEmpty() || diskLoad.isEmpty()) return Optional.empty(); var target = ResourceTarget.idealLoad(cpuLoad.get(), memoryLoad.get(), diskLoad.get(), currentAllocation); Optional<AllocatableClusterResources> bestAllocation = allocationOptimizer.findBestAllocation(target, currentAllocation, limits, exclusive); if (bestAllocation.isEmpty()) return Optional.empty(); if (similar(bestAllocation.get(), currentAllocation)) return Optional.empty(); return bestAllocation; } /** Returns true if both total real resources and total cost are similar */ private boolean similar(AllocatableClusterResources a, AllocatableClusterResources b) { return similar(a.cost(), b.cost(), costDifferenceWorthReallocation) && similar(a.realResources().vcpu() * a.nodes(), b.realResources().vcpu() * b.nodes(), resourceDifferenceWorthReallocation) && 
similar(a.realResources().memoryGb() * a.nodes(), b.realResources().memoryGb() * b.nodes(), resourceDifferenceWorthReallocation) && similar(a.realResources().diskGb() * a.nodes(), b.realResources().diskGb() * b.nodes(), resourceDifferenceWorthReallocation); } private boolean similar(double r1, double r2, double threshold) { return Math.abs(r1 - r2) / r1 < threshold; } /** * Returns the average load of this resource in the measurement window, * or empty if we are not in a position to make decisions from these measurements at this time. */ /** The duration of the window we need to consider to make a scaling decision */ private Duration scalingWindow(ClusterSpec.Type clusterType) { if (clusterType.isContent()) return Duration.ofHours(12); return Duration.ofHours(12); } public static boolean unstable(List<Node> nodes) { return nodes.stream().anyMatch(node -> node.status().wantToRetire() || node.allocation().get().membership().retired() || node.allocation().get().isRemovable()); } }
class Autoscaler { private static final int minimumMeasurementsPerNode = 60; /** What cost difference factor is worth a reallocation? */ private static final double costDifferenceWorthReallocation = 0.1; /** What difference factor for a resource is worth a reallocation? */ private static final double resourceDifferenceWorthReallocation = 0.1; private final NodeMetricsDb metricsDb; private final NodeRepository nodeRepository; private final AllocationOptimizer allocationOptimizer; public Autoscaler(NodeMetricsDb metricsDb, NodeRepository nodeRepository) { this.metricsDb = metricsDb; this.nodeRepository = nodeRepository; this.allocationOptimizer = new AllocationOptimizer(nodeRepository); } /** * Suggest a scaling of a cluster. This returns a better allocation (if found) * without taking min and max limits into account. * * @param clusterNodes the list of all the active nodes in a cluster * @return a new suggested allocation for this cluster, or empty if it should not be rescaled at this time */ public Optional<ClusterResources> suggest(Cluster cluster, List<Node> clusterNodes) { return autoscale(clusterNodes, Limits.empty(), cluster.exclusive()) .map(AllocatableClusterResources::toAdvertisedClusterResources); } /** * Autoscale a cluster by load. This returns a better allocation (if found) inside the min and max limits. 
* * @param clusterNodes the list of all the active nodes in a cluster * @return a new suggested allocation for this cluster, or empty if it should not be rescaled at this time */ public Optional<ClusterResources> autoscale(Cluster cluster, List<Node> clusterNodes) { if (cluster.minResources().equals(cluster.maxResources())) return Optional.empty(); return autoscale(clusterNodes, Limits.of(cluster), cluster.exclusive()) .map(AllocatableClusterResources::toAdvertisedClusterResources); } private Optional<AllocatableClusterResources> autoscale(List<Node> clusterNodes, Limits limits, boolean exclusive) { if (unstable(clusterNodes)) return Optional.empty(); ClusterSpec.Type clusterType = clusterNodes.get(0).allocation().get().membership().cluster().type(); AllocatableClusterResources currentAllocation = new AllocatableClusterResources(clusterNodes, nodeRepository); Optional<Double> cpuLoad = averageLoad(Resource.cpu, clusterNodes, clusterType); Optional<Double> memoryLoad = averageLoad(Resource.memory, clusterNodes, clusterType); Optional<Double> diskLoad = averageLoad(Resource.disk, clusterNodes, clusterType); if (cpuLoad.isEmpty() || memoryLoad.isEmpty() || diskLoad.isEmpty()) return Optional.empty(); var target = ResourceTarget.idealLoad(cpuLoad.get(), memoryLoad.get(), diskLoad.get(), currentAllocation); Optional<AllocatableClusterResources> bestAllocation = allocationOptimizer.findBestAllocation(target, currentAllocation, limits, exclusive); if (bestAllocation.isEmpty()) return Optional.empty(); if (similar(bestAllocation.get(), currentAllocation)) return Optional.empty(); return bestAllocation; } /** Returns true if both total real resources and total cost are similar */ private boolean similar(AllocatableClusterResources a, AllocatableClusterResources b) { return similar(a.cost(), b.cost(), costDifferenceWorthReallocation) && similar(a.realResources().vcpu() * a.nodes(), b.realResources().vcpu() * b.nodes(), resourceDifferenceWorthReallocation) && 
similar(a.realResources().memoryGb() * a.nodes(), b.realResources().memoryGb() * b.nodes(), resourceDifferenceWorthReallocation) && similar(a.realResources().diskGb() * a.nodes(), b.realResources().diskGb() * b.nodes(), resourceDifferenceWorthReallocation); } private boolean similar(double r1, double r2, double threshold) { return Math.abs(r1 - r2) / r1 < threshold; } /** * Returns the average load of this resource in the measurement window, * or empty if we are not in a position to make decisions from these measurements at this time. */ /** The duration of the window we need to consider to make a scaling decision */ private Duration scalingWindow(ClusterSpec.Type clusterType) { if (clusterType.isContent()) return Duration.ofHours(12); return Duration.ofHours(12); } public static boolean unstable(List<Node> nodes) { return nodes.stream().anyMatch(node -> node.status().wantToRetire() || node.allocation().get().membership().retired() || node.allocation().get().isRemovable()); } }
Why not simply get and set? ``` DataSourceIf ds = nameToCatalogs.get(log.getCatalogName()); ds.modifyDatasourceName(log.getNewCatalogName()); ```
public void replayAlterCatalogName(CatalogLog log) { writeLock(); try { DataSourceIf ds = nameToCatalogs.remove(log.getCatalogName()); ds.modifyDatasourceName(log.getNewCatalogName()); nameToCatalogs.put(ds.getName(), ds); } finally { writeUnlock(); } }
nameToCatalogs.put(ds.getName(), ds);
public void replayAlterCatalogName(CatalogLog log) { writeLock(); try { DataSourceIf ds = nameToCatalogs.remove(log.getCatalogName()); ds.modifyDatasourceName(log.getNewCatalogName()); nameToCatalogs.put(ds.getName(), ds); } finally { writeUnlock(); } }
class DataSourceMgr implements Writable { private static final Logger LOG = LogManager.getLogger(DataSourceMgr.class); private final ReentrantReadWriteLock lock = new ReentrantReadWriteLock(true); private final Map<String, DataSourceIf> nameToCatalogs = Maps.newConcurrentMap(); private InternalDataSource internalDataSource; public DataSourceMgr() { initInternalDataSource(); } private void initInternalDataSource() { internalDataSource = new InternalDataSource(); nameToCatalogs.put(internalDataSource.getName(), internalDataSource); } public InternalDataSource getInternalDataSource() { return internalDataSource; } private void writeLock() { lock.writeLock().lock(); } private void writeUnlock() { lock.writeLock().unlock(); } private void readLock() { lock.readLock().lock(); } private void readUnlock() { lock.readLock().unlock(); } /** * Create and hold the catalog instance and write the meta log. */ public void createCatalog(CreateCatalogStmt stmt) throws UserException { if (stmt.isSetIfNotExists() && nameToCatalogs.containsKey(stmt.getCatalogName())) { LOG.warn("Catalog {} is already exist.", stmt.getCatalogName()); return; } if (nameToCatalogs.containsKey(stmt.getCatalogName())) { throw new DdlException("Catalog had already exist with name: " + stmt.getCatalogName()); } CatalogLog log = CatalogFactory.constructorCatalogLog(stmt); replayCreateCatalog(log); Catalog.getCurrentCatalog().getEditLog().logDatasourceLog(OperationType.OP_CREATE_DS, log); } /** * Remove the catalog instance by name and write the meta log. 
*/ public void dropCatalog(DropCatalogStmt stmt) throws UserException { if (stmt.isSetIfExists() && !nameToCatalogs.containsKey(stmt.getCatalogName())) { LOG.warn("Non catalog {} is found.", stmt.getCatalogName()); return; } if (!nameToCatalogs.containsKey(stmt.getCatalogName())) { throw new DdlException("No catalog found with name: " + stmt.getCatalogName()); } CatalogLog log = CatalogFactory.constructorCatalogLog(stmt); replayDropCatalog(log); Catalog.getCurrentCatalog().getEditLog().logDatasourceLog(OperationType.OP_DROP_DS, log); } /** * Modify the catalog name into a new one and write the meta log. */ public void alterCatalogName(AlterCatalogNameStmt stmt) throws UserException { if (!nameToCatalogs.containsKey(stmt.getCatalogName())) { throw new DdlException("No catalog found with name: " + stmt.getCatalogName()); } CatalogLog log = CatalogFactory.constructorCatalogLog(stmt); replayAlterCatalogName(log); Catalog.getCurrentCatalog().getEditLog().logDatasourceLog(OperationType.OP_ALTER_DS_NAME, log); } /** * Modify the catalog property and write the meta log. */ public void alterCatalogProps(AlterCatalogPropertyStmt stmt) throws UserException { if (!nameToCatalogs.containsKey(stmt.getCatalogName())) { throw new DdlException("No catalog found with name: " + stmt.getCatalogName()); } if (!nameToCatalogs.get(stmt.getCatalogName()) .getType().equalsIgnoreCase(stmt.getNewProperties().get("type"))) { throw new DdlException("Can't modify the type of catalog property with name: " + stmt.getCatalogName()); } CatalogLog log = CatalogFactory.constructorCatalogLog(stmt); replayAlterCatalogProps(log); Catalog.getCurrentCatalog().getEditLog().logDatasourceLog(OperationType.OP_ALTER_DS_PROPS, log); } /** * List all catalog or get the special catalog with a name. 
*/ public ShowResultSet showCatalogs(ShowCatalogStmt showStmt) throws AnalysisException { List<List<String>> rows = Lists.newArrayList(); readLock(); try { if (showStmt.getCatalogName() == null) { for (DataSourceIf ds : nameToCatalogs.values()) { List<String> row = Lists.newArrayList(); row.add(ds.getName()); row.add(ds.getType()); rows.add(row); } } else { if (!nameToCatalogs.containsKey(showStmt.getCatalogName())) { throw new AnalysisException("No catalog found with name: " + showStmt.getCatalogName()); } DataSourceIf ds = nameToCatalogs.get(showStmt.getCatalogName()); for (Map.Entry<String, String> elem : ds.getProperties().entrySet()) { List<String> row = Lists.newArrayList(); row.add(elem.getKey()); row.add(elem.getValue()); rows.add(row); } } } finally { readUnlock(); } return new ShowResultSet(showStmt.getMetaData(), rows); } /** * Reply for create catalog event. */ public void replayCreateCatalog(CatalogLog log) { writeLock(); try { DataSourceIf ds = CatalogFactory.constructorFromLog(log); nameToCatalogs.put(ds.getName(), ds); } finally { writeUnlock(); } } /** * Reply for drop catalog event. */ public void replayDropCatalog(CatalogLog log) { writeLock(); try { nameToCatalogs.remove(log.getCatalogName()); } finally { writeUnlock(); } } /** * Reply for alter catalog name event. */ /** * Reply for alter catalog props event. */ public void replayAlterCatalogProps(CatalogLog log) { writeLock(); try { DataSourceIf ds = nameToCatalogs.remove(log.getCatalogName()); ds.modifyDatasourceProps(log.getNewProps()); nameToCatalogs.put(ds.getName(), ds); } finally { writeUnlock(); } } @Override public void write(DataOutput out) throws IOException { Text.writeString(out, GsonUtils.GSON.toJson(this)); } public static DataSourceMgr read(DataInput in) throws IOException { String json = Text.readString(in); return GsonUtils.GSON.fromJson(json, DataSourceMgr.class); } }
class DataSourceMgr implements Writable { private static final Logger LOG = LogManager.getLogger(DataSourceMgr.class); private final ReentrantReadWriteLock lock = new ReentrantReadWriteLock(true); private final Map<String, DataSourceIf> nameToCatalogs = Maps.newConcurrentMap(); private InternalDataSource internalDataSource; public DataSourceMgr() { initInternalDataSource(); } private void initInternalDataSource() { internalDataSource = new InternalDataSource(); nameToCatalogs.put(internalDataSource.getName(), internalDataSource); } public InternalDataSource getInternalDataSource() { return internalDataSource; } private void writeLock() { lock.writeLock().lock(); } private void writeUnlock() { lock.writeLock().unlock(); } private void readLock() { lock.readLock().lock(); } private void readUnlock() { lock.readLock().unlock(); } /** * Create and hold the catalog instance and write the meta log. */ public void createCatalog(CreateCatalogStmt stmt) throws UserException { if (stmt.isSetIfNotExists() && nameToCatalogs.containsKey(stmt.getCatalogName())) { LOG.warn("Catalog {} is already exist.", stmt.getCatalogName()); return; } if (nameToCatalogs.containsKey(stmt.getCatalogName())) { throw new DdlException("Catalog had already exist with name: " + stmt.getCatalogName()); } CatalogLog log = CatalogFactory.constructorCatalogLog(stmt); replayCreateCatalog(log); Catalog.getCurrentCatalog().getEditLog().logDatasourceLog(OperationType.OP_CREATE_DS, log); } /** * Remove the catalog instance by name and write the meta log. 
*/ public void dropCatalog(DropCatalogStmt stmt) throws UserException { if (stmt.isSetIfExists() && !nameToCatalogs.containsKey(stmt.getCatalogName())) { LOG.warn("Non catalog {} is found.", stmt.getCatalogName()); return; } if (!nameToCatalogs.containsKey(stmt.getCatalogName())) { throw new DdlException("No catalog found with name: " + stmt.getCatalogName()); } CatalogLog log = CatalogFactory.constructorCatalogLog(stmt); replayDropCatalog(log); Catalog.getCurrentCatalog().getEditLog().logDatasourceLog(OperationType.OP_DROP_DS, log); } /** * Modify the catalog name into a new one and write the meta log. */ public void alterCatalogName(AlterCatalogNameStmt stmt) throws UserException { if (!nameToCatalogs.containsKey(stmt.getCatalogName())) { throw new DdlException("No catalog found with name: " + stmt.getCatalogName()); } CatalogLog log = CatalogFactory.constructorCatalogLog(stmt); replayAlterCatalogName(log); Catalog.getCurrentCatalog().getEditLog().logDatasourceLog(OperationType.OP_ALTER_DS_NAME, log); } /** * Modify the catalog property and write the meta log. */ public void alterCatalogProps(AlterCatalogPropertyStmt stmt) throws UserException { if (!nameToCatalogs.containsKey(stmt.getCatalogName())) { throw new DdlException("No catalog found with name: " + stmt.getCatalogName()); } if (!nameToCatalogs.get(stmt.getCatalogName()) .getType().equalsIgnoreCase(stmt.getNewProperties().get("type"))) { throw new DdlException("Can't modify the type of catalog property with name: " + stmt.getCatalogName()); } CatalogLog log = CatalogFactory.constructorCatalogLog(stmt); replayAlterCatalogProps(log); Catalog.getCurrentCatalog().getEditLog().logDatasourceLog(OperationType.OP_ALTER_DS_PROPS, log); } /** * List all catalog or get the special catalog with a name. 
*/ public ShowResultSet showCatalogs(ShowCatalogStmt showStmt) throws AnalysisException { List<List<String>> rows = Lists.newArrayList(); readLock(); try { if (showStmt.getCatalogName() == null) { for (DataSourceIf ds : nameToCatalogs.values()) { List<String> row = Lists.newArrayList(); row.add(ds.getName()); row.add(ds.getType()); rows.add(row); } } else { if (!nameToCatalogs.containsKey(showStmt.getCatalogName())) { throw new AnalysisException("No catalog found with name: " + showStmt.getCatalogName()); } DataSourceIf ds = nameToCatalogs.get(showStmt.getCatalogName()); for (Map.Entry<String, String> elem : ds.getProperties().entrySet()) { List<String> row = Lists.newArrayList(); row.add(elem.getKey()); row.add(elem.getValue()); rows.add(row); } } } finally { readUnlock(); } return new ShowResultSet(showStmt.getMetaData(), rows); } /** * Reply for create catalog event. */ public void replayCreateCatalog(CatalogLog log) { writeLock(); try { DataSourceIf ds = CatalogFactory.constructorFromLog(log); nameToCatalogs.put(ds.getName(), ds); } finally { writeUnlock(); } } /** * Reply for drop catalog event. */ public void replayDropCatalog(CatalogLog log) { writeLock(); try { nameToCatalogs.remove(log.getCatalogName()); } finally { writeUnlock(); } } /** * Reply for alter catalog name event. */ /** * Reply for alter catalog props event. */ public void replayAlterCatalogProps(CatalogLog log) { writeLock(); try { DataSourceIf ds = nameToCatalogs.remove(log.getCatalogName()); ds.modifyDatasourceProps(log.getNewProps()); nameToCatalogs.put(ds.getName(), ds); } finally { writeUnlock(); } } @Override public void write(DataOutput out) throws IOException { Text.writeString(out, GsonUtils.GSON.toJson(this)); } public static DataSourceMgr read(DataInput in) throws IOException { String json = Text.readString(in); return GsonUtils.GSON.fromJson(json, DataSourceMgr.class); } }
Better now?
private Optional<Double> averageLoad(Resource resource, List<Node> clusterNodes, ClusterSpec.Type clusterType) { NodeMetricsDb.Window window = metricsDb.getWindow(nodeRepository.clock().instant().minus(scalingWindow(clusterType)), resource, clusterNodes.stream().map(Node::hostname).collect(Collectors.toList())); if (window.measurementCount()/clusterNodes.size() < minimumMeasurementsPerNode) return Optional.empty(); if (window.hostnames() != clusterNodes.size()) return Optional.empty(); return Optional.of(window.average()); }
private Optional<Double> averageLoad(Resource resource, List<Node> clusterNodes, ClusterSpec.Type clusterType) { NodeMetricsDb.Window window = metricsDb.getWindow(nodeRepository.clock().instant().minus(scalingWindow(clusterType)), resource, clusterNodes.stream().map(Node::hostname).collect(Collectors.toList())); if (window.measurementCount()/clusterNodes.size() < minimumMeasurementsPerNode) return Optional.empty(); if (window.hostnames() != clusterNodes.size()) return Optional.empty(); return Optional.of(window.average()); }
class Autoscaler { private static final int minimumMeasurementsPerNode = 60; /** What cost difference factor is worth a reallocation? */ private static final double costDifferenceWorthReallocation = 0.1; /** What difference factor for a resource is worth a reallocation? */ private static final double resourceDifferenceWorthReallocation = 0.1; private final NodeMetricsDb metricsDb; private final NodeRepository nodeRepository; private final AllocationOptimizer allocationOptimizer; public Autoscaler(NodeMetricsDb metricsDb, NodeRepository nodeRepository) { this.metricsDb = metricsDb; this.nodeRepository = nodeRepository; this.allocationOptimizer = new AllocationOptimizer(nodeRepository); } /** * Suggest a scaling of a cluster. This returns a better allocation (if found) * without taking min and max limits into account. * * @param clusterNodes the list of all the active nodes in a cluster * @return a new suggested allocation for this cluster, or empty if it should not be rescaled at this time */ public Optional<ClusterResources> suggest(Cluster cluster, List<Node> clusterNodes) { return autoscale(clusterNodes, Limits.empty(), cluster.exclusive()) .map(AllocatableClusterResources::toAdvertisedClusterResources); } /** * Autoscale a cluster by load. This returns a better allocation (if found) inside the min and max limits. 
* * @param clusterNodes the list of all the active nodes in a cluster * @return a new suggested allocation for this cluster, or empty if it should not be rescaled at this time */ public Optional<ClusterResources> autoscale(Cluster cluster, List<Node> clusterNodes) { if (cluster.minResources().equals(cluster.maxResources())) return Optional.empty(); return autoscale(clusterNodes, Limits.of(cluster), cluster.exclusive()) .map(AllocatableClusterResources::toAdvertisedClusterResources); } private Optional<AllocatableClusterResources> autoscale(List<Node> clusterNodes, Limits limits, boolean exclusive) { if (unstable(clusterNodes)) return Optional.empty(); ClusterSpec.Type clusterType = clusterNodes.get(0).allocation().get().membership().cluster().type(); AllocatableClusterResources currentAllocation = new AllocatableClusterResources(clusterNodes, nodeRepository); Optional<Double> cpuLoad = averageLoad(Resource.cpu, clusterNodes, clusterType); Optional<Double> memoryLoad = averageLoad(Resource.memory, clusterNodes, clusterType); Optional<Double> diskLoad = averageLoad(Resource.disk, clusterNodes, clusterType); if (cpuLoad.isEmpty() || memoryLoad.isEmpty() || diskLoad.isEmpty()) return Optional.empty(); var target = ResourceTarget.idealLoad(cpuLoad.get(), memoryLoad.get(), diskLoad.get(), currentAllocation); Optional<AllocatableClusterResources> bestAllocation = allocationOptimizer.findBestAllocation(target, currentAllocation, limits, exclusive); if (bestAllocation.isEmpty()) return Optional.empty(); if (similar(bestAllocation.get(), currentAllocation)) return Optional.empty(); return bestAllocation; } /** Returns true if both total real resources and total cost are similar */ private boolean similar(AllocatableClusterResources a, AllocatableClusterResources b) { return similar(a.cost(), b.cost(), costDifferenceWorthReallocation) && similar(a.realResources().vcpu() * a.nodes(), b.realResources().vcpu() * b.nodes(), resourceDifferenceWorthReallocation) && 
similar(a.realResources().memoryGb() * a.nodes(), b.realResources().memoryGb() * b.nodes(), resourceDifferenceWorthReallocation) && similar(a.realResources().diskGb() * a.nodes(), b.realResources().diskGb() * b.nodes(), resourceDifferenceWorthReallocation); } private boolean similar(double r1, double r2, double threshold) { return Math.abs(r1 - r2) / r1 < threshold; } /** * Returns the average load of this resource in the measurement window, * or empty if we are not in a position to make decisions from these measurements at this time. */ /** The duration of the window we need to consider to make a scaling decision */ private Duration scalingWindow(ClusterSpec.Type clusterType) { if (clusterType.isContent()) return Duration.ofHours(12); return Duration.ofHours(12); } public static boolean unstable(List<Node> nodes) { return nodes.stream().anyMatch(node -> node.status().wantToRetire() || node.allocation().get().membership().retired() || node.allocation().get().isRemovable()); } }
class Autoscaler { private static final int minimumMeasurementsPerNode = 60; /** What cost difference factor is worth a reallocation? */ private static final double costDifferenceWorthReallocation = 0.1; /** What difference factor for a resource is worth a reallocation? */ private static final double resourceDifferenceWorthReallocation = 0.1; private final NodeMetricsDb metricsDb; private final NodeRepository nodeRepository; private final AllocationOptimizer allocationOptimizer; public Autoscaler(NodeMetricsDb metricsDb, NodeRepository nodeRepository) { this.metricsDb = metricsDb; this.nodeRepository = nodeRepository; this.allocationOptimizer = new AllocationOptimizer(nodeRepository); } /** * Suggest a scaling of a cluster. This returns a better allocation (if found) * without taking min and max limits into account. * * @param clusterNodes the list of all the active nodes in a cluster * @return a new suggested allocation for this cluster, or empty if it should not be rescaled at this time */ public Optional<ClusterResources> suggest(Cluster cluster, List<Node> clusterNodes) { return autoscale(clusterNodes, Limits.empty(), cluster.exclusive()) .map(AllocatableClusterResources::toAdvertisedClusterResources); } /** * Autoscale a cluster by load. This returns a better allocation (if found) inside the min and max limits. 
* * @param clusterNodes the list of all the active nodes in a cluster * @return a new suggested allocation for this cluster, or empty if it should not be rescaled at this time */ public Optional<ClusterResources> autoscale(Cluster cluster, List<Node> clusterNodes) { if (cluster.minResources().equals(cluster.maxResources())) return Optional.empty(); return autoscale(clusterNodes, Limits.of(cluster), cluster.exclusive()) .map(AllocatableClusterResources::toAdvertisedClusterResources); } private Optional<AllocatableClusterResources> autoscale(List<Node> clusterNodes, Limits limits, boolean exclusive) { if (unstable(clusterNodes)) return Optional.empty(); ClusterSpec.Type clusterType = clusterNodes.get(0).allocation().get().membership().cluster().type(); AllocatableClusterResources currentAllocation = new AllocatableClusterResources(clusterNodes, nodeRepository); Optional<Double> cpuLoad = averageLoad(Resource.cpu, clusterNodes, clusterType); Optional<Double> memoryLoad = averageLoad(Resource.memory, clusterNodes, clusterType); Optional<Double> diskLoad = averageLoad(Resource.disk, clusterNodes, clusterType); if (cpuLoad.isEmpty() || memoryLoad.isEmpty() || diskLoad.isEmpty()) return Optional.empty(); var target = ResourceTarget.idealLoad(cpuLoad.get(), memoryLoad.get(), diskLoad.get(), currentAllocation); Optional<AllocatableClusterResources> bestAllocation = allocationOptimizer.findBestAllocation(target, currentAllocation, limits, exclusive); if (bestAllocation.isEmpty()) return Optional.empty(); if (similar(bestAllocation.get(), currentAllocation)) return Optional.empty(); return bestAllocation; } /** Returns true if both total real resources and total cost are similar */ private boolean similar(AllocatableClusterResources a, AllocatableClusterResources b) { return similar(a.cost(), b.cost(), costDifferenceWorthReallocation) && similar(a.realResources().vcpu() * a.nodes(), b.realResources().vcpu() * b.nodes(), resourceDifferenceWorthReallocation) && 
similar(a.realResources().memoryGb() * a.nodes(), b.realResources().memoryGb() * b.nodes(), resourceDifferenceWorthReallocation) && similar(a.realResources().diskGb() * a.nodes(), b.realResources().diskGb() * b.nodes(), resourceDifferenceWorthReallocation); } private boolean similar(double r1, double r2, double threshold) { return Math.abs(r1 - r2) / r1 < threshold; } /** * Returns the average load of this resource in the measurement window, * or empty if we are not in a position to make decisions from these measurements at this time. */ /** The duration of the window we need to consider to make a scaling decision */ private Duration scalingWindow(ClusterSpec.Type clusterType) { if (clusterType.isContent()) return Duration.ofHours(12); return Duration.ofHours(12); } public static boolean unstable(List<Node> nodes) { return nodes.stream().anyMatch(node -> node.status().wantToRetire() || node.allocation().get().membership().retired() || node.allocation().get().isRemovable()); } }
Did you intend for this to be `sendBlocking`, as we discussed?
private Result sendMessage(DocumentOperationMessageV3 msg) { msg.getMessage().pushHandler(feedReplyHandler); return sourceSession.getResource().sendMessage(msg.getMessage()); }
return sourceSession.getResource().sendMessage(msg.getMessage());
private Result sendMessage(DocumentOperationMessageV3 msg) { msg.getMessage().pushHandler(feedReplyHandler); return sourceSession.getResource().sendMessage(msg.getMessage()); }
class ClientFeederV3 { protected static final Logger log = Logger.getLogger(ClientFeederV3.class.getName()); private final static AtomicInteger outstandingOperations = new AtomicInteger(0); private final BlockingQueue<OperationStatus> feedReplies = new LinkedBlockingQueue<>(); private final ReferencedResource<SharedSourceSession> sourceSession; private final String clientId; private final ReplyHandler feedReplyHandler; private final Metric metric; private final HttpThrottlePolicy httpThrottlePolicy; private Instant prevOpsPerSecTime = Instant.now(); private double operationsForOpsPerSec = 0d; private final Object monitor = new Object(); private final StreamReaderV3 streamReaderV3; private final AtomicInteger ongoingRequests = new AtomicInteger(0); private final String hostName; ClientFeederV3( ReferencedResource<SharedSourceSession> sourceSession, FeedReaderFactory feedReaderFactory, DocumentTypeManager docTypeManager, String clientId, Metric metric, ReplyHandler feedReplyHandler, HttpThrottlePolicy httpThrottlePolicy) { this.sourceSession = sourceSession; this.clientId = clientId; this.feedReplyHandler = feedReplyHandler; this.metric = metric; this.httpThrottlePolicy = httpThrottlePolicy; this.streamReaderV3 = new StreamReaderV3(feedReaderFactory, docTypeManager); this.hostName = HostName.getLocalhost(); } public boolean timedOut() { synchronized (monitor) { return Instant.now().isAfter(prevOpsPerSecTime.plusSeconds(6000)) && ongoingRequests.get() == 0; } } public void kill() { while (ongoingRequests.get() > 0) { try { ongoingRequests.wait(100); } catch (InterruptedException e) { break; } } sourceSession.getReference().close(); } private void transferPreviousRepliesToResponse(BlockingQueue<OperationStatus> operations) throws InterruptedException { OperationStatus status = feedReplies.poll(); while (status != null) { outstandingOperations.decrementAndGet(); operations.put(status); status = feedReplies.poll(); } } public HttpResponse handleRequest(HttpRequest 
request) throws IOException { ongoingRequests.incrementAndGet(); try { FeederSettings feederSettings = new FeederSettings(request); if (httpThrottlePolicy.shouldThrottle()) { return new ErrorHttpResponse(getOverloadReturnCode(request), "Gateway overloaded"); } InputStream inputStream = StreamReaderV3.unzipStreamIfNeeded(request); BlockingQueue<OperationStatus> replies = new LinkedBlockingQueue<>(); try { feed(feederSettings, inputStream, replies); synchronized (monitor) { if (request.getJDiscRequest().headers().get(Headers.DATA_FORMAT) != null) { transferPreviousRepliesToResponse(replies); } } } catch (InterruptedException e) { } catch (Throwable e) { log.log(Level.WARNING, "Unhandled exception while feeding: " + Exceptions.toMessageString(e), e); } finally { replies.add(createOperationStatus("-", "-", ErrorCode.END_OF_FEED, null)); } return new FeedResponse(200, replies, 3, clientId, outstandingOperations.get(), hostName); } finally { ongoingRequests.decrementAndGet(); } } private int getOverloadReturnCode(HttpRequest request) { if (request.getHeader(Headers.SILENTUPGRADE) != null ) return 299; return 429; } private Optional<DocumentOperationMessageV3> pullMessageFromRequest(FeederSettings settings, InputStream requestInputStream, BlockingQueue<OperationStatus> repliesFromOldMessages) { while (true) { Optional<String> operationId; try { operationId = streamReaderV3.getNextOperationId(requestInputStream); if (operationId.isEmpty()) return Optional.empty(); } catch (IOException ioe) { log.log(Level.FINE, () -> Exceptions.toMessageString(ioe)); return Optional.empty(); } try { DocumentOperationMessageV3 message = getNextMessage(operationId.get(), requestInputStream, settings); if (message != null) setRoute(message, settings); return Optional.ofNullable(message); } catch (Exception e) { log.log(Level.WARNING, () -> Exceptions.toMessageString(e)); metric.add(MetricNames.PARSE_ERROR, 1, null); repliesFromOldMessages.add(new OperationStatus(Exceptions.toMessageString(e), 
operationId.get(), ErrorCode.ERROR, false, "")); } } } private void feed(FeederSettings settings, InputStream requestInputStream, BlockingQueue<OperationStatus> repliesFromOldMessages) { while (true) { Optional<DocumentOperationMessageV3> message = pullMessageFromRequest(settings, requestInputStream, repliesFromOldMessages); if (message.isEmpty()) break; setMessageParameters(message.get(), settings); Result result; try { result = sendMessage(message.get()); } catch (RuntimeException e) { repliesFromOldMessages.add(createOperationStatus(message.get().getOperationId(), Exceptions.toMessageString(e), ErrorCode.ERROR, message.get().getMessage())); continue; } if (result.isAccepted()) { outstandingOperations.incrementAndGet(); updateOpsPerSec(); log(Level.FINE, "Sent message successfully, document id: ", message.get().getOperationId()); } else if (!result.getError().isFatal()) { repliesFromOldMessages.add(createOperationStatus(message.get().getOperationId(), result.getError().getMessage(), ErrorCode.TRANSIENT_ERROR, message.get().getMessage())); } else { repliesFromOldMessages.add(createOperationStatus(message.get().getOperationId(), result.getError().getMessage(), ErrorCode.ERROR, message.get().getMessage())); } } } private OperationStatus createOperationStatus(String id, String message, ErrorCode code, Message msg) { String traceMessage = msg != null && msg.getTrace() != null && msg.getTrace().getLevel() > 0 ? 
msg.getTrace().toString() : ""; return new OperationStatus(message, id, code, false, traceMessage); } /** Returns the next message in the stream, or null if none */ protected DocumentOperationMessageV3 getNextMessage(String operationId, InputStream requestInputStream, FeederSettings settings) throws Exception { FeedOperation operation = streamReaderV3.getNextOperation(requestInputStream, settings); if (sourceSession.getResource().session() != null) { metric.set( MetricNames.PENDING, Double.valueOf(sourceSession.getResource().session().getPendingCount()), null); } DocumentOperationMessageV3 message = DocumentOperationMessageV3.create(operation, operationId, metric); if (message == null) { return null; } metric.add(MetricNames.NUM_OPERATIONS, 1, null /*metricContext*/); log(Level.FINE, "Successfully deserialized document id: ", message.getOperationId()); return message; } private void setMessageParameters(DocumentOperationMessageV3 msg, FeederSettings settings) { msg.getMessage().setContext(new ReplyContext(msg.getOperationId(), feedReplies)); if (settings.traceLevel != null) { msg.getMessage().getTrace().setLevel(settings.traceLevel); } if (settings.priority != null) { try { DocumentProtocol.Priority priority = DocumentProtocol.Priority.valueOf(settings.priority); if (msg.getMessage() instanceof DocumentMessage) { ((DocumentMessage) msg.getMessage()).setPriority(priority); } } catch (IllegalArgumentException i) { log.severe(i.getMessage()); } } } private void setRoute(DocumentOperationMessageV3 msg, FeederSettings settings) { if (settings.route != null) { msg.getMessage().setRoute(settings.route); } } protected final void log(Level level, Object... 
msgParts) { if (!log.isLoggable(level)) return; StringBuilder s = new StringBuilder(); for (Object part : msgParts) s.append(part.toString()); log.log(level, s.toString()); } private void updateOpsPerSec() { Instant now = Instant.now(); synchronized (monitor) { if (now.plusSeconds(1).isAfter(prevOpsPerSecTime)) { Duration duration = Duration.between(now, prevOpsPerSecTime); double opsPerSec = operationsForOpsPerSec / (duration.toMillis() / 1000.); metric.set(MetricNames.OPERATIONS_PER_SEC, opsPerSec, null /*metricContext*/); operationsForOpsPerSec = 1.0d; prevOpsPerSecTime = now; } else { operationsForOpsPerSec += 1.0d; } } } /* * The gateway handle overload from clients in different ways. * * If the backend is overloaded, but not the gateway, it will fill the backend, messagebus throttler * will start to block new documents and finally all threadsAvailableForFeeding will be blocking. * However, as more threads are added, the gateway will not block on messagebus but return * transitive errors on the documents that can not be processed. These errors will cause the client(s) to * back off a bit. * * However, we can also have the case that the gateway becomes the bottleneck (e.g. CPU). In this case * we need to stop processing of new messages as early as possible and reject the request. This * will cause the client(s) to back off for a while. We want some slack before we enter this mode. * If we can simply transitively fail each document, it is nicer. Therefor we allow some threads to be * busy processing requests with transitive errors before entering this mode. Since we already * have flooded the backend, have several threads hanging and waiting for capacity, the number should * not be very large. Too much slack can lead to too many threads handling feed and impacting query traffic. */ interface HttpThrottlePolicy { boolean shouldThrottle(); } }
class ClientFeederV3 { protected static final Logger log = Logger.getLogger(ClientFeederV3.class.getName()); private final static AtomicInteger outstandingOperations = new AtomicInteger(0); private final BlockingQueue<OperationStatus> feedReplies = new LinkedBlockingQueue<>(); private final ReferencedResource<SharedSourceSession> sourceSession; private final String clientId; private final ReplyHandler feedReplyHandler; private final Metric metric; private final HttpThrottlePolicy httpThrottlePolicy; private Instant prevOpsPerSecTime = Instant.now(); private double operationsForOpsPerSec = 0d; private final Object monitor = new Object(); private final StreamReaderV3 streamReaderV3; private final AtomicInteger ongoingRequests = new AtomicInteger(0); private final String hostName; ClientFeederV3( ReferencedResource<SharedSourceSession> sourceSession, FeedReaderFactory feedReaderFactory, DocumentTypeManager docTypeManager, String clientId, Metric metric, ReplyHandler feedReplyHandler, HttpThrottlePolicy httpThrottlePolicy) { this.sourceSession = sourceSession; this.clientId = clientId; this.feedReplyHandler = feedReplyHandler; this.metric = metric; this.httpThrottlePolicy = httpThrottlePolicy; this.streamReaderV3 = new StreamReaderV3(feedReaderFactory, docTypeManager); this.hostName = HostName.getLocalhost(); } public boolean timedOut() { synchronized (monitor) { return Instant.now().isAfter(prevOpsPerSecTime.plusSeconds(6000)) && ongoingRequests.get() == 0; } } public void kill() { while (ongoingRequests.get() > 0) { try { ongoingRequests.wait(100); } catch (InterruptedException e) { break; } } sourceSession.getReference().close(); } private void transferPreviousRepliesToResponse(BlockingQueue<OperationStatus> operations) throws InterruptedException { OperationStatus status = feedReplies.poll(); while (status != null) { outstandingOperations.decrementAndGet(); operations.put(status); status = feedReplies.poll(); } } public HttpResponse handleRequest(HttpRequest 
request) throws IOException { ongoingRequests.incrementAndGet(); try { FeederSettings feederSettings = new FeederSettings(request); if (httpThrottlePolicy.shouldThrottle()) { return new ErrorHttpResponse(getOverloadReturnCode(request), "Gateway overloaded"); } InputStream inputStream = StreamReaderV3.unzipStreamIfNeeded(request); BlockingQueue<OperationStatus> replies = new LinkedBlockingQueue<>(); try { feed(feederSettings, inputStream, replies); synchronized (monitor) { if (request.getJDiscRequest().headers().get(Headers.DATA_FORMAT) != null) { transferPreviousRepliesToResponse(replies); } } } catch (InterruptedException e) { } catch (Throwable e) { log.log(Level.WARNING, "Unhandled exception while feeding: " + Exceptions.toMessageString(e), e); } finally { replies.add(createOperationStatus("-", "-", ErrorCode.END_OF_FEED, null)); } return new FeedResponse(200, replies, 3, clientId, outstandingOperations.get(), hostName); } finally { ongoingRequests.decrementAndGet(); } } private int getOverloadReturnCode(HttpRequest request) { if (request.getHeader(Headers.SILENTUPGRADE) != null ) return 299; return 429; } private Optional<DocumentOperationMessageV3> pullMessageFromRequest(FeederSettings settings, InputStream requestInputStream, BlockingQueue<OperationStatus> repliesFromOldMessages) { while (true) { Optional<String> operationId; try { operationId = streamReaderV3.getNextOperationId(requestInputStream); if (operationId.isEmpty()) return Optional.empty(); } catch (IOException ioe) { log.log(Level.FINE, () -> Exceptions.toMessageString(ioe)); return Optional.empty(); } try { DocumentOperationMessageV3 message = getNextMessage(operationId.get(), requestInputStream, settings); if (message != null) setRoute(message, settings); return Optional.ofNullable(message); } catch (Exception e) { log.log(Level.WARNING, () -> Exceptions.toMessageString(e)); metric.add(MetricNames.PARSE_ERROR, 1, null); repliesFromOldMessages.add(new OperationStatus(Exceptions.toMessageString(e), 
operationId.get(), ErrorCode.ERROR, false, "")); } } } private void feed(FeederSettings settings, InputStream requestInputStream, BlockingQueue<OperationStatus> repliesFromOldMessages) { while (true) { Optional<DocumentOperationMessageV3> message = pullMessageFromRequest(settings, requestInputStream, repliesFromOldMessages); if (message.isEmpty()) break; setMessageParameters(message.get(), settings); Result result; try { result = sendMessage(message.get()); } catch (RuntimeException e) { repliesFromOldMessages.add(createOperationStatus(message.get().getOperationId(), Exceptions.toMessageString(e), ErrorCode.ERROR, message.get().getMessage())); continue; } if (result.isAccepted()) { outstandingOperations.incrementAndGet(); updateOpsPerSec(); log(Level.FINE, "Sent message successfully, document id: ", message.get().getOperationId()); } else if (!result.getError().isFatal()) { repliesFromOldMessages.add(createOperationStatus(message.get().getOperationId(), result.getError().getMessage(), ErrorCode.TRANSIENT_ERROR, message.get().getMessage())); } else { repliesFromOldMessages.add(createOperationStatus(message.get().getOperationId(), result.getError().getMessage(), ErrorCode.ERROR, message.get().getMessage())); } } } private OperationStatus createOperationStatus(String id, String message, ErrorCode code, Message msg) { String traceMessage = msg != null && msg.getTrace() != null && msg.getTrace().getLevel() > 0 ? 
msg.getTrace().toString() : ""; return new OperationStatus(message, id, code, false, traceMessage); } /** Returns the next message in the stream, or null if none */ protected DocumentOperationMessageV3 getNextMessage(String operationId, InputStream requestInputStream, FeederSettings settings) throws Exception { FeedOperation operation = streamReaderV3.getNextOperation(requestInputStream, settings); if (sourceSession.getResource().session() != null) { metric.set( MetricNames.PENDING, Double.valueOf(sourceSession.getResource().session().getPendingCount()), null); } DocumentOperationMessageV3 message = DocumentOperationMessageV3.create(operation, operationId, metric); if (message == null) { return null; } metric.add(MetricNames.NUM_OPERATIONS, 1, null /*metricContext*/); log(Level.FINE, "Successfully deserialized document id: ", message.getOperationId()); return message; } private void setMessageParameters(DocumentOperationMessageV3 msg, FeederSettings settings) { msg.getMessage().setContext(new ReplyContext(msg.getOperationId(), feedReplies)); if (settings.traceLevel != null) { msg.getMessage().getTrace().setLevel(settings.traceLevel); } if (settings.priority != null) { try { DocumentProtocol.Priority priority = DocumentProtocol.Priority.valueOf(settings.priority); if (msg.getMessage() instanceof DocumentMessage) { ((DocumentMessage) msg.getMessage()).setPriority(priority); } } catch (IllegalArgumentException i) { log.severe(i.getMessage()); } } } private void setRoute(DocumentOperationMessageV3 msg, FeederSettings settings) { if (settings.route != null) { msg.getMessage().setRoute(settings.route); } } protected final void log(Level level, Object... 
msgParts) { if (!log.isLoggable(level)) return; StringBuilder s = new StringBuilder(); for (Object part : msgParts) s.append(part.toString()); log.log(level, s.toString()); } private void updateOpsPerSec() { Instant now = Instant.now(); synchronized (monitor) { if (now.plusSeconds(1).isAfter(prevOpsPerSecTime)) { Duration duration = Duration.between(now, prevOpsPerSecTime); double opsPerSec = operationsForOpsPerSec / (duration.toMillis() / 1000.); metric.set(MetricNames.OPERATIONS_PER_SEC, opsPerSec, null /*metricContext*/); operationsForOpsPerSec = 1.0d; prevOpsPerSecTime = now; } else { operationsForOpsPerSec += 1.0d; } } } /* * The gateway handle overload from clients in different ways. * * If the backend is overloaded, but not the gateway, it will fill the backend, messagebus throttler * will start to block new documents and finally all threadsAvailableForFeeding will be blocking. * However, as more threads are added, the gateway will not block on messagebus but return * transitive errors on the documents that can not be processed. These errors will cause the client(s) to * back off a bit. * * However, we can also have the case that the gateway becomes the bottleneck (e.g. CPU). In this case * we need to stop processing of new messages as early as possible and reject the request. This * will cause the client(s) to back off for a while. We want some slack before we enter this mode. * If we can simply transitively fail each document, it is nicer. Therefor we allow some threads to be * busy processing requests with transitive errors before entering this mode. Since we already * have flooded the backend, have several threads hanging and waiting for capacity, the number should * not be very large. Too much slack can lead to too many threads handling feed and impacting query traffic. */ interface HttpThrottlePolicy { boolean shouldThrottle(); } }
👍
private void setMessageParameters(DocumentOperationMessageV3 msg, FeederSettings settings) { msg.getMessage().setContext(new ReplyContext(msg.getOperationId(), feedReplies)); if (settings.traceLevel != null) { msg.getMessage().getTrace().setLevel(settings.traceLevel); } if (settings.priority != null) { try { DocumentProtocol.Priority priority = DocumentProtocol.Priority.valueOf(settings.priority); if (msg.getMessage() instanceof DocumentMessage) { ((DocumentMessage) msg.getMessage()).setPriority(priority); } } catch (IllegalArgumentException i) { log.severe(i.getMessage()); } } }
if (settings.traceLevel != null) {
private void setMessageParameters(DocumentOperationMessageV3 msg, FeederSettings settings) { msg.getMessage().setContext(new ReplyContext(msg.getOperationId(), feedReplies)); if (settings.traceLevel != null) { msg.getMessage().getTrace().setLevel(settings.traceLevel); } if (settings.priority != null) { try { DocumentProtocol.Priority priority = DocumentProtocol.Priority.valueOf(settings.priority); if (msg.getMessage() instanceof DocumentMessage) { ((DocumentMessage) msg.getMessage()).setPriority(priority); } } catch (IllegalArgumentException i) { log.severe(i.getMessage()); } } }
class ClientFeederV3 { protected static final Logger log = Logger.getLogger(ClientFeederV3.class.getName()); private final static AtomicInteger outstandingOperations = new AtomicInteger(0); private final BlockingQueue<OperationStatus> feedReplies = new LinkedBlockingQueue<>(); private final ReferencedResource<SharedSourceSession> sourceSession; private final String clientId; private final ReplyHandler feedReplyHandler; private final Metric metric; private final HttpThrottlePolicy httpThrottlePolicy; private Instant prevOpsPerSecTime = Instant.now(); private double operationsForOpsPerSec = 0d; private final Object monitor = new Object(); private final StreamReaderV3 streamReaderV3; private final AtomicInteger ongoingRequests = new AtomicInteger(0); private final String hostName; ClientFeederV3( ReferencedResource<SharedSourceSession> sourceSession, FeedReaderFactory feedReaderFactory, DocumentTypeManager docTypeManager, String clientId, Metric metric, ReplyHandler feedReplyHandler, HttpThrottlePolicy httpThrottlePolicy) { this.sourceSession = sourceSession; this.clientId = clientId; this.feedReplyHandler = feedReplyHandler; this.metric = metric; this.httpThrottlePolicy = httpThrottlePolicy; this.streamReaderV3 = new StreamReaderV3(feedReaderFactory, docTypeManager); this.hostName = HostName.getLocalhost(); } public boolean timedOut() { synchronized (monitor) { return Instant.now().isAfter(prevOpsPerSecTime.plusSeconds(6000)) && ongoingRequests.get() == 0; } } public void kill() { while (ongoingRequests.get() > 0) { try { ongoingRequests.wait(100); } catch (InterruptedException e) { break; } } sourceSession.getReference().close(); } private void transferPreviousRepliesToResponse(BlockingQueue<OperationStatus> operations) throws InterruptedException { OperationStatus status = feedReplies.poll(); while (status != null) { outstandingOperations.decrementAndGet(); operations.put(status); status = feedReplies.poll(); } } public HttpResponse handleRequest(HttpRequest 
request) throws IOException { ongoingRequests.incrementAndGet(); try { FeederSettings feederSettings = new FeederSettings(request); if (httpThrottlePolicy.shouldThrottle()) { return new ErrorHttpResponse(getOverloadReturnCode(request), "Gateway overloaded"); } InputStream inputStream = StreamReaderV3.unzipStreamIfNeeded(request); BlockingQueue<OperationStatus> replies = new LinkedBlockingQueue<>(); try { feed(feederSettings, inputStream, replies); synchronized (monitor) { if (request.getJDiscRequest().headers().get(Headers.DATA_FORMAT) != null) { transferPreviousRepliesToResponse(replies); } } } catch (InterruptedException e) { } catch (Throwable e) { log.log(Level.WARNING, "Unhandled exception while feeding: " + Exceptions.toMessageString(e), e); } finally { replies.add(createOperationStatus("-", "-", ErrorCode.END_OF_FEED, null)); } return new FeedResponse(200, replies, 3, clientId, outstandingOperations.get(), hostName); } finally { ongoingRequests.decrementAndGet(); } } private int getOverloadReturnCode(HttpRequest request) { if (request.getHeader(Headers.SILENTUPGRADE) != null ) return 299; return 429; } private Optional<DocumentOperationMessageV3> pullMessageFromRequest(FeederSettings settings, InputStream requestInputStream, BlockingQueue<OperationStatus> repliesFromOldMessages) { while (true) { Optional<String> operationId; try { operationId = streamReaderV3.getNextOperationId(requestInputStream); if (operationId.isEmpty()) return Optional.empty(); } catch (IOException ioe) { log.log(Level.FINE, () -> Exceptions.toMessageString(ioe)); return Optional.empty(); } try { DocumentOperationMessageV3 message = getNextMessage(operationId.get(), requestInputStream, settings); if (message != null) setRoute(message, settings); return Optional.ofNullable(message); } catch (Exception e) { log.log(Level.WARNING, () -> Exceptions.toMessageString(e)); metric.add(MetricNames.PARSE_ERROR, 1, null); repliesFromOldMessages.add(new OperationStatus(Exceptions.toMessageString(e), 
operationId.get(), ErrorCode.ERROR, false, "")); } } } private Result sendMessage(DocumentOperationMessageV3 msg) { msg.getMessage().pushHandler(feedReplyHandler); return sourceSession.getResource().sendMessage(msg.getMessage()); } private void feed(FeederSettings settings, InputStream requestInputStream, BlockingQueue<OperationStatus> repliesFromOldMessages) { while (true) { Optional<DocumentOperationMessageV3> message = pullMessageFromRequest(settings, requestInputStream, repliesFromOldMessages); if (message.isEmpty()) break; setMessageParameters(message.get(), settings); Result result; try { result = sendMessage(message.get()); } catch (RuntimeException e) { repliesFromOldMessages.add(createOperationStatus(message.get().getOperationId(), Exceptions.toMessageString(e), ErrorCode.ERROR, message.get().getMessage())); continue; } if (result.isAccepted()) { outstandingOperations.incrementAndGet(); updateOpsPerSec(); log(Level.FINE, "Sent message successfully, document id: ", message.get().getOperationId()); } else if (!result.getError().isFatal()) { repliesFromOldMessages.add(createOperationStatus(message.get().getOperationId(), result.getError().getMessage(), ErrorCode.TRANSIENT_ERROR, message.get().getMessage())); } else { repliesFromOldMessages.add(createOperationStatus(message.get().getOperationId(), result.getError().getMessage(), ErrorCode.ERROR, message.get().getMessage())); } } } private OperationStatus createOperationStatus(String id, String message, ErrorCode code, Message msg) { String traceMessage = msg != null && msg.getTrace() != null && msg.getTrace().getLevel() > 0 ? 
msg.getTrace().toString() : ""; return new OperationStatus(message, id, code, false, traceMessage); } /** Returns the next message in the stream, or null if none */ protected DocumentOperationMessageV3 getNextMessage(String operationId, InputStream requestInputStream, FeederSettings settings) throws Exception { FeedOperation operation = streamReaderV3.getNextOperation(requestInputStream, settings); if (sourceSession.getResource().session() != null) { metric.set( MetricNames.PENDING, Double.valueOf(sourceSession.getResource().session().getPendingCount()), null); } DocumentOperationMessageV3 message = DocumentOperationMessageV3.create(operation, operationId, metric); if (message == null) { return null; } metric.add(MetricNames.NUM_OPERATIONS, 1, null /*metricContext*/); log(Level.FINE, "Successfully deserialized document id: ", message.getOperationId()); return message; } private void setRoute(DocumentOperationMessageV3 msg, FeederSettings settings) { if (settings.route != null) { msg.getMessage().setRoute(settings.route); } } protected final void log(Level level, Object... msgParts) { if (!log.isLoggable(level)) return; StringBuilder s = new StringBuilder(); for (Object part : msgParts) s.append(part.toString()); log.log(level, s.toString()); } private void updateOpsPerSec() { Instant now = Instant.now(); synchronized (monitor) { if (now.plusSeconds(1).isAfter(prevOpsPerSecTime)) { Duration duration = Duration.between(now, prevOpsPerSecTime); double opsPerSec = operationsForOpsPerSec / (duration.toMillis() / 1000.); metric.set(MetricNames.OPERATIONS_PER_SEC, opsPerSec, null /*metricContext*/); operationsForOpsPerSec = 1.0d; prevOpsPerSecTime = now; } else { operationsForOpsPerSec += 1.0d; } } } /* * The gateway handle overload from clients in different ways. * * If the backend is overloaded, but not the gateway, it will fill the backend, messagebus throttler * will start to block new documents and finally all threadsAvailableForFeeding will be blocking. 
* However, as more threads are added, the gateway will not block on messagebus but return * transitive errors on the documents that can not be processed. These errors will cause the client(s) to * back off a bit. * * However, we can also have the case that the gateway becomes the bottleneck (e.g. CPU). In this case * we need to stop processing of new messages as early as possible and reject the request. This * will cause the client(s) to back off for a while. We want some slack before we enter this mode. * If we can simply transitively fail each document, it is nicer. Therefor we allow some threads to be * busy processing requests with transitive errors before entering this mode. Since we already * have flooded the backend, have several threads hanging and waiting for capacity, the number should * not be very large. Too much slack can lead to too many threads handling feed and impacting query traffic. */ interface HttpThrottlePolicy { boolean shouldThrottle(); } }
class ClientFeederV3 { protected static final Logger log = Logger.getLogger(ClientFeederV3.class.getName()); private final static AtomicInteger outstandingOperations = new AtomicInteger(0); private final BlockingQueue<OperationStatus> feedReplies = new LinkedBlockingQueue<>(); private final ReferencedResource<SharedSourceSession> sourceSession; private final String clientId; private final ReplyHandler feedReplyHandler; private final Metric metric; private final HttpThrottlePolicy httpThrottlePolicy; private Instant prevOpsPerSecTime = Instant.now(); private double operationsForOpsPerSec = 0d; private final Object monitor = new Object(); private final StreamReaderV3 streamReaderV3; private final AtomicInteger ongoingRequests = new AtomicInteger(0); private final String hostName; ClientFeederV3( ReferencedResource<SharedSourceSession> sourceSession, FeedReaderFactory feedReaderFactory, DocumentTypeManager docTypeManager, String clientId, Metric metric, ReplyHandler feedReplyHandler, HttpThrottlePolicy httpThrottlePolicy) { this.sourceSession = sourceSession; this.clientId = clientId; this.feedReplyHandler = feedReplyHandler; this.metric = metric; this.httpThrottlePolicy = httpThrottlePolicy; this.streamReaderV3 = new StreamReaderV3(feedReaderFactory, docTypeManager); this.hostName = HostName.getLocalhost(); } public boolean timedOut() { synchronized (monitor) { return Instant.now().isAfter(prevOpsPerSecTime.plusSeconds(6000)) && ongoingRequests.get() == 0; } } public void kill() { while (ongoingRequests.get() > 0) { try { ongoingRequests.wait(100); } catch (InterruptedException e) { break; } } sourceSession.getReference().close(); } private void transferPreviousRepliesToResponse(BlockingQueue<OperationStatus> operations) throws InterruptedException { OperationStatus status = feedReplies.poll(); while (status != null) { outstandingOperations.decrementAndGet(); operations.put(status); status = feedReplies.poll(); } } public HttpResponse handleRequest(HttpRequest 
request) throws IOException { ongoingRequests.incrementAndGet(); try { FeederSettings feederSettings = new FeederSettings(request); if (httpThrottlePolicy.shouldThrottle()) { return new ErrorHttpResponse(getOverloadReturnCode(request), "Gateway overloaded"); } InputStream inputStream = StreamReaderV3.unzipStreamIfNeeded(request); BlockingQueue<OperationStatus> replies = new LinkedBlockingQueue<>(); try { feed(feederSettings, inputStream, replies); synchronized (monitor) { if (request.getJDiscRequest().headers().get(Headers.DATA_FORMAT) != null) { transferPreviousRepliesToResponse(replies); } } } catch (InterruptedException e) { } catch (Throwable e) { log.log(Level.WARNING, "Unhandled exception while feeding: " + Exceptions.toMessageString(e), e); } finally { replies.add(createOperationStatus("-", "-", ErrorCode.END_OF_FEED, null)); } return new FeedResponse(200, replies, 3, clientId, outstandingOperations.get(), hostName); } finally { ongoingRequests.decrementAndGet(); } } private int getOverloadReturnCode(HttpRequest request) { if (request.getHeader(Headers.SILENTUPGRADE) != null ) return 299; return 429; } private Optional<DocumentOperationMessageV3> pullMessageFromRequest(FeederSettings settings, InputStream requestInputStream, BlockingQueue<OperationStatus> repliesFromOldMessages) { while (true) { Optional<String> operationId; try { operationId = streamReaderV3.getNextOperationId(requestInputStream); if (operationId.isEmpty()) return Optional.empty(); } catch (IOException ioe) { log.log(Level.FINE, () -> Exceptions.toMessageString(ioe)); return Optional.empty(); } try { DocumentOperationMessageV3 message = getNextMessage(operationId.get(), requestInputStream, settings); if (message != null) setRoute(message, settings); return Optional.ofNullable(message); } catch (Exception e) { log.log(Level.WARNING, () -> Exceptions.toMessageString(e)); metric.add(MetricNames.PARSE_ERROR, 1, null); repliesFromOldMessages.add(new OperationStatus(Exceptions.toMessageString(e), 
operationId.get(), ErrorCode.ERROR, false, "")); } } } private Result sendMessage(DocumentOperationMessageV3 msg) { msg.getMessage().pushHandler(feedReplyHandler); return sourceSession.getResource().sendMessage(msg.getMessage()); } private void feed(FeederSettings settings, InputStream requestInputStream, BlockingQueue<OperationStatus> repliesFromOldMessages) { while (true) { Optional<DocumentOperationMessageV3> message = pullMessageFromRequest(settings, requestInputStream, repliesFromOldMessages); if (message.isEmpty()) break; setMessageParameters(message.get(), settings); Result result; try { result = sendMessage(message.get()); } catch (RuntimeException e) { repliesFromOldMessages.add(createOperationStatus(message.get().getOperationId(), Exceptions.toMessageString(e), ErrorCode.ERROR, message.get().getMessage())); continue; } if (result.isAccepted()) { outstandingOperations.incrementAndGet(); updateOpsPerSec(); log(Level.FINE, "Sent message successfully, document id: ", message.get().getOperationId()); } else if (!result.getError().isFatal()) { repliesFromOldMessages.add(createOperationStatus(message.get().getOperationId(), result.getError().getMessage(), ErrorCode.TRANSIENT_ERROR, message.get().getMessage())); } else { repliesFromOldMessages.add(createOperationStatus(message.get().getOperationId(), result.getError().getMessage(), ErrorCode.ERROR, message.get().getMessage())); } } } private OperationStatus createOperationStatus(String id, String message, ErrorCode code, Message msg) { String traceMessage = msg != null && msg.getTrace() != null && msg.getTrace().getLevel() > 0 ? 
msg.getTrace().toString() : ""; return new OperationStatus(message, id, code, false, traceMessage); } /** Returns the next message in the stream, or null if none */ protected DocumentOperationMessageV3 getNextMessage(String operationId, InputStream requestInputStream, FeederSettings settings) throws Exception { FeedOperation operation = streamReaderV3.getNextOperation(requestInputStream, settings); if (sourceSession.getResource().session() != null) { metric.set( MetricNames.PENDING, Double.valueOf(sourceSession.getResource().session().getPendingCount()), null); } DocumentOperationMessageV3 message = DocumentOperationMessageV3.create(operation, operationId, metric); if (message == null) { return null; } metric.add(MetricNames.NUM_OPERATIONS, 1, null /*metricContext*/); log(Level.FINE, "Successfully deserialized document id: ", message.getOperationId()); return message; } private void setRoute(DocumentOperationMessageV3 msg, FeederSettings settings) { if (settings.route != null) { msg.getMessage().setRoute(settings.route); } } protected final void log(Level level, Object... msgParts) { if (!log.isLoggable(level)) return; StringBuilder s = new StringBuilder(); for (Object part : msgParts) s.append(part.toString()); log.log(level, s.toString()); } private void updateOpsPerSec() { Instant now = Instant.now(); synchronized (monitor) { if (now.plusSeconds(1).isAfter(prevOpsPerSecTime)) { Duration duration = Duration.between(now, prevOpsPerSecTime); double opsPerSec = operationsForOpsPerSec / (duration.toMillis() / 1000.); metric.set(MetricNames.OPERATIONS_PER_SEC, opsPerSec, null /*metricContext*/); operationsForOpsPerSec = 1.0d; prevOpsPerSecTime = now; } else { operationsForOpsPerSec += 1.0d; } } } /* * The gateway handle overload from clients in different ways. * * If the backend is overloaded, but not the gateway, it will fill the backend, messagebus throttler * will start to block new documents and finally all threadsAvailableForFeeding will be blocking. 
* However, as more threads are added, the gateway will not block on messagebus but return * transitive errors on the documents that can not be processed. These errors will cause the client(s) to * back off a bit. * * However, we can also have the case that the gateway becomes the bottleneck (e.g. CPU). In this case * we need to stop processing of new messages as early as possible and reject the request. This * will cause the client(s) to back off for a while. We want some slack before we enter this mode. * If we can simply transitively fail each document, it is nicer. Therefor we allow some threads to be * busy processing requests with transitive errors before entering this mode. Since we already * have flooded the backend, have several threads hanging and waiting for capacity, the number should * not be very large. Too much slack can lead to too many threads handling feed and impacting query traffic. */ interface HttpThrottlePolicy { boolean shouldThrottle(); } }
NVM.
private Result sendMessage(DocumentOperationMessageV3 msg) { msg.getMessage().pushHandler(feedReplyHandler); return sourceSession.getResource().sendMessage(msg.getMessage()); }
return sourceSession.getResource().sendMessage(msg.getMessage());
private Result sendMessage(DocumentOperationMessageV3 msg) { msg.getMessage().pushHandler(feedReplyHandler); return sourceSession.getResource().sendMessage(msg.getMessage()); }
class ClientFeederV3 { protected static final Logger log = Logger.getLogger(ClientFeederV3.class.getName()); private final static AtomicInteger outstandingOperations = new AtomicInteger(0); private final BlockingQueue<OperationStatus> feedReplies = new LinkedBlockingQueue<>(); private final ReferencedResource<SharedSourceSession> sourceSession; private final String clientId; private final ReplyHandler feedReplyHandler; private final Metric metric; private final HttpThrottlePolicy httpThrottlePolicy; private Instant prevOpsPerSecTime = Instant.now(); private double operationsForOpsPerSec = 0d; private final Object monitor = new Object(); private final StreamReaderV3 streamReaderV3; private final AtomicInteger ongoingRequests = new AtomicInteger(0); private final String hostName; ClientFeederV3( ReferencedResource<SharedSourceSession> sourceSession, FeedReaderFactory feedReaderFactory, DocumentTypeManager docTypeManager, String clientId, Metric metric, ReplyHandler feedReplyHandler, HttpThrottlePolicy httpThrottlePolicy) { this.sourceSession = sourceSession; this.clientId = clientId; this.feedReplyHandler = feedReplyHandler; this.metric = metric; this.httpThrottlePolicy = httpThrottlePolicy; this.streamReaderV3 = new StreamReaderV3(feedReaderFactory, docTypeManager); this.hostName = HostName.getLocalhost(); } public boolean timedOut() { synchronized (monitor) { return Instant.now().isAfter(prevOpsPerSecTime.plusSeconds(6000)) && ongoingRequests.get() == 0; } } public void kill() { while (ongoingRequests.get() > 0) { try { ongoingRequests.wait(100); } catch (InterruptedException e) { break; } } sourceSession.getReference().close(); } private void transferPreviousRepliesToResponse(BlockingQueue<OperationStatus> operations) throws InterruptedException { OperationStatus status = feedReplies.poll(); while (status != null) { outstandingOperations.decrementAndGet(); operations.put(status); status = feedReplies.poll(); } } public HttpResponse handleRequest(HttpRequest 
/* Remainder of handleRequest(HttpRequest request): feeds the request body and collects per-operation statuses. */
request) throws IOException {
    ongoingRequests.incrementAndGet();
    try {
        FeederSettings feederSettings = new FeederSettings(request);
        // Reject early when the gateway itself is overloaded (see the throttling comment at the bottom).
        if (httpThrottlePolicy.shouldThrottle()) {
            return new ErrorHttpResponse(getOverloadReturnCode(request), "Gateway overloaded");
        }
        InputStream inputStream = StreamReaderV3.unzipStreamIfNeeded(request);
        BlockingQueue<OperationStatus> replies = new LinkedBlockingQueue<>();
        try {
            feed(feederSettings, inputStream, replies);
            synchronized (monitor) {
                // NOTE(review): presumably only clients sending the data-format header can receive
                // replies for previously sent operations in this response — confirm with client code.
                if (request.getJDiscRequest().headers().get(Headers.DATA_FORMAT) != null) {
                    transferPreviousRepliesToResponse(replies);
                }
            }
        } catch (InterruptedException e) {
            // NOTE(review): the interrupt is swallowed here without restoring the thread's
            // interrupt flag (Thread.currentThread().interrupt()) — confirm this is intended.
        } catch (Throwable e) {
            log.log(Level.WARNING, "Unhandled exception while feeding: " + Exceptions.toMessageString(e), e);
        } finally {
            // Always terminate the reply stream so the client knows the feed is complete.
            replies.add(createOperationStatus("-", "-", ErrorCode.END_OF_FEED, null));
        }
        return new FeedResponse(200, replies, 3, clientId, outstandingOperations.get(), hostName);
    } finally {
        ongoingRequests.decrementAndGet();
    }
}

/* Returns the HTTP status used to signal overload; 299 for clients that flagged they handle it
   silently, 429 otherwise. NOTE(review): inferred from the header name — confirm. */
private int getOverloadReturnCode(HttpRequest request) {
    if (request.getHeader(Headers.SILENTUPGRADE) != null ) return 299;
    return 429;
}

/* Reads the next operation from the request stream and wraps it in a message.
   Returns empty when the stream is exhausted or unreadable; parse failures are reported on
   repliesFromOldMessages and the next operation is tried. */
private Optional<DocumentOperationMessageV3> pullMessageFromRequest(FeederSettings settings,
                                                                    InputStream requestInputStream,
                                                                    BlockingQueue<OperationStatus> repliesFromOldMessages) {
    while (true) {
        Optional<String> operationId;
        try {
            operationId = streamReaderV3.getNextOperationId(requestInputStream);
            if (operationId.isEmpty()) return Optional.empty();
        } catch (IOException ioe) {
            log.log(Level.FINE, () -> Exceptions.toMessageString(ioe));
            return Optional.empty();
        }
        try {
            DocumentOperationMessageV3 message = getNextMessage(operationId.get(), requestInputStream, settings);
            if (message != null)
                setRoute(message, settings);
            return Optional.ofNullable(message);
        } catch (Exception e) {
            log.log(Level.WARNING, () -> Exceptions.toMessageString(e));
            metric.add(MetricNames.PARSE_ERROR, 1, null);
            repliesFromOldMessages.add(new OperationStatus(Exceptions.toMessageString(e),
                                                           operationId.get(), ErrorCode.ERROR, false, ""));
        }
    }
}

/* Pulls operations from the request stream and sends them on the source session until the
   stream is exhausted. Failures are reported on repliesFromOldMessages; accepted sends only
   increment outstandingOperations (replies arrive asynchronously). */
private void feed(FeederSettings settings,
                  InputStream requestInputStream,
                  BlockingQueue<OperationStatus> repliesFromOldMessages) {
    while (true) {
        Optional<DocumentOperationMessageV3> message = pullMessageFromRequest(settings, requestInputStream, repliesFromOldMessages);
        if (message.isEmpty()) break;
        setMessageParameters(message.get(), settings);
        Result result;
        try {
            // sendMessage is defined elsewhere in this class (not visible in this fragment).
            result = sendMessage(message.get());
        } catch (RuntimeException e) {
            repliesFromOldMessages.add(createOperationStatus(message.get().getOperationId(),
                                                             Exceptions.toMessageString(e),
                                                             ErrorCode.ERROR,
                                                             message.get().getMessage()));
            continue;
        }
        if (result.isAccepted()) {
            outstandingOperations.incrementAndGet();
            updateOpsPerSec();
            log(Level.FINE, "Sent message successfully, document id: ", message.get().getOperationId());
        } else if (!result.getError().isFatal()) {
            repliesFromOldMessages.add(createOperationStatus(message.get().getOperationId(),
                                                             result.getError().getMessage(),
                                                             ErrorCode.TRANSIENT_ERROR,
                                                             message.get().getMessage()));
        } else {
            repliesFromOldMessages.add(createOperationStatus(message.get().getOperationId(),
                                                             result.getError().getMessage(),
                                                             ErrorCode.ERROR,
                                                             message.get().getMessage()));
        }
    }
}

/* Builds an OperationStatus, attaching the message trace only when tracing was enabled (level > 0). */
private OperationStatus createOperationStatus(String id, String message, ErrorCode code, Message msg) {
    String traceMessage = msg != null && msg.getTrace() != null && msg.getTrace().getLevel() > 0 ?
                          msg.getTrace().toString() : "";
    return new OperationStatus(message, id, code, false, traceMessage);
}

/** Returns the next message in the stream, or null if none */
protected DocumentOperationMessageV3 getNextMessage(String operationId,
                                                    InputStream requestInputStream,
                                                    FeederSettings settings) throws Exception {
    FeedOperation operation = streamReaderV3.getNextOperation(requestInputStream, settings);
    // Publish the session's pending count as a gauge while we have the session at hand.
    if (sourceSession.getResource().session() != null) {
        metric.set( MetricNames.PENDING,
                    Double.valueOf(sourceSession.getResource().session().getPendingCount()),
                    null);
    }
    DocumentOperationMessageV3 message = DocumentOperationMessageV3.create(operation, operationId, metric);
    if (message == null) {
        return null;
    }
    metric.add(MetricNames.NUM_OPERATIONS, 1, null /*metricContext*/);
    log(Level.FINE, "Successfully deserialized document id: ", message.getOperationId());
    return message;
}

/* Applies per-request settings (reply context, trace level, priority) to the message. */
private void setMessageParameters(DocumentOperationMessageV3 msg, FeederSettings settings) {
    msg.getMessage().setContext(new ReplyContext(msg.getOperationId(), feedReplies));
    if (settings.traceLevel != null) {
        msg.getMessage().getTrace().setLevel(settings.traceLevel);
    }
    if (settings.priority != null) {
        try {
            DocumentProtocol.Priority priority = DocumentProtocol.Priority.valueOf(settings.priority);
            if (msg.getMessage() instanceof DocumentMessage) {
                ((DocumentMessage) msg.getMessage()).setPriority(priority);
            }
        } catch (IllegalArgumentException i) {
            // Unknown priority names are logged and ignored rather than failing the operation.
            log.severe(i.getMessage());
        }
    }
}

/* Applies the request's explicit route, if any, to the message. */
private void setRoute(DocumentOperationMessageV3 msg, FeederSettings settings) {
    if (settings.route != null) {
        msg.getMessage().setRoute(settings.route);
    }
}

/* Concatenates the parts and logs them, skipping the string building when the level is disabled. */
protected final void log(Level level, Object... msgParts) {
    if (!log.isLoggable(level)) return;
    StringBuilder s = new StringBuilder();
    for (Object part : msgParts)
        s.append(part.toString());
    log.log(level, s.toString());
}

/* Tracks and periodically publishes operations/second for this client. */
private void updateOpsPerSec() {
    Instant now = Instant.now();
    synchronized (monitor) {
        // NOTE(review): now.plusSeconds(1) is essentially always after prevOpsPerSecTime, so this
        // branch is taken on every call; and Duration.between(now, prevOpsPerSecTime) is negative
        // whenever prevOpsPerSecTime is in the past, making opsPerSec negative. Likely intended:
        // now.isAfter(prevOpsPerSecTime.plusSeconds(1)) and Duration.between(prevOpsPerSecTime, now).
        if (now.plusSeconds(1).isAfter(prevOpsPerSecTime)) {
            Duration duration = Duration.between(now, prevOpsPerSecTime);
            double opsPerSec = operationsForOpsPerSec / (duration.toMillis() / 1000.);
            metric.set(MetricNames.OPERATIONS_PER_SEC, opsPerSec, null /*metricContext*/);
            operationsForOpsPerSec = 1.0d;
            prevOpsPerSecTime = now;
        } else {
            operationsForOpsPerSec += 1.0d;
        }
    }
}

/*
 * The gateway handles overload from clients in different ways.
 *
 * If the backend is overloaded, but not the gateway, it will fill the backend, the messagebus throttler
 * will start to block new documents and finally all threads available for feeding will be blocking.
 * However, as more threads are added, the gateway will not block on messagebus but return
 * transient errors on the documents that can not be processed. These errors will cause the client(s) to
 * back off a bit.
 *
 * However, we can also have the case that the gateway becomes the bottleneck (e.g. CPU). In this case
 * we need to stop processing of new messages as early as possible and reject the request. This
 * will cause the client(s) to back off for a while. We want some slack before we enter this mode.
 * If we can simply fail each document with a transient error, it is nicer. Therefore we allow some
 * threads to be busy processing requests with transient errors before entering this mode. Since we
 * already have flooded the backend, and have several threads hanging and waiting for capacity, the
 * number should not be very large. Too much slack can lead to too many threads handling feed and
 * impacting query traffic.
 */
/* Decides whether a new feed request should be rejected up front. */
interface HttpThrottlePolicy {
    boolean shouldThrottle();
}
}
/**
 * Feeds documents for a single client over HTTP (feed protocol version 3).
 * In-flight requests are counted in {@code ongoingRequests} so the instance is only
 * removed/killed when idle; mutable rate-tracking state is guarded by {@code monitor}.
 */
class ClientFeederV3 {

    protected static final Logger log = Logger.getLogger(ClientFeederV3.class.getName());

    // Operations sent on messagebus but not yet answered; shared between all clients (static).
    private static final AtomicInteger outstandingOperations = new AtomicInteger(0);

    // Asynchronous replies for this client's operations, drained into HTTP responses.
    private final BlockingQueue<OperationStatus> feedReplies = new LinkedBlockingQueue<>();
    private final ReferencedResource<SharedSourceSession> sourceSession;
    private final String clientId;
    private final ReplyHandler feedReplyHandler;
    private final Metric metric;
    private final HttpThrottlePolicy httpThrottlePolicy;

    // Rate tracking; guarded by 'monitor'.
    private Instant prevOpsPerSecTime = Instant.now();
    private double operationsForOpsPerSec = 0d;
    private final Object monitor = new Object();

    private final StreamReaderV3 streamReaderV3;
    private final AtomicInteger ongoingRequests = new AtomicInteger(0);
    private final String hostName;

    ClientFeederV3(
            ReferencedResource<SharedSourceSession> sourceSession,
            FeedReaderFactory feedReaderFactory,
            DocumentTypeManager docTypeManager,
            String clientId,
            Metric metric,
            ReplyHandler feedReplyHandler,
            HttpThrottlePolicy httpThrottlePolicy) {
        this.sourceSession = sourceSession;
        this.clientId = clientId;
        this.feedReplyHandler = feedReplyHandler;
        this.metric = metric;
        this.httpThrottlePolicy = httpThrottlePolicy;
        this.streamReaderV3 = new StreamReaderV3(feedReaderFactory, docTypeManager);
        this.hostName = HostName.getLocalhost();
    }

    /** Returns whether this client has been inactive for a long time and has no requests in flight. */
    public boolean timedOut() {
        synchronized (monitor) {
            return Instant.now().isAfter(prevOpsPerSecTime.plusSeconds(6000)) && ongoingRequests.get() == 0;
        }
    }

    /** Waits for in-flight requests to finish, then closes the shared source session. */
    public void kill() {
        // Bug fix: the previous code called ongoingRequests.wait(100) without synchronizing on
        // ongoingRequests, which unconditionally throws IllegalMonitorStateException the moment
        // there is an ongoing request. Nothing notifies that monitor either, so poll instead.
        while (ongoingRequests.get() > 0) {
            try {
                Thread.sleep(100);
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt(); // preserve interrupt status for the caller
                break;
            }
        }
        sourceSession.getReference().close();
    }

    /** Moves all replies received so far for this client into the given response queue. */
    private void transferPreviousRepliesToResponse(BlockingQueue<OperationStatus> operations) throws InterruptedException {
        OperationStatus status = feedReplies.poll();
        while (status != null) {
            outstandingOperations.decrementAndGet();
            operations.put(status);
            status = feedReplies.poll();
        }
    }

    /**
     * Feeds all operations in the request body and returns a response containing one status
     * per operation, terminated by an END_OF_FEED marker. Returns an error response instead
     * if the gateway is overloaded.
     */
    public HttpResponse handleRequest(HttpRequest request) throws IOException {
        ongoingRequests.incrementAndGet();
        try {
            FeederSettings feederSettings = new FeederSettings(request);
            if (httpThrottlePolicy.shouldThrottle()) {
                return new ErrorHttpResponse(getOverloadReturnCode(request), "Gateway overloaded");
            }
            InputStream inputStream = StreamReaderV3.unzipStreamIfNeeded(request);
            BlockingQueue<OperationStatus> replies = new LinkedBlockingQueue<>();
            try {
                feed(feederSettings, inputStream, replies);
                synchronized (monitor) {
                    // NOTE(review): presumably only clients sending the data-format header can take
                    // replies for earlier operations in this response — confirm with client code.
                    if (request.getJDiscRequest().headers().get(Headers.DATA_FORMAT) != null) {
                        transferPreviousRepliesToResponse(replies);
                    }
                }
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt(); // bug fix: restore interrupt status instead of swallowing it
            } catch (Throwable e) {
                log.log(Level.WARNING, "Unhandled exception while feeding: " + Exceptions.toMessageString(e), e);
            } finally {
                replies.add(createOperationStatus("-", "-", ErrorCode.END_OF_FEED, null));
            }
            return new FeedResponse(200, replies, 3, clientId, outstandingOperations.get(), hostName);
        } finally {
            ongoingRequests.decrementAndGet();
        }
    }

    /**
     * Returns the HTTP status used to signal overload: 299 for clients that flag (via the
     * silent-upgrade header) that they handle it, otherwise 429.
     */
    private int getOverloadReturnCode(HttpRequest request) {
        if (request.getHeader(Headers.SILENTUPGRADE) != null) return 299;
        return 429;
    }

    /**
     * Reads the next operation from the request stream and wraps it in a message.
     * Returns empty when the stream is exhausted or unreadable; parse failures are reported
     * on the reply queue and the next operation is tried.
     */
    private Optional<DocumentOperationMessageV3> pullMessageFromRequest(FeederSettings settings,
                                                                        InputStream requestInputStream,
                                                                        BlockingQueue<OperationStatus> repliesFromOldMessages) {
        while (true) {
            Optional<String> operationId;
            try {
                operationId = streamReaderV3.getNextOperationId(requestInputStream);
                if (operationId.isEmpty()) return Optional.empty();
            } catch (IOException ioe) {
                log.log(Level.FINE, () -> Exceptions.toMessageString(ioe));
                return Optional.empty();
            }
            try {
                DocumentOperationMessageV3 message = getNextMessage(operationId.get(), requestInputStream, settings);
                if (message != null)
                    setRoute(message, settings);
                return Optional.ofNullable(message);
            } catch (Exception e) {
                log.log(Level.WARNING, () -> Exceptions.toMessageString(e));
                metric.add(MetricNames.PARSE_ERROR, 1, null);
                repliesFromOldMessages.add(new OperationStatus(Exceptions.toMessageString(e),
                                                               operationId.get(), ErrorCode.ERROR, false, ""));
            }
        }
    }

    /**
     * Pulls operations from the request stream and sends them until the stream is exhausted.
     * Send failures are reported on the reply queue; accepted sends only increment
     * outstandingOperations (their replies arrive asynchronously).
     */
    private void feed(FeederSettings settings,
                      InputStream requestInputStream,
                      BlockingQueue<OperationStatus> repliesFromOldMessages) {
        while (true) {
            Optional<DocumentOperationMessageV3> message = pullMessageFromRequest(settings, requestInputStream, repliesFromOldMessages);
            if (message.isEmpty()) break;
            setMessageParameters(message.get(), settings);
            Result result;
            try {
                // sendMessage is defined elsewhere in this class (not shown in this excerpt).
                result = sendMessage(message.get());
            } catch (RuntimeException e) {
                repliesFromOldMessages.add(createOperationStatus(message.get().getOperationId(),
                                                                 Exceptions.toMessageString(e),
                                                                 ErrorCode.ERROR,
                                                                 message.get().getMessage()));
                continue;
            }
            if (result.isAccepted()) {
                outstandingOperations.incrementAndGet();
                updateOpsPerSec();
                log(Level.FINE, "Sent message successfully, document id: ", message.get().getOperationId());
            } else if (!result.getError().isFatal()) {
                repliesFromOldMessages.add(createOperationStatus(message.get().getOperationId(),
                                                                 result.getError().getMessage(),
                                                                 ErrorCode.TRANSIENT_ERROR,
                                                                 message.get().getMessage()));
            } else {
                repliesFromOldMessages.add(createOperationStatus(message.get().getOperationId(),
                                                                 result.getError().getMessage(),
                                                                 ErrorCode.ERROR,
                                                                 message.get().getMessage()));
            }
        }
    }

    /** Builds an OperationStatus, attaching the message trace only when tracing was enabled (level > 0). */
    private OperationStatus createOperationStatus(String id, String message, ErrorCode code, Message msg) {
        String traceMessage = msg != null && msg.getTrace() != null && msg.getTrace().getLevel() > 0 ?
                              msg.getTrace().toString() : "";
        return new OperationStatus(message, id, code, false, traceMessage);
    }

    /** Returns the next message in the stream, or null if none */
    protected DocumentOperationMessageV3 getNextMessage(String operationId,
                                                        InputStream requestInputStream,
                                                        FeederSettings settings) throws Exception {
        FeedOperation operation = streamReaderV3.getNextOperation(requestInputStream, settings);
        // Publish the session's pending count as a gauge while we have the session at hand.
        if (sourceSession.getResource().session() != null) {
            metric.set(MetricNames.PENDING,
                       Double.valueOf(sourceSession.getResource().session().getPendingCount()),
                       null);
        }
        DocumentOperationMessageV3 message = DocumentOperationMessageV3.create(operation, operationId, metric);
        if (message == null) {
            return null;
        }
        metric.add(MetricNames.NUM_OPERATIONS, 1, null /*metricContext*/);
        log(Level.FINE, "Successfully deserialized document id: ", message.getOperationId());
        return message;
    }

    /** Applies per-request settings (reply context, trace level, priority) to the message. */
    private void setMessageParameters(DocumentOperationMessageV3 msg, FeederSettings settings) {
        msg.getMessage().setContext(new ReplyContext(msg.getOperationId(), feedReplies));
        if (settings.traceLevel != null) {
            msg.getMessage().getTrace().setLevel(settings.traceLevel);
        }
        if (settings.priority != null) {
            try {
                DocumentProtocol.Priority priority = DocumentProtocol.Priority.valueOf(settings.priority);
                if (msg.getMessage() instanceof DocumentMessage) {
                    ((DocumentMessage) msg.getMessage()).setPriority(priority);
                }
            } catch (IllegalArgumentException i) {
                // Unknown priority names are logged and ignored rather than failing the operation.
                log.severe(i.getMessage());
            }
        }
    }

    /** Applies the request's explicit route, if any, to the message. */
    private void setRoute(DocumentOperationMessageV3 msg, FeederSettings settings) {
        if (settings.route != null) {
            msg.getMessage().setRoute(settings.route);
        }
    }

    /** Concatenates the parts and logs them, skipping the string building when the level is disabled. */
    protected final void log(Level level, Object... msgParts) {
        if (!log.isLoggable(level)) return;
        StringBuilder s = new StringBuilder();
        for (Object part : msgParts)
            s.append(part.toString());
        log.log(level, s.toString());
    }

    /** Counts one operation, publishing operations/second at most once per second. */
    private void updateOpsPerSec() {
        Instant now = Instant.now();
        synchronized (monitor) {
            // Bug fix: the previous condition (now.plusSeconds(1).isAfter(prevOpsPerSecTime)) was
            // essentially always true, and Duration.between(now, prevOpsPerSecTime) was negative,
            // so a negative rate was published on every accepted operation.
            if (now.isAfter(prevOpsPerSecTime.plusSeconds(1))) {
                Duration duration = Duration.between(prevOpsPerSecTime, now); // positive: prev -> now
                double opsPerSec = operationsForOpsPerSec / (duration.toMillis() / 1000.);
                metric.set(MetricNames.OPERATIONS_PER_SEC, opsPerSec, null /*metricContext*/);
                operationsForOpsPerSec = 1.0d; // the operation that triggered this update
                prevOpsPerSecTime = now;
            } else {
                operationsForOpsPerSec += 1.0d;
            }
        }
    }

    /*
     * The gateway handles overload from clients in different ways.
     *
     * If the backend is overloaded, but not the gateway, it will fill the backend, the messagebus
     * throttler will start to block new documents and finally all threads available for feeding will
     * be blocking. However, as more threads are added, the gateway will not block on messagebus but
     * return transient errors on the documents that can not be processed. These errors will cause
     * the client(s) to back off a bit.
     *
     * However, we can also have the case that the gateway becomes the bottleneck (e.g. CPU). In this
     * case we need to stop processing of new messages as early as possible and reject the request.
     * This will cause the client(s) to back off for a while. We want some slack before we enter this
     * mode. If we can simply fail each document with a transient error, it is nicer. Therefore we
     * allow some threads to be busy processing requests with transient errors before entering this
     * mode. Since we already have flooded the backend, and have several threads hanging and waiting
     * for capacity, the number should not be very large. Too much slack can lead to too many threads
     * handling feed and impacting query traffic.
     */
    /** Decides whether a new feed request should be rejected up front. */
    interface HttpThrottlePolicy {
        boolean shouldThrottle();
    }

}
I think this should be after the active node preference?
/**
 * Compares two node candidates for allocation preference; a negative result means {@code this}
 * should be preferred over {@code other}. The comparison is a sequence of tie-broken tiers, so
 * the order of the checks below is the contract — do not reorder casually.
 */
public int compareTo(NodeCandidate other) {
    // Tier 1: prefer candidates that do not violate the spare-host policy.
    if (!this.violatesSpares && other.violatesSpares) return -1;
    if (!other.violatesSpares && this.violatesSpares) return 1;

    // Tier 2: prefer candidates on an exclusive network switch.
    // NOTE(review): checked before the active-state preference below, so a non-active candidate on
    // an exclusive switch beats an active one on a shared switch — confirm this is intended.
    if (this.exclusiveSwitch && !other.exclusiveSwitch) return -1;
    if (other.exclusiveSwitch && !this.exclusiveSwitch) return 1;

    // Tiers 3-7: prefer nodes by state/role: active > surplus-free > reserved-in-repo > inactive > ready.
    if (this.node.state() == Node.State.active && other.node.state() != Node.State.active) return -1;
    if (other.node.state() == Node.State.active && this.node.state() != Node.State.active) return 1;

    if (!this.isSurplusNode && other.isSurplusNode) return -1;
    if (!other.isSurplusNode && this.isSurplusNode) return 1;

    if (this.isInNodeRepoAndReserved() && ! other.isInNodeRepoAndReserved()) return -1;
    if (other.isInNodeRepoAndReserved() && ! this.isInNodeRepoAndReserved()) return 1;

    if (this.node.state() == Node.State.inactive && other.node.state() != Node.State.inactive) return -1;
    if (other.node.state() == Node.State.inactive && this.node.state() != Node.State.inactive) return 1;

    if (this.node.state() == Node.State.ready && other.node.state() != Node.State.ready) return -1;
    if (other.node.state() == Node.State.ready && this.node.state() != Node.State.ready) return 1;

    // Past the state tiers, both candidates must be in the same state.
    if (this.node.state() != other.node.state())
        throw new IllegalStateException("Nodes " + this.node + " and " + other.node + " have different states");

    // Host-level tie breaking, only when both candidates have a known parent host.
    if (this.parent.isPresent() && other.parent.isPresent()) {
        // Prefer hosts reserved to a tenant, then cheaper disk/storage, then hosts where the node
        // uses less than half the host's resources.
        if ( this.parent.get().reservedTo().isPresent() && ! other.parent.get().reservedTo().isPresent()) return -1;
        if ( ! this.parent.get().reservedTo().isPresent() && other.parent.get().reservedTo().isPresent()) return 1;

        int diskCostDifference = NodeResources.DiskSpeed.compare(this.parent.get().flavor().resources().diskSpeed(),
                                                                 other.parent.get().flavor().resources().diskSpeed());
        if (diskCostDifference != 0) return diskCostDifference;

        int storageCostDifference = NodeResources.StorageType.compare(this.parent.get().flavor().resources().storageType(),
                                                                      other.parent.get().flavor().resources().storageType());
        if (storageCostDifference != 0) return storageCostDifference;

        if ( lessThanHalfTheHost(this) && ! lessThanHalfTheHost(other)) return -1;
        if ( ! lessThanHalfTheHost(this) && lessThanHalfTheHost(other)) return 1;
    }

    // Prefer the candidate whose placement increases host allocation skew the least.
    int hostPriority = Double.compare(this.skewWithThis() - this.skewWithoutThis(),
                                      other.skewWithThis() - other.skewWithoutThis());
    if (hostPriority != 0) return hostPriority;

    // Prefer cheaper flavors.
    if (this.node.flavor().cost() < other.node.flavor().cost()) return -1;
    if (other.node.flavor().cost() < this.node.flavor().cost()) return 1;

    // Prefer hosts later in HOST_STATE_PRIORITY (-2 when no parent, below all list indices).
    // Values are in [-2, 2], so plain subtraction cannot overflow here.
    int thisHostStatePri = this.parent.map(host -> HOST_STATE_PRIORITY.indexOf(host.state())).orElse(-2);
    int otherHostStatePri = other.parent.map(host -> HOST_STATE_PRIORITY.indexOf(host.state())).orElse(-2);
    if (thisHostStatePri != otherHostStatePri) return otherHostStatePri - thisHostStatePri;

    // Prefer lower cluster membership index, then order deterministically by hostname.
    if (this.node.allocation().isPresent() && other.node.allocation().isPresent())
        return Integer.compare(this.node.allocation().get().membership().index(),
                               other.node.allocation().get().membership().index());

    return this.node.hostname().compareTo(other.node.hostname());
}
/**
 * Compares two node candidates for allocation preference; a negative result means {@code this}
 * should be preferred over {@code other}. The comparison is a sequence of tie-broken tiers, so
 * the order of the checks below is the contract — do not reorder casually.
 */
public int compareTo(NodeCandidate other) {
    // Tier 1: prefer candidates that do not violate the spare-host policy.
    if (!this.violatesSpares && other.violatesSpares) return -1;
    if (!other.violatesSpares && this.violatesSpares) return 1;

    // Tiers 2-6: prefer nodes by state/role: active > surplus-free > reserved-in-repo > inactive > ready.
    if (this.node.state() == Node.State.active && other.node.state() != Node.State.active) return -1;
    if (other.node.state() == Node.State.active && this.node.state() != Node.State.active) return 1;

    if (!this.isSurplusNode && other.isSurplusNode) return -1;
    if (!other.isSurplusNode && this.isSurplusNode) return 1;

    if (this.isInNodeRepoAndReserved() && ! other.isInNodeRepoAndReserved()) return -1;
    if (other.isInNodeRepoAndReserved() && ! this.isInNodeRepoAndReserved()) return 1;

    if (this.node.state() == Node.State.inactive && other.node.state() != Node.State.inactive) return -1;
    if (other.node.state() == Node.State.inactive && this.node.state() != Node.State.inactive) return 1;

    if (this.node.state() == Node.State.ready && other.node.state() != Node.State.ready) return -1;
    if (other.node.state() == Node.State.ready && this.node.state() != Node.State.ready) return 1;

    // Past the state tiers, both candidates must be in the same state.
    if (this.node.state() != other.node.state())
        throw new IllegalStateException("Nodes " + this.node + " and " + other.node + " have different states");

    // Host-level tie breaking, only when both candidates have a known parent host.
    if (this.parent.isPresent() && other.parent.isPresent()) {
        // Prefer hosts reserved to a tenant, then cheaper disk/storage, then hosts where the node
        // uses less than half the host's resources.
        if ( this.parent.get().reservedTo().isPresent() && ! other.parent.get().reservedTo().isPresent()) return -1;
        if ( ! this.parent.get().reservedTo().isPresent() && other.parent.get().reservedTo().isPresent()) return 1;

        int diskCostDifference = NodeResources.DiskSpeed.compare(this.parent.get().flavor().resources().diskSpeed(),
                                                                 other.parent.get().flavor().resources().diskSpeed());
        if (diskCostDifference != 0) return diskCostDifference;

        int storageCostDifference = NodeResources.StorageType.compare(this.parent.get().flavor().resources().storageType(),
                                                                      other.parent.get().flavor().resources().storageType());
        if (storageCostDifference != 0) return storageCostDifference;

        if ( lessThanHalfTheHost(this) && ! lessThanHalfTheHost(other)) return -1;
        if ( ! lessThanHalfTheHost(this) && lessThanHalfTheHost(other)) return 1;
    }

    // Prefer the candidate whose placement increases host allocation skew the least.
    int hostPriority = Double.compare(this.skewWithThis() - this.skewWithoutThis(),
                                      other.skewWithThis() - other.skewWithoutThis());
    if (hostPriority != 0) return hostPriority;

    // Prefer cheaper flavors.
    if (this.node.flavor().cost() < other.node.flavor().cost()) return -1;
    if (other.node.flavor().cost() < this.node.flavor().cost()) return 1;

    // Prefer hosts later in HOST_STATE_PRIORITY (-2 when no parent, below all list indices).
    // Values are in [-2, 2], so plain subtraction cannot overflow here.
    int thisHostStatePri = this.parent.map(host -> HOST_STATE_PRIORITY.indexOf(host.state())).orElse(-2);
    int otherHostStatePri = other.parent.map(host -> HOST_STATE_PRIORITY.indexOf(host.state())).orElse(-2);
    if (thisHostStatePri != otherHostStatePri) return otherHostStatePri - thisHostStatePri;

    // Prefer lower cluster membership index, then order deterministically by hostname.
    if (this.node.allocation().isPresent() && other.node.allocation().isPresent())
        return Integer.compare(this.node.allocation().get().membership().index(),
                               other.node.allocation().get().membership().index());

    return this.node.hostname().compareTo(other.node.hostname());
}
/**
 * A candidate node for allocation, carrying the placement facts (parent host, policy violations,
 * role flags) used by compareTo to rank candidates.
 */
class NodeCandidate implements Comparable<NodeCandidate> {

    /** List of host states ordered by preference (ascending) */
    private static final List<Node.State> HOST_STATE_PRIORITY =
            List.of(Node.State.provisioned, Node.State.ready, Node.State.active);

    // Sentinel used to compute the parent's skew without this node's resources.
    private static final NodeResources zeroResources =
            new NodeResources(0, 0, 0, 0, NodeResources.DiskSpeed.any, NodeResources.StorageType.any);

    final Node node;

    /** The free capacity on the parent of this node, before adding this node to it */
    private final NodeResources freeParentCapacity;

    /** The parent host (docker or hypervisor) */
    final Optional<Node> parent;

    /** True if this node is allocated on a host that should be dedicated as a spare */
    final boolean violatesSpares;

    /** True if this node is allocated on an exclusive network switch in its cluster */
    final boolean exclusiveSwitch;

    /** True if this node belongs to a group which will not be needed after this deployment */
    final boolean isSurplusNode;

    /** This node does not exist in the node repository yet */
    final boolean isNewNode;

    /** This node can be resized to the new NodeResources */
    final boolean isResizable;

    NodeCandidate(Node node, NodeResources freeParentCapacity, Optional<Node> parent, boolean violatesSpares,
                  boolean exclusiveSwitch, boolean isSurplusNode, boolean isNewNode, boolean isResizeable) {
        // A node must already exist before it can be resized.
        if (isResizeable && isNewNode) {
            throw new IllegalArgumentException("A new node cannot be resizable");
        }
        this.node = node;
        this.freeParentCapacity = freeParentCapacity;
        this.parent = parent;
        this.violatesSpares = violatesSpares;
        this.exclusiveSwitch = exclusiveSwitch;
        this.isSurplusNode = isSurplusNode;
        this.isNewNode = isNewNode;
        this.isResizable = isResizeable;
    }

    /**
     * Compare this candidate to another
     *
     * @return negative if this should be preferred over other
     */
    @Override
    // NOTE(review): the compareTo implementation appears to have been elided from this excerpt;
    // this @Override is left attached to skewWithoutThis, which overrides nothing and would not
    // compile as written — verify against the full source.
    /** Returns the allocation skew of the parent of this before adding this node to it */
    double skewWithoutThis() { return skewWith(zeroResources); }

    /** Returns the allocation skew of the parent of this after adding this node to it */
    double skewWithThis() { return skewWith(node.resources()); }

    /** Returns a copy of this with node set to given value */
    NodeCandidate withNode(Node node) {
        return new NodeCandidate(node, freeParentCapacity, parent, violatesSpares, exclusiveSwitch,
                                 isSurplusNode, isNewNode, isResizable);
    }

    // Whether the candidate uses less than half of its parent host in every dimension.
    // Assumes the candidate has a parent (parent.get() would throw otherwise).
    private boolean lessThanHalfTheHost(NodeCandidate node) {
        var n = node.node.resources();
        var h = node.parent.get().resources();
        if (h.vcpu() < n.vcpu() * 2) return false;
        if (h.memoryGb() < n.memoryGb() * 2) return false;
        if (h.diskGb() < n.diskGb() * 2) return false;
        return true;
    }

    // The parent's allocation skew if this candidate consumed the given resources; 0 without a parent.
    private double skewWith(NodeResources resources) {
        if (parent.isEmpty()) return 0;
        NodeResources free = freeParentCapacity.justNumbers().subtract(resources.justNumbers());
        return Node.skew(parent.get().flavor().resources(), free);
    }

    // True only for nodes that already exist in the node repo and are in state reserved.
    private boolean isInNodeRepoAndReserved() {
        if (isNewNode) return false;
        return node.state().equals(Node.State.reserved);
    }

    @Override
    public String toString() { return node.id(); }

    @Override
    public int hashCode() { return node.hashCode(); }

    // Identity follows the wrapped node, consistent with hashCode above.
    @Override
    public boolean equals(Object other) {
        if (other == this) return true;
        if ( ! (other instanceof NodeCandidate)) return false;
        return this.node.equals(((NodeCandidate)other).node);
    }

    /** Fluent builder for NodeCandidate; defaults every flag to false and the parent to empty. */
    static class Builder {

        public final Node node;
        private NodeResources freeParentCapacity;
        private Optional<Node> parent = Optional.empty();
        private boolean violatesSpares;
        private boolean isSurplusNode;
        private boolean isNewNode;
        private boolean isResizable;
        private boolean exclusiveSwitch;

        Builder(Node node) {
            this.node = node;
            // Default free parent capacity to the node's own flavor resources.
            this.freeParentCapacity = node.flavor().resources();
        }

        /** The free capacity of the parent, before adding this node to it */
        Builder freeParentCapacity(NodeResources freeParentCapacity) {
            this.freeParentCapacity = freeParentCapacity;
            return this;
        }

        Builder parent(Node parent) { this.parent = Optional.of(parent); return this; }

        Builder violatesSpares(boolean violatesSpares) { this.violatesSpares = violatesSpares; return this; }

        Builder exclusiveSwitch(boolean exclusiveSwitch) { this.exclusiveSwitch = exclusiveSwitch; return this; }

        Builder surplusNode(boolean surplusNode) { isSurplusNode = surplusNode; return this; }

        Builder newNode(boolean newNode) { isNewNode = newNode; return this; }

        Builder resizable(boolean resizable) { isResizable = resizable; return this; }

        NodeCandidate build() {
            return new NodeCandidate(node, freeParentCapacity, parent, violatesSpares, exclusiveSwitch,
                                     isSurplusNode, isNewNode, isResizable);
        }

    }

}
/**
 * A candidate node for allocation, carrying the placement facts (parent host, policy violations,
 * role flags) used by compareTo to rank candidates.
 */
class NodeCandidate implements Comparable<NodeCandidate> {

    /** List of host states ordered by preference (ascending) */
    private static final List<Node.State> HOST_STATE_PRIORITY =
            List.of(Node.State.provisioned, Node.State.ready, Node.State.active);

    // Sentinel used to compute the parent's skew without this node's resources.
    private static final NodeResources zeroResources =
            new NodeResources(0, 0, 0, 0, NodeResources.DiskSpeed.any, NodeResources.StorageType.any);

    final Node node;

    /** The free capacity on the parent of this node, before adding this node to it */
    private final NodeResources freeParentCapacity;

    /** The parent host (docker or hypervisor) */
    final Optional<Node> parent;

    /** True if the node is allocated to a host that should be dedicated as a spare */
    final boolean violatesSpares;

    /** True if this node belongs to a group which will not be needed after this deployment */
    final boolean isSurplusNode;

    /** This node does not exist in the node repository yet */
    final boolean isNewNode;

    /** This node can be resized to the new NodeResources */
    final boolean isResizable;

    NodeCandidate(Node node, NodeResources freeParentCapacity, Optional<Node> parent, boolean violatesSpares,
                  boolean isSurplusNode, boolean isNewNode, boolean isResizeable) {
        // A node must already exist before it can be resized.
        if (isResizeable && isNewNode)
            throw new IllegalArgumentException("A new node cannot be resizable");
        this.node = node;
        this.freeParentCapacity = freeParentCapacity;
        this.parent = parent;
        this.violatesSpares = violatesSpares;
        this.isSurplusNode = isSurplusNode;
        this.isNewNode = isNewNode;
        this.isResizable = isResizeable;
    }

    /**
     * Compare this candidate to another
     *
     * @return negative if first priority is higher than second node
     */
    @Override
    // NOTE(review): the compareTo implementation appears to have been elided from this excerpt;
    // this @Override is left attached to skewWithoutThis, which overrides nothing and would not
    // compile as written — verify against the full source.
    /** Returns the allocation skew of the parent of this before adding this node to it */
    double skewWithoutThis() { return skewWith(zeroResources); }

    /** Returns the allocation skew of the parent of this after adding this node to it */
    double skewWithThis() { return skewWith(node.resources()); }

    /** Returns a copy of this with node set to given value */
    NodeCandidate withNode(Node node) {
        return new NodeCandidate(node, freeParentCapacity, parent, violatesSpares,
                                 isSurplusNode, isNewNode, isResizable);
    }

    // Whether the candidate uses less than half of its parent host in every dimension.
    // Assumes the candidate has a parent (parent.get() would throw otherwise).
    private boolean lessThanHalfTheHost(NodeCandidate node) {
        var n = node.node.resources();
        var h = node.parent.get().resources();
        if (h.vcpu() < n.vcpu() * 2) return false;
        if (h.memoryGb() < n.memoryGb() * 2) return false;
        if (h.diskGb() < n.diskGb() * 2) return false;
        return true;
    }

    // The parent's allocation skew if this candidate consumed the given resources; 0 without a parent.
    private double skewWith(NodeResources resources) {
        if (parent.isEmpty()) return 0;
        NodeResources free = freeParentCapacity.justNumbers().subtract(resources.justNumbers());
        return Node.skew(parent.get().flavor().resources(), free);
    }

    // True only for nodes that already exist in the node repo and are in state reserved.
    private boolean isInNodeRepoAndReserved() {
        if (isNewNode) return false;
        return node.state().equals(Node.State.reserved);
    }

    @Override
    public String toString() { return node.id(); }

    @Override
    public int hashCode() { return node.hashCode(); }

    // Identity follows the wrapped node, consistent with hashCode above.
    @Override
    public boolean equals(Object other) {
        if (other == this) return true;
        if ( ! (other instanceof NodeCandidate)) return false;
        return this.node.equals(((NodeCandidate)other).node);
    }

    /** Fluent builder for NodeCandidate; defaults every flag to false and the parent to empty. */
    static class Builder {

        public final Node node;
        private NodeResources freeParentCapacity;
        private Optional<Node> parent = Optional.empty();
        private boolean violatesSpares;
        private boolean isSurplusNode;
        private boolean isNewNode;
        private boolean isResizable;

        Builder(Node node) {
            this.node = node;
            // Default free parent capacity to the node's own flavor resources.
            this.freeParentCapacity = node.flavor().resources();
        }

        /** The free capacity of the parent, before adding this node to it */
        Builder freeParentCapacity(NodeResources freeParentCapacity) {
            this.freeParentCapacity = freeParentCapacity;
            return this;
        }

        Builder parent(Node parent) { this.parent = Optional.of(parent); return this; }

        Builder violatesSpares(boolean violatesSpares) { this.violatesSpares = violatesSpares; return this; }

        Builder surplusNode(boolean surplusNode) { isSurplusNode = surplusNode; return this; }

        Builder newNode(boolean newNode) { isNewNode = newNode; return this; }

        Builder resizable(boolean resizable) { isResizable = resizable; return this; }

        NodeCandidate build() {
            return new NodeCandidate(node, freeParentCapacity, parent, violatesSpares,
                                     isSurplusNode, isNewNode, isResizable);
        }

    }

}
It should? That means we'll never prefer a new node on an exclusive switch over an existing active node.
public int compareTo(NodeCandidate other) { if (!this.violatesSpares && other.violatesSpares) return -1; if (!other.violatesSpares && this.violatesSpares) return 1; if (this.exclusiveSwitch && !other.exclusiveSwitch) return -1; if (other.exclusiveSwitch && !this.exclusiveSwitch) return 1; if (this.node.state() == Node.State.active && other.node.state() != Node.State.active) return -1; if (other.node.state() == Node.State.active && this.node.state() != Node.State.active) return 1; if (!this.isSurplusNode && other.isSurplusNode) return -1; if (!other.isSurplusNode && this.isSurplusNode) return 1; if (this.isInNodeRepoAndReserved() && ! other.isInNodeRepoAndReserved()) return -1; if (other.isInNodeRepoAndReserved() && ! this.isInNodeRepoAndReserved()) return 1; if (this.node.state() == Node.State.inactive && other.node.state() != Node.State.inactive) return -1; if (other.node.state() == Node.State.inactive && this.node.state() != Node.State.inactive) return 1; if (this.node.state() == Node.State.ready && other.node.state() != Node.State.ready) return -1; if (other.node.state() == Node.State.ready && this.node.state() != Node.State.ready) return 1; if (this.node.state() != other.node.state()) throw new IllegalStateException("Nodes " + this.node + " and " + other.node + " have different states"); if (this.parent.isPresent() && other.parent.isPresent()) { if ( this.parent.get().reservedTo().isPresent() && ! other.parent.get().reservedTo().isPresent()) return -1; if ( ! 
this.parent.get().reservedTo().isPresent() && other.parent.get().reservedTo().isPresent()) return 1; int diskCostDifference = NodeResources.DiskSpeed.compare(this.parent.get().flavor().resources().diskSpeed(), other.parent.get().flavor().resources().diskSpeed()); if (diskCostDifference != 0) return diskCostDifference; int storageCostDifference = NodeResources.StorageType.compare(this.parent.get().flavor().resources().storageType(), other.parent.get().flavor().resources().storageType()); if (storageCostDifference != 0) return storageCostDifference; if ( lessThanHalfTheHost(this) && ! lessThanHalfTheHost(other)) return -1; if ( ! lessThanHalfTheHost(this) && lessThanHalfTheHost(other)) return 1; } int hostPriority = Double.compare(this.skewWithThis() - this.skewWithoutThis(), other.skewWithThis() - other.skewWithoutThis()); if (hostPriority != 0) return hostPriority; if (this.node.flavor().cost() < other.node.flavor().cost()) return -1; if (other.node.flavor().cost() < this.node.flavor().cost()) return 1; int thisHostStatePri = this.parent.map(host -> HOST_STATE_PRIORITY.indexOf(host.state())).orElse(-2); int otherHostStatePri = other.parent.map(host -> HOST_STATE_PRIORITY.indexOf(host.state())).orElse(-2); if (thisHostStatePri != otherHostStatePri) return otherHostStatePri - thisHostStatePri; if (this.node.allocation().isPresent() && other.node.allocation().isPresent()) return Integer.compare(this.node.allocation().get().membership().index(), other.node.allocation().get().membership().index()); return this.node.hostname().compareTo(other.node.hostname()); }
public int compareTo(NodeCandidate other) { if (!this.violatesSpares && other.violatesSpares) return -1; if (!other.violatesSpares && this.violatesSpares) return 1; if (this.node.state() == Node.State.active && other.node.state() != Node.State.active) return -1; if (other.node.state() == Node.State.active && this.node.state() != Node.State.active) return 1; if (!this.isSurplusNode && other.isSurplusNode) return -1; if (!other.isSurplusNode && this.isSurplusNode) return 1; if (this.isInNodeRepoAndReserved() && ! other.isInNodeRepoAndReserved()) return -1; if (other.isInNodeRepoAndReserved() && ! this.isInNodeRepoAndReserved()) return 1; if (this.node.state() == Node.State.inactive && other.node.state() != Node.State.inactive) return -1; if (other.node.state() == Node.State.inactive && this.node.state() != Node.State.inactive) return 1; if (this.node.state() == Node.State.ready && other.node.state() != Node.State.ready) return -1; if (other.node.state() == Node.State.ready && this.node.state() != Node.State.ready) return 1; if (this.node.state() != other.node.state()) throw new IllegalStateException("Nodes " + this.node + " and " + other.node + " have different states"); if (this.parent.isPresent() && other.parent.isPresent()) { if ( this.parent.get().reservedTo().isPresent() && ! other.parent.get().reservedTo().isPresent()) return -1; if ( ! this.parent.get().reservedTo().isPresent() && other.parent.get().reservedTo().isPresent()) return 1; int diskCostDifference = NodeResources.DiskSpeed.compare(this.parent.get().flavor().resources().diskSpeed(), other.parent.get().flavor().resources().diskSpeed()); if (diskCostDifference != 0) return diskCostDifference; int storageCostDifference = NodeResources.StorageType.compare(this.parent.get().flavor().resources().storageType(), other.parent.get().flavor().resources().storageType()); if (storageCostDifference != 0) return storageCostDifference; if ( lessThanHalfTheHost(this) && ! lessThanHalfTheHost(other)) return -1; if ( ! 
lessThanHalfTheHost(this) && lessThanHalfTheHost(other)) return 1; } int hostPriority = Double.compare(this.skewWithThis() - this.skewWithoutThis(), other.skewWithThis() - other.skewWithoutThis()); if (hostPriority != 0) return hostPriority; if (this.node.flavor().cost() < other.node.flavor().cost()) return -1; if (other.node.flavor().cost() < this.node.flavor().cost()) return 1; int thisHostStatePri = this.parent.map(host -> HOST_STATE_PRIORITY.indexOf(host.state())).orElse(-2); int otherHostStatePri = other.parent.map(host -> HOST_STATE_PRIORITY.indexOf(host.state())).orElse(-2); if (thisHostStatePri != otherHostStatePri) return otherHostStatePri - thisHostStatePri; if (this.node.allocation().isPresent() && other.node.allocation().isPresent()) return Integer.compare(this.node.allocation().get().membership().index(), other.node.allocation().get().membership().index()); return this.node.hostname().compareTo(other.node.hostname()); }
class NodeCandidate implements Comparable<NodeCandidate> { /** List of host states ordered by preference (ascending) */ private static final List<Node.State> HOST_STATE_PRIORITY = List.of(Node.State.provisioned, Node.State.ready, Node.State.active); private static final NodeResources zeroResources = new NodeResources(0, 0, 0, 0, NodeResources.DiskSpeed.any, NodeResources.StorageType.any); final Node node; /** The free capacity on the parent of this node, before adding this node to it */ private final NodeResources freeParentCapacity; /** The parent host (docker or hypervisor) */ final Optional<Node> parent; /** True if this node is allocated on a host that should be dedicated as a spare */ final boolean violatesSpares; /** True if this node is allocated on an exclusive network switch in its cluster */ final boolean exclusiveSwitch; /** True if this node belongs to a group which will not be needed after this deployment */ final boolean isSurplusNode; /** This node does not exist in the node repository yet */ final boolean isNewNode; /** This node can be resized to the new NodeResources */ final boolean isResizable; NodeCandidate(Node node, NodeResources freeParentCapacity, Optional<Node> parent, boolean violatesSpares, boolean exclusiveSwitch, boolean isSurplusNode, boolean isNewNode, boolean isResizeable) { if (isResizeable && isNewNode) { throw new IllegalArgumentException("A new node cannot be resizable"); } this.node = node; this.freeParentCapacity = freeParentCapacity; this.parent = parent; this.violatesSpares = violatesSpares; this.exclusiveSwitch = exclusiveSwitch; this.isSurplusNode = isSurplusNode; this.isNewNode = isNewNode; this.isResizable = isResizeable; } /** * Compare this candidate to another * * @return negative if this should be preferred over other */ @Override /** Returns the allocation skew of the parent of this before adding this node to it */ double skewWithoutThis() { return skewWith(zeroResources); } /** Returns the allocation skew of the 
parent of this after adding this node to it */ double skewWithThis() { return skewWith(node.resources()); } /** Returns a copy of this with node set to given value */ NodeCandidate withNode(Node node) { return new NodeCandidate(node, freeParentCapacity, parent, violatesSpares, exclusiveSwitch, isSurplusNode, isNewNode, isResizable); } private boolean lessThanHalfTheHost(NodeCandidate node) { var n = node.node.resources(); var h = node.parent.get().resources(); if (h.vcpu() < n.vcpu() * 2) return false; if (h.memoryGb() < n.memoryGb() * 2) return false; if (h.diskGb() < n.diskGb() * 2) return false; return true; } private double skewWith(NodeResources resources) { if (parent.isEmpty()) return 0; NodeResources free = freeParentCapacity.justNumbers().subtract(resources.justNumbers()); return Node.skew(parent.get().flavor().resources(), free); } private boolean isInNodeRepoAndReserved() { if (isNewNode) return false; return node.state().equals(Node.State.reserved); } @Override public String toString() { return node.id(); } @Override public int hashCode() { return node.hashCode(); } @Override public boolean equals(Object other) { if (other == this) return true; if ( ! 
(other instanceof NodeCandidate)) return false; return this.node.equals(((NodeCandidate)other).node); } static class Builder { public final Node node; private NodeResources freeParentCapacity; private Optional<Node> parent = Optional.empty(); private boolean violatesSpares; private boolean isSurplusNode; private boolean isNewNode; private boolean isResizable; private boolean exclusiveSwitch; Builder(Node node) { this.node = node; this.freeParentCapacity = node.flavor().resources(); } /** The free capacity of the parent, before adding this node to it */ Builder freeParentCapacity(NodeResources freeParentCapacity) { this.freeParentCapacity = freeParentCapacity; return this; } Builder parent(Node parent) { this.parent = Optional.of(parent); return this; } Builder violatesSpares(boolean violatesSpares) { this.violatesSpares = violatesSpares; return this; } Builder exclusiveSwitch(boolean exclusiveSwitch) { this.exclusiveSwitch = exclusiveSwitch; return this; } Builder surplusNode(boolean surplusNode) { isSurplusNode = surplusNode; return this; } Builder newNode(boolean newNode) { isNewNode = newNode; return this; } Builder resizable(boolean resizable) { isResizable = resizable; return this; } NodeCandidate build() { return new NodeCandidate(node, freeParentCapacity, parent, violatesSpares, exclusiveSwitch, isSurplusNode, isNewNode, isResizable); } } }
class NodeCandidate implements Comparable<NodeCandidate> { /** List of host states ordered by preference (ascending) */ private static final List<Node.State> HOST_STATE_PRIORITY = List.of(Node.State.provisioned, Node.State.ready, Node.State.active); private static final NodeResources zeroResources = new NodeResources(0, 0, 0, 0, NodeResources.DiskSpeed.any, NodeResources.StorageType.any); final Node node; /** The free capacity on the parent of this node, before adding this node to it */ private final NodeResources freeParentCapacity; /** The parent host (docker or hypervisor) */ final Optional<Node> parent; /** True if the node is allocated to a host that should be dedicated as a spare */ final boolean violatesSpares; /** True if this node belongs to a group which will not be needed after this deployment */ final boolean isSurplusNode; /** This node does not exist in the node repository yet */ final boolean isNewNode; /** This node can be resized to the new NodeResources */ final boolean isResizable; NodeCandidate(Node node, NodeResources freeParentCapacity, Optional<Node> parent, boolean violatesSpares, boolean isSurplusNode, boolean isNewNode, boolean isResizeable) { if (isResizeable && isNewNode) throw new IllegalArgumentException("A new node cannot be resizable"); this.node = node; this.freeParentCapacity = freeParentCapacity; this.parent = parent; this.violatesSpares = violatesSpares; this.isSurplusNode = isSurplusNode; this.isNewNode = isNewNode; this.isResizable = isResizeable; } /** * Compare this candidate to another * * @return negative if first priority is higher than second node */ @Override /** Returns the allocation skew of the parent of this before adding this node to it */ double skewWithoutThis() { return skewWith(zeroResources); } /** Returns the allocation skew of the parent of this after adding this node to it */ double skewWithThis() { return skewWith(node.resources()); } /** Returns a copy of this with node set to given value */ NodeCandidate 
withNode(Node node) { return new NodeCandidate(node, freeParentCapacity, parent, violatesSpares, isSurplusNode, isNewNode, isResizable); } private boolean lessThanHalfTheHost(NodeCandidate node) { var n = node.node.resources(); var h = node.parent.get().resources(); if (h.vcpu() < n.vcpu() * 2) return false; if (h.memoryGb() < n.memoryGb() * 2) return false; if (h.diskGb() < n.diskGb() * 2) return false; return true; } private double skewWith(NodeResources resources) { if (parent.isEmpty()) return 0; NodeResources free = freeParentCapacity.justNumbers().subtract(resources.justNumbers()); return Node.skew(parent.get().flavor().resources(), free); } private boolean isInNodeRepoAndReserved() { if (isNewNode) return false; return node.state().equals(Node.State.reserved); } @Override public String toString() { return node.id(); } @Override public int hashCode() { return node.hashCode(); } @Override public boolean equals(Object other) { if (other == this) return true; if ( ! (other instanceof NodeCandidate)) return false; return this.node.equals(((NodeCandidate)other).node); } static class Builder { public final Node node; private NodeResources freeParentCapacity; private Optional<Node> parent = Optional.empty(); private boolean violatesSpares; private boolean isSurplusNode; private boolean isNewNode; private boolean isResizable; Builder(Node node) { this.node = node; this.freeParentCapacity = node.flavor().resources(); } /** The free capacity of the parent, before adding this node to it */ Builder freeParentCapacity(NodeResources freeParentCapacity) { this.freeParentCapacity = freeParentCapacity; return this; } Builder parent(Node parent) { this.parent = Optional.of(parent); return this; } Builder violatesSpares(boolean violatesSpares) { this.violatesSpares = violatesSpares; return this; } Builder surplusNode(boolean surplusNode) { isSurplusNode = surplusNode; return this; } Builder newNode(boolean newNode) { isNewNode = newNode; return this; } Builder resizable(boolean 
resizable) { isResizable = resizable; return this; } NodeCandidate build() { return new NodeCandidate(node, freeParentCapacity, parent, violatesSpares, isSurplusNode, isNewNode, isResizable); } } }
The way it is now we'll always (if this code gets its way) prefer new nodes to existing ones if the switch distribution is not optimal. I don't think we want that but instead want to do a controlled retire attempt node by node initiated by a maintainer?
public int compareTo(NodeCandidate other) { if (!this.violatesSpares && other.violatesSpares) return -1; if (!other.violatesSpares && this.violatesSpares) return 1; if (this.exclusiveSwitch && !other.exclusiveSwitch) return -1; if (other.exclusiveSwitch && !this.exclusiveSwitch) return 1; if (this.node.state() == Node.State.active && other.node.state() != Node.State.active) return -1; if (other.node.state() == Node.State.active && this.node.state() != Node.State.active) return 1; if (!this.isSurplusNode && other.isSurplusNode) return -1; if (!other.isSurplusNode && this.isSurplusNode) return 1; if (this.isInNodeRepoAndReserved() && ! other.isInNodeRepoAndReserved()) return -1; if (other.isInNodeRepoAndReserved() && ! this.isInNodeRepoAndReserved()) return 1; if (this.node.state() == Node.State.inactive && other.node.state() != Node.State.inactive) return -1; if (other.node.state() == Node.State.inactive && this.node.state() != Node.State.inactive) return 1; if (this.node.state() == Node.State.ready && other.node.state() != Node.State.ready) return -1; if (other.node.state() == Node.State.ready && this.node.state() != Node.State.ready) return 1; if (this.node.state() != other.node.state()) throw new IllegalStateException("Nodes " + this.node + " and " + other.node + " have different states"); if (this.parent.isPresent() && other.parent.isPresent()) { if ( this.parent.get().reservedTo().isPresent() && ! other.parent.get().reservedTo().isPresent()) return -1; if ( ! 
this.parent.get().reservedTo().isPresent() && other.parent.get().reservedTo().isPresent()) return 1; int diskCostDifference = NodeResources.DiskSpeed.compare(this.parent.get().flavor().resources().diskSpeed(), other.parent.get().flavor().resources().diskSpeed()); if (diskCostDifference != 0) return diskCostDifference; int storageCostDifference = NodeResources.StorageType.compare(this.parent.get().flavor().resources().storageType(), other.parent.get().flavor().resources().storageType()); if (storageCostDifference != 0) return storageCostDifference; if ( lessThanHalfTheHost(this) && ! lessThanHalfTheHost(other)) return -1; if ( ! lessThanHalfTheHost(this) && lessThanHalfTheHost(other)) return 1; } int hostPriority = Double.compare(this.skewWithThis() - this.skewWithoutThis(), other.skewWithThis() - other.skewWithoutThis()); if (hostPriority != 0) return hostPriority; if (this.node.flavor().cost() < other.node.flavor().cost()) return -1; if (other.node.flavor().cost() < this.node.flavor().cost()) return 1; int thisHostStatePri = this.parent.map(host -> HOST_STATE_PRIORITY.indexOf(host.state())).orElse(-2); int otherHostStatePri = other.parent.map(host -> HOST_STATE_PRIORITY.indexOf(host.state())).orElse(-2); if (thisHostStatePri != otherHostStatePri) return otherHostStatePri - thisHostStatePri; if (this.node.allocation().isPresent() && other.node.allocation().isPresent()) return Integer.compare(this.node.allocation().get().membership().index(), other.node.allocation().get().membership().index()); return this.node.hostname().compareTo(other.node.hostname()); }
public int compareTo(NodeCandidate other) { if (!this.violatesSpares && other.violatesSpares) return -1; if (!other.violatesSpares && this.violatesSpares) return 1; if (this.node.state() == Node.State.active && other.node.state() != Node.State.active) return -1; if (other.node.state() == Node.State.active && this.node.state() != Node.State.active) return 1; if (!this.isSurplusNode && other.isSurplusNode) return -1; if (!other.isSurplusNode && this.isSurplusNode) return 1; if (this.isInNodeRepoAndReserved() && ! other.isInNodeRepoAndReserved()) return -1; if (other.isInNodeRepoAndReserved() && ! this.isInNodeRepoAndReserved()) return 1; if (this.node.state() == Node.State.inactive && other.node.state() != Node.State.inactive) return -1; if (other.node.state() == Node.State.inactive && this.node.state() != Node.State.inactive) return 1; if (this.node.state() == Node.State.ready && other.node.state() != Node.State.ready) return -1; if (other.node.state() == Node.State.ready && this.node.state() != Node.State.ready) return 1; if (this.node.state() != other.node.state()) throw new IllegalStateException("Nodes " + this.node + " and " + other.node + " have different states"); if (this.parent.isPresent() && other.parent.isPresent()) { if ( this.parent.get().reservedTo().isPresent() && ! other.parent.get().reservedTo().isPresent()) return -1; if ( ! this.parent.get().reservedTo().isPresent() && other.parent.get().reservedTo().isPresent()) return 1; int diskCostDifference = NodeResources.DiskSpeed.compare(this.parent.get().flavor().resources().diskSpeed(), other.parent.get().flavor().resources().diskSpeed()); if (diskCostDifference != 0) return diskCostDifference; int storageCostDifference = NodeResources.StorageType.compare(this.parent.get().flavor().resources().storageType(), other.parent.get().flavor().resources().storageType()); if (storageCostDifference != 0) return storageCostDifference; if ( lessThanHalfTheHost(this) && ! lessThanHalfTheHost(other)) return -1; if ( ! 
lessThanHalfTheHost(this) && lessThanHalfTheHost(other)) return 1; } int hostPriority = Double.compare(this.skewWithThis() - this.skewWithoutThis(), other.skewWithThis() - other.skewWithoutThis()); if (hostPriority != 0) return hostPriority; if (this.node.flavor().cost() < other.node.flavor().cost()) return -1; if (other.node.flavor().cost() < this.node.flavor().cost()) return 1; int thisHostStatePri = this.parent.map(host -> HOST_STATE_PRIORITY.indexOf(host.state())).orElse(-2); int otherHostStatePri = other.parent.map(host -> HOST_STATE_PRIORITY.indexOf(host.state())).orElse(-2); if (thisHostStatePri != otherHostStatePri) return otherHostStatePri - thisHostStatePri; if (this.node.allocation().isPresent() && other.node.allocation().isPresent()) return Integer.compare(this.node.allocation().get().membership().index(), other.node.allocation().get().membership().index()); return this.node.hostname().compareTo(other.node.hostname()); }
class NodeCandidate implements Comparable<NodeCandidate> { /** List of host states ordered by preference (ascending) */ private static final List<Node.State> HOST_STATE_PRIORITY = List.of(Node.State.provisioned, Node.State.ready, Node.State.active); private static final NodeResources zeroResources = new NodeResources(0, 0, 0, 0, NodeResources.DiskSpeed.any, NodeResources.StorageType.any); final Node node; /** The free capacity on the parent of this node, before adding this node to it */ private final NodeResources freeParentCapacity; /** The parent host (docker or hypervisor) */ final Optional<Node> parent; /** True if this node is allocated on a host that should be dedicated as a spare */ final boolean violatesSpares; /** True if this node is allocated on an exclusive network switch in its cluster */ final boolean exclusiveSwitch; /** True if this node belongs to a group which will not be needed after this deployment */ final boolean isSurplusNode; /** This node does not exist in the node repository yet */ final boolean isNewNode; /** This node can be resized to the new NodeResources */ final boolean isResizable; NodeCandidate(Node node, NodeResources freeParentCapacity, Optional<Node> parent, boolean violatesSpares, boolean exclusiveSwitch, boolean isSurplusNode, boolean isNewNode, boolean isResizeable) { if (isResizeable && isNewNode) { throw new IllegalArgumentException("A new node cannot be resizable"); } this.node = node; this.freeParentCapacity = freeParentCapacity; this.parent = parent; this.violatesSpares = violatesSpares; this.exclusiveSwitch = exclusiveSwitch; this.isSurplusNode = isSurplusNode; this.isNewNode = isNewNode; this.isResizable = isResizeable; } /** * Compare this candidate to another * * @return negative if this should be preferred over other */ @Override /** Returns the allocation skew of the parent of this before adding this node to it */ double skewWithoutThis() { return skewWith(zeroResources); } /** Returns the allocation skew of the 
parent of this after adding this node to it */ double skewWithThis() { return skewWith(node.resources()); } /** Returns a copy of this with node set to given value */ NodeCandidate withNode(Node node) { return new NodeCandidate(node, freeParentCapacity, parent, violatesSpares, exclusiveSwitch, isSurplusNode, isNewNode, isResizable); } private boolean lessThanHalfTheHost(NodeCandidate node) { var n = node.node.resources(); var h = node.parent.get().resources(); if (h.vcpu() < n.vcpu() * 2) return false; if (h.memoryGb() < n.memoryGb() * 2) return false; if (h.diskGb() < n.diskGb() * 2) return false; return true; } private double skewWith(NodeResources resources) { if (parent.isEmpty()) return 0; NodeResources free = freeParentCapacity.justNumbers().subtract(resources.justNumbers()); return Node.skew(parent.get().flavor().resources(), free); } private boolean isInNodeRepoAndReserved() { if (isNewNode) return false; return node.state().equals(Node.State.reserved); } @Override public String toString() { return node.id(); } @Override public int hashCode() { return node.hashCode(); } @Override public boolean equals(Object other) { if (other == this) return true; if ( ! 
(other instanceof NodeCandidate)) return false; return this.node.equals(((NodeCandidate)other).node); } static class Builder { public final Node node; private NodeResources freeParentCapacity; private Optional<Node> parent = Optional.empty(); private boolean violatesSpares; private boolean isSurplusNode; private boolean isNewNode; private boolean isResizable; private boolean exclusiveSwitch; Builder(Node node) { this.node = node; this.freeParentCapacity = node.flavor().resources(); } /** The free capacity of the parent, before adding this node to it */ Builder freeParentCapacity(NodeResources freeParentCapacity) { this.freeParentCapacity = freeParentCapacity; return this; } Builder parent(Node parent) { this.parent = Optional.of(parent); return this; } Builder violatesSpares(boolean violatesSpares) { this.violatesSpares = violatesSpares; return this; } Builder exclusiveSwitch(boolean exclusiveSwitch) { this.exclusiveSwitch = exclusiveSwitch; return this; } Builder surplusNode(boolean surplusNode) { isSurplusNode = surplusNode; return this; } Builder newNode(boolean newNode) { isNewNode = newNode; return this; } Builder resizable(boolean resizable) { isResizable = resizable; return this; } NodeCandidate build() { return new NodeCandidate(node, freeParentCapacity, parent, violatesSpares, exclusiveSwitch, isSurplusNode, isNewNode, isResizable); } } }
class NodeCandidate implements Comparable<NodeCandidate> { /** List of host states ordered by preference (ascending) */ private static final List<Node.State> HOST_STATE_PRIORITY = List.of(Node.State.provisioned, Node.State.ready, Node.State.active); private static final NodeResources zeroResources = new NodeResources(0, 0, 0, 0, NodeResources.DiskSpeed.any, NodeResources.StorageType.any); final Node node; /** The free capacity on the parent of this node, before adding this node to it */ private final NodeResources freeParentCapacity; /** The parent host (docker or hypervisor) */ final Optional<Node> parent; /** True if the node is allocated to a host that should be dedicated as a spare */ final boolean violatesSpares; /** True if this node belongs to a group which will not be needed after this deployment */ final boolean isSurplusNode; /** This node does not exist in the node repository yet */ final boolean isNewNode; /** This node can be resized to the new NodeResources */ final boolean isResizable; NodeCandidate(Node node, NodeResources freeParentCapacity, Optional<Node> parent, boolean violatesSpares, boolean isSurplusNode, boolean isNewNode, boolean isResizeable) { if (isResizeable && isNewNode) throw new IllegalArgumentException("A new node cannot be resizable"); this.node = node; this.freeParentCapacity = freeParentCapacity; this.parent = parent; this.violatesSpares = violatesSpares; this.isSurplusNode = isSurplusNode; this.isNewNode = isNewNode; this.isResizable = isResizeable; } /** * Compare this candidate to another * * @return negative if first priority is higher than second node */ @Override /** Returns the allocation skew of the parent of this before adding this node to it */ double skewWithoutThis() { return skewWith(zeroResources); } /** Returns the allocation skew of the parent of this after adding this node to it */ double skewWithThis() { return skewWith(node.resources()); } /** Returns a copy of this with node set to given value */ NodeCandidate 
withNode(Node node) { return new NodeCandidate(node, freeParentCapacity, parent, violatesSpares, isSurplusNode, isNewNode, isResizable); } private boolean lessThanHalfTheHost(NodeCandidate node) { var n = node.node.resources(); var h = node.parent.get().resources(); if (h.vcpu() < n.vcpu() * 2) return false; if (h.memoryGb() < n.memoryGb() * 2) return false; if (h.diskGb() < n.diskGb() * 2) return false; return true; } private double skewWith(NodeResources resources) { if (parent.isEmpty()) return 0; NodeResources free = freeParentCapacity.justNumbers().subtract(resources.justNumbers()); return Node.skew(parent.get().flavor().resources(), free); } private boolean isInNodeRepoAndReserved() { if (isNewNode) return false; return node.state().equals(Node.State.reserved); } @Override public String toString() { return node.id(); } @Override public int hashCode() { return node.hashCode(); } @Override public boolean equals(Object other) { if (other == this) return true; if ( ! (other instanceof NodeCandidate)) return false; return this.node.equals(((NodeCandidate)other).node); } static class Builder { public final Node node; private NodeResources freeParentCapacity; private Optional<Node> parent = Optional.empty(); private boolean violatesSpares; private boolean isSurplusNode; private boolean isNewNode; private boolean isResizable; Builder(Node node) { this.node = node; this.freeParentCapacity = node.flavor().resources(); } /** The free capacity of the parent, before adding this node to it */ Builder freeParentCapacity(NodeResources freeParentCapacity) { this.freeParentCapacity = freeParentCapacity; return this; } Builder parent(Node parent) { this.parent = Optional.of(parent); return this; } Builder violatesSpares(boolean violatesSpares) { this.violatesSpares = violatesSpares; return this; } Builder surplusNode(boolean surplusNode) { isSurplusNode = surplusNode; return this; } Builder newNode(boolean newNode) { isNewNode = newNode; return this; } Builder resizable(boolean 
resizable) { isResizable = resizable; return this; } NodeCandidate build() { return new NodeCandidate(node, freeParentCapacity, parent, violatesSpares, isSurplusNode, isNewNode, isResizable); } } }
```suggestion case "exclusiveTo": ```
private Node applyField(Node node, String name, Inspector value, Inspector root, boolean applyingAsChild) { switch (name) { case "currentRebootGeneration" : return node.withCurrentRebootGeneration(asLong(value), clock.instant()); case "currentRestartGeneration" : return patchCurrentRestartGeneration(node, asLong(value)); case "currentDockerImage" : if (node.type().isHost()) throw new IllegalArgumentException("Container image can only be set for child nodes"); return node.with(node.status().withContainerImage(DockerImage.fromString(asString(value)))); case "vespaVersion" : case "currentVespaVersion" : return node.with(node.status().withVespaVersion(Version.fromString(asString(value)))); case "currentOsVersion" : return node.withCurrentOsVersion(Version.fromString(asString(value)), clock.instant()); case "currentFirmwareCheck": return node.withFirmwareVerifiedAt(Instant.ofEpochMilli(asLong(value))); case "failCount" : return node.with(node.status().withFailCount(asLong(value).intValue())); case "flavor" : return node.with(nodeFlavors.getFlavorOrThrow(asString(value))); case "parentHostname" : return node.withParentHostname(asString(value)); case "ipAddresses" : return IP.Config.verify(node.with(node.ipConfig().with(asStringSet(value))), memoizedNodes.get()); case "additionalIpAddresses" : return IP.Config.verify(node.with(node.ipConfig().with(IP.Pool.of(asStringSet(value)))), memoizedNodes.get()); case WANT_TO_RETIRE : case WANT_TO_DEPROVISION : boolean wantToRetire = asOptionalBoolean(root.field(WANT_TO_RETIRE)).orElse(node.status().wantToRetire()); boolean wantToDeprovision = asOptionalBoolean(root.field(WANT_TO_DEPROVISION)).orElse(node.status().wantToDeprovision()); return node.withWantToRetire(wantToRetire, wantToDeprovision && !applyingAsChild, Agent.operator, clock.instant()); case "reports" : return nodeWithPatchedReports(node, value); case "openStackId" : return node.withOpenStackId(asString(value)); case "diskGb": case "minDiskAvailableGb": return 
node.with(node.flavor().with(node.flavor().resources().withDiskGb(value.asDouble()))); case "memoryGb": case "minMainMemoryAvailableGb": return node.with(node.flavor().with(node.flavor().resources().withMemoryGb(value.asDouble()))); case "vcpu": case "minCpuCores": return node.with(node.flavor().with(node.flavor().resources().withVcpu(value.asDouble()))); case "fastDisk": return node.with(node.flavor().with(node.flavor().resources().with(value.asBool() ? fast : slow))); case "remoteStorage": return node.with(node.flavor().with(node.flavor().resources().with(value.asBool() ? remote : local))); case "bandwidthGbps": return node.with(node.flavor().with(node.flavor().resources().withBandwidthGbps(value.asDouble()))); case "modelName": return value.type() == Type.NIX ? node.withoutModelName() : node.withModelName(asString(value)); case "requiredDiskSpeed": return patchRequiredDiskSpeed(node, asString(value)); case "reservedTo": return value.type() == Type.NIX ? node.withoutReservedTo() : node.withReservedTo(TenantName.from(value.asString())); case "exclusiveTo": return node.withExclusiveTo(SlimeUtils.optionalString(value).map(ApplicationId::fromSerializedForm).orElse(null)); case "switchHostname": return value.type() == Type.NIX ? node.withoutSwitchHostname() : node.withSwitchHostname(value.asString()); default : throw new IllegalArgumentException("Could not apply field '" + name + "' on a node: No such modifiable field"); } }
case "exclusiveTo":
/**
 * Applies a single patch field to the given node and returns the resulting node copy.
 *
 * @param node            the node to patch
 * @param name            the patch field name; selects one of the conversions below
 * @param value           the field value to apply
 * @param root            the root inspector of the whole patch; consulted so that
 *                        wantToRetire/wantToDeprovision are always applied together
 * @param applyingAsChild true when the field is being re-applied to a child of the patched host
 * @return a copy of node with the field applied
 * @throws IllegalArgumentException if the field is not modifiable or its value has the wrong type
 */
private Node applyField(Node node, String name, Inspector value, Inspector root, boolean applyingAsChild) {
    switch (name) {
        case "currentRebootGeneration" :
            return node.withCurrentRebootGeneration(asLong(value), clock.instant());
        case "currentRestartGeneration" :
            return patchCurrentRestartGeneration(node, asLong(value));
        case "currentDockerImage" :
            // Only child (non-host) nodes run in containers
            if (node.type().isHost())
                throw new IllegalArgumentException("Container image can only be set for child nodes");
            return node.with(node.status().withContainerImage(DockerImage.fromString(asString(value))));
        case "vespaVersion" :
        case "currentVespaVersion" :
            return node.with(node.status().withVespaVersion(Version.fromString(asString(value))));
        case "currentOsVersion" :
            return node.withCurrentOsVersion(Version.fromString(asString(value)), clock.instant());
        case "currentFirmwareCheck":
            return node.withFirmwareVerifiedAt(Instant.ofEpochMilli(asLong(value)));
        case "failCount" :
            return node.with(node.status().withFailCount(asLong(value).intValue()));
        case "flavor" :
            return node.with(nodeFlavors.getFlavorOrThrow(asString(value)));
        case "parentHostname" :
            return node.withParentHostname(asString(value));
        case "ipAddresses" :
            // IP config changes are verified against the full node list to catch conflicts
            return IP.Config.verify(node.with(node.ipConfig().with(asStringSet(value))), memoizedNodes.get());
        case "additionalIpAddresses" :
            return IP.Config.verify(node.with(node.ipConfig().with(IP.Pool.of(asStringSet(value)))), memoizedNodes.get());
        case WANT_TO_RETIRE :
        case WANT_TO_DEPROVISION :
            // Read both flags from the patch root (falling back to the node's current status when
            // absent) so a single PATCH updates them consistently regardless of which key triggered
            // this case. Deprovision is never propagated to children (applyingAsChild).
            boolean wantToRetire = asOptionalBoolean(root.field(WANT_TO_RETIRE)).orElse(node.status().wantToRetire());
            boolean wantToDeprovision = asOptionalBoolean(root.field(WANT_TO_DEPROVISION)).orElse(node.status().wantToDeprovision());
            return node.withWantToRetire(wantToRetire, wantToDeprovision && !applyingAsChild, Agent.operator, clock.instant());
        case "reports" :
            return nodeWithPatchedReports(node, value);
        case "openStackId" :
            return node.withOpenStackId(asString(value));
        case "diskGb":
        case "minDiskAvailableGb":
            return node.with(node.flavor().with(node.flavor().resources().withDiskGb(value.asDouble())));
        case "memoryGb":
        case "minMainMemoryAvailableGb":
            return node.with(node.flavor().with(node.flavor().resources().withMemoryGb(value.asDouble())));
        case "vcpu":
        case "minCpuCores":
            return node.with(node.flavor().with(node.flavor().resources().withVcpu(value.asDouble())));
        case "fastDisk":
            // 'fast'/'slow' (and 'remote'/'local' below) are constants from an enclosing scope,
            // presumably NodeResources.DiskSpeed/StorageType values — not visible here; TODO confirm
            return node.with(node.flavor().with(node.flavor().resources().with(value.asBool() ? fast : slow)));
        case "remoteStorage":
            return node.with(node.flavor().with(node.flavor().resources().with(value.asBool() ? remote : local)));
        case "bandwidthGbps":
            return node.with(node.flavor().with(node.flavor().resources().withBandwidthGbps(value.asDouble())));
        case "modelName":
            // A JSON null (NIX) clears the value; same convention for reservedTo/switchHostname below
            return value.type() == Type.NIX ? node.withoutModelName() : node.withModelName(asString(value));
        case "requiredDiskSpeed":
            return patchRequiredDiskSpeed(node, asString(value));
        case "reservedTo":
            return value.type() == Type.NIX ? node.withoutReservedTo() : node.withReservedTo(TenantName.from(value.asString()));
        case "exclusiveTo":
            return node.withExclusiveTo(SlimeUtils.optionalString(value).map(ApplicationId::fromSerializedForm).orElse(null));
        case "switchHostname":
            return value.type() == Type.NIX ? node.withoutSwitchHostname() : node.withSwitchHostname(value.asString());
        default :
            throw new IllegalArgumentException("Could not apply field '" + name + "' on a node: No such modifiable field");
    }
}
/**
 * Applies a JSON patch, read from an input stream, to a node and — for fields listed in
 * RECURSIVE_FIELDS — to the children of that node as well.
 *
 * NOTE(review): the applyField method this class calls is defined outside this view.
 */
class NodePatcher {

    private static final String WANT_TO_RETIRE = "wantToRetire";
    private static final String WANT_TO_DEPROVISION = "wantToDeprovision";
    // Fields that are re-applied to every child of a patched host
    private static final Set<String> RECURSIVE_FIELDS = Set.of(WANT_TO_RETIRE);

    // Memoized so the (potentially expensive) node-list fetch happens at most once per patch
    private final com.google.common.base.Supplier<LockedNodeList> memoizedNodes;
    private final PatchedNodes patchedNodes;
    private final NodeFlavors nodeFlavors;
    private final Inspector inspector;
    private final Clock clock;

    /**
     * Creates a patcher for the given node.
     *
     * @param json the request body; at most 1 MB is read
     * @throws UncheckedIOException if the request body cannot be read
     */
    public NodePatcher(NodeFlavors nodeFlavors, InputStream json, Node node, Supplier<LockedNodeList> nodes, Clock clock) {
        this.memoizedNodes = Suppliers.memoize(nodes::get);
        this.patchedNodes = new PatchedNodes(node);
        this.nodeFlavors = nodeFlavors;
        this.clock = clock;
        try {
            this.inspector = SlimeUtils.jsonToSlime(IOUtils.readBytes(json, 1000 * 1000)).get();
        } catch (IOException e) {
            throw new UncheckedIOException("Error reading request body", e);
        }
    }

    /**
     * Apply the json to the node and return all nodes affected by the patch.
     * More than 1 node may be affected if e.g. the node is a Docker host, which may have
     * children that must be updated in a consistent manner.
     */
    public List<Node> apply() {
        inspector.traverse((String name, Inspector value) -> {
            try {
                patchedNodes.update(applyField(patchedNodes.node(), name, value, inspector, false));
            } catch (IllegalArgumentException e) {
                // Re-wrap to include which field failed
                throw new IllegalArgumentException("Could not set field '" + name + "'", e);
            }
            if (RECURSIVE_FIELDS.contains(name)) {
                for (Node child: patchedNodes.children())
                    patchedNodes.update(applyField(child, name, value, inspector, true));
            }
        } );
        return patchedNodes.nodes();
    }

    /**
     * Returns the node with its reports patched: a NIX value clears all reports, otherwise each
     * entry sets or (when NIX) removes the report with that id. If the patch changes whether any
     * HARD_FAIL report is present, wantToRetire/wantToDeprovision are toggled to match — unless
     * the node is already failed (with a hard-fail present) or parked.
     */
    private Node nodeWithPatchedReports(Node node, Inspector reportsInspector) {
        Node patchedNode;
        if (reportsInspector.type() == Type.NIX) {
            patchedNode = node.with(new Reports());
        } else {
            var reportsBuilder = new Reports.Builder(node.reports());
            reportsInspector.traverse((ObjectTraverser) (reportId, reportInspector) -> {
                if (reportInspector.type() == Type.NIX) {
                    reportsBuilder.clearReport(reportId);
                } else {
                    reportsBuilder.setReport(Report.fromSlime(reportId, reportInspector));
                }
            });
            patchedNode = node.with(reportsBuilder.build());
        }

        boolean hadHardFailReports = node.reports().getReports().stream()
                .anyMatch(r -> r.getType() == Report.Type.HARD_FAIL);
        boolean hasHardFailReports = patchedNode.reports().getReports().stream()
                .anyMatch(r -> r.getType() == Report.Type.HARD_FAIL);

        // Retire the node if a HARD_FAIL report appeared; un-retire it if the last one disappeared
        if (hadHardFailReports != hasHardFailReports) {
            if ((hasHardFailReports && node.state() == Node.State.failed) || node.state() == Node.State.parked)
                return patchedNode;
            patchedNode = patchedNode.withWantToRetire(hasHardFailReports, hasHardFailReports, Agent.system, clock.instant());
        }

        return patchedNode;
    }

    /** Converts a JSON array of strings to a sorted set; rejects any other value type. */
    private Set<String> asStringSet(Inspector field) {
        if ( ! field.type().equals(Type.ARRAY))
            throw new IllegalArgumentException("Expected an ARRAY value, got a " + field.type());
        TreeSet<String> strings = new TreeSet<>();
        for (int i = 0; i < field.entries(); i++) {
            Inspector entry = field.entry(i);
            if ( ! entry.type().equals(Type.STRING))
                throw new IllegalArgumentException("Expected a STRING value, got a " + entry.type());
            strings.add(entry.asString());
        }
        return strings;
    }

    /** Sets the requested disk speed on the node's allocation; fails if the node is not allocated. */
    private Node patchRequiredDiskSpeed(Node node, String value) {
        Optional<Allocation> allocation = node.allocation();
        if (allocation.isPresent())
            return node.with(allocation.get().withRequestedResources(
                    allocation.get().requestedResources().with(NodeResources.DiskSpeed.valueOf(value))));
        else
            throw new IllegalArgumentException("Node is not allocated");
    }

    /** Sets the current restart generation on the node's allocation; fails if the node is not allocated. */
    private Node patchCurrentRestartGeneration(Node node, Long value) {
        Optional<Allocation> allocation = node.allocation();
        if (allocation.isPresent())
            return node.with(allocation.get().withRestart(allocation.get().restartGeneration().withCurrent(value)));
        else
            throw new IllegalArgumentException("Node is not allocated");
    }

    /** Returns the field as a Long, rejecting any other value type. */
    private Long asLong(Inspector field) {
        if ( ! field.type().equals(Type.LONG))
            throw new IllegalArgumentException("Expected a LONG value, got a " + field.type());
        return field.asLong();
    }

    /** Returns the field as a String, rejecting any other value type. */
    private String asString(Inspector field) {
        if ( ! field.type().equals(Type.STRING))
            throw new IllegalArgumentException("Expected a STRING value, got a " + field.type());
        return field.asString();
    }

    /** Returns the field as a boolean, rejecting any other value type. */
    private boolean asBoolean(Inspector field) {
        if ( ! field.type().equals(Type.BOOL))
            throw new IllegalArgumentException("Expected a BOOL value, got a " + field.type());
        return field.asBool();
    }

    /** Returns the field as a boolean, or empty if the field is not present (invalid). */
    private Optional<Boolean> asOptionalBoolean(Inspector field) {
        return Optional.of(field).filter(Inspector::valid).map(this::asBoolean);
    }

    /** Accumulates the patched node and (lazily fetched) patched children, keyed by hostname. */
    private class PatchedNodes {
        private final Map<String, Node> nodes = new HashMap<>();
        private final String hostname;
        // Whether children have been fetched; trivially true for non-host nodes, which have none
        private boolean fetchedChildren;

        private PatchedNodes(Node node) {
            this.hostname = node.hostname();
            nodes.put(hostname, node);
            fetchedChildren = !node.type().isHost();
        }

        /** Returns the current version of the node this patch was created for. */
        public Node node() {
            return nodes.get(hostname);
        }

        /** Returns the current versions of the children of the patched host, fetching them on first use. */
        public List<Node> children() {
            if (!fetchedChildren) {
                memoizedNodes.get().childrenOf(hostname).forEach(this::update);
                fetchedChildren = true;
            }
            return nodes.values().stream().filter(node -> !node.type().isHost()).collect(Collectors.toList());
        }

        /** Records a new version of a node. */
        public void update(Node node) {
            nodes.put(node.hostname(), node);
        }

        /** Returns all nodes touched by this patch. */
        public List<Node> nodes() {
            return List.copyOf(nodes.values());
        }
    }
}
/**
 * Applies a JSON patch, read from an input stream, to a node and — for fields listed in
 * RECURSIVE_FIELDS — to the children of that node as well.
 *
 * NOTE(review): the applyField method this class calls is defined outside this view.
 */
class NodePatcher {

    private static final String WANT_TO_RETIRE = "wantToRetire";
    private static final String WANT_TO_DEPROVISION = "wantToDeprovision";
    // Fields that are re-applied to every child of a patched host
    private static final Set<String> RECURSIVE_FIELDS = Set.of(WANT_TO_RETIRE);

    // Memoized so the (potentially expensive) node-list fetch happens at most once per patch
    private final com.google.common.base.Supplier<LockedNodeList> memoizedNodes;
    private final PatchedNodes patchedNodes;
    private final NodeFlavors nodeFlavors;
    private final Inspector inspector;
    private final Clock clock;

    /**
     * Creates a patcher for the given node.
     *
     * @param json the request body; at most 1 MB is read
     * @throws UncheckedIOException if the request body cannot be read
     */
    public NodePatcher(NodeFlavors nodeFlavors, InputStream json, Node node, Supplier<LockedNodeList> nodes, Clock clock) {
        this.memoizedNodes = Suppliers.memoize(nodes::get);
        this.patchedNodes = new PatchedNodes(node);
        this.nodeFlavors = nodeFlavors;
        this.clock = clock;
        try {
            this.inspector = SlimeUtils.jsonToSlime(IOUtils.readBytes(json, 1000 * 1000)).get();
        } catch (IOException e) {
            throw new UncheckedIOException("Error reading request body", e);
        }
    }

    /**
     * Apply the json to the node and return all nodes affected by the patch.
     * More than 1 node may be affected if e.g. the node is a Docker host, which may have
     * children that must be updated in a consistent manner.
     */
    public List<Node> apply() {
        inspector.traverse((String name, Inspector value) -> {
            try {
                patchedNodes.update(applyField(patchedNodes.node(), name, value, inspector, false));
            } catch (IllegalArgumentException e) {
                // Re-wrap to include which field failed
                throw new IllegalArgumentException("Could not set field '" + name + "'", e);
            }
            if (RECURSIVE_FIELDS.contains(name)) {
                for (Node child: patchedNodes.children())
                    patchedNodes.update(applyField(child, name, value, inspector, true));
            }
        } );
        return patchedNodes.nodes();
    }

    /**
     * Returns the node with its reports patched: a NIX value clears all reports, otherwise each
     * entry sets or (when NIX) removes the report with that id. If the patch changes whether any
     * HARD_FAIL report is present, wantToRetire/wantToDeprovision are toggled to match — unless
     * the node is already failed (with a hard-fail present) or parked.
     */
    private Node nodeWithPatchedReports(Node node, Inspector reportsInspector) {
        Node patchedNode;
        if (reportsInspector.type() == Type.NIX) {
            patchedNode = node.with(new Reports());
        } else {
            var reportsBuilder = new Reports.Builder(node.reports());
            reportsInspector.traverse((ObjectTraverser) (reportId, reportInspector) -> {
                if (reportInspector.type() == Type.NIX) {
                    reportsBuilder.clearReport(reportId);
                } else {
                    reportsBuilder.setReport(Report.fromSlime(reportId, reportInspector));
                }
            });
            patchedNode = node.with(reportsBuilder.build());
        }

        boolean hadHardFailReports = node.reports().getReports().stream()
                .anyMatch(r -> r.getType() == Report.Type.HARD_FAIL);
        boolean hasHardFailReports = patchedNode.reports().getReports().stream()
                .anyMatch(r -> r.getType() == Report.Type.HARD_FAIL);

        // Retire the node if a HARD_FAIL report appeared; un-retire it if the last one disappeared
        if (hadHardFailReports != hasHardFailReports) {
            if ((hasHardFailReports && node.state() == Node.State.failed) || node.state() == Node.State.parked)
                return patchedNode;
            patchedNode = patchedNode.withWantToRetire(hasHardFailReports, hasHardFailReports, Agent.system, clock.instant());
        }

        return patchedNode;
    }

    /** Converts a JSON array of strings to a sorted set; rejects any other value type. */
    private Set<String> asStringSet(Inspector field) {
        if ( ! field.type().equals(Type.ARRAY))
            throw new IllegalArgumentException("Expected an ARRAY value, got a " + field.type());
        TreeSet<String> strings = new TreeSet<>();
        for (int i = 0; i < field.entries(); i++) {
            Inspector entry = field.entry(i);
            if ( ! entry.type().equals(Type.STRING))
                throw new IllegalArgumentException("Expected a STRING value, got a " + entry.type());
            strings.add(entry.asString());
        }
        return strings;
    }

    /** Sets the requested disk speed on the node's allocation; fails if the node is not allocated. */
    private Node patchRequiredDiskSpeed(Node node, String value) {
        Optional<Allocation> allocation = node.allocation();
        if (allocation.isPresent())
            return node.with(allocation.get().withRequestedResources(
                    allocation.get().requestedResources().with(NodeResources.DiskSpeed.valueOf(value))));
        else
            throw new IllegalArgumentException("Node is not allocated");
    }

    /** Sets the current restart generation on the node's allocation; fails if the node is not allocated. */
    private Node patchCurrentRestartGeneration(Node node, Long value) {
        Optional<Allocation> allocation = node.allocation();
        if (allocation.isPresent())
            return node.with(allocation.get().withRestart(allocation.get().restartGeneration().withCurrent(value)));
        else
            throw new IllegalArgumentException("Node is not allocated");
    }

    /** Returns the field as a Long, rejecting any other value type. */
    private Long asLong(Inspector field) {
        if ( ! field.type().equals(Type.LONG))
            throw new IllegalArgumentException("Expected a LONG value, got a " + field.type());
        return field.asLong();
    }

    /** Returns the field as a String, rejecting any other value type. */
    private String asString(Inspector field) {
        if ( ! field.type().equals(Type.STRING))
            throw new IllegalArgumentException("Expected a STRING value, got a " + field.type());
        return field.asString();
    }

    /** Returns the field as a boolean, rejecting any other value type. */
    private boolean asBoolean(Inspector field) {
        if ( ! field.type().equals(Type.BOOL))
            throw new IllegalArgumentException("Expected a BOOL value, got a " + field.type());
        return field.asBool();
    }

    /** Returns the field as a boolean, or empty if the field is not present (invalid). */
    private Optional<Boolean> asOptionalBoolean(Inspector field) {
        return Optional.of(field).filter(Inspector::valid).map(this::asBoolean);
    }

    /** Accumulates the patched node and (lazily fetched) patched children, keyed by hostname. */
    private class PatchedNodes {
        private final Map<String, Node> nodes = new HashMap<>();
        private final String hostname;
        // Whether children have been fetched; trivially true for non-host nodes, which have none
        private boolean fetchedChildren;

        private PatchedNodes(Node node) {
            this.hostname = node.hostname();
            nodes.put(hostname, node);
            fetchedChildren = !node.type().isHost();
        }

        /** Returns the current version of the node this patch was created for. */
        public Node node() {
            return nodes.get(hostname);
        }

        /** Returns the current versions of the children of the patched host, fetching them on first use. */
        public List<Node> children() {
            if (!fetchedChildren) {
                memoizedNodes.get().childrenOf(hostname).forEach(this::update);
                fetchedChildren = true;
            }
            return nodes.values().stream().filter(node -> !node.type().isHost()).collect(Collectors.toList());
        }

        /** Records a new version of a node. */
        public void update(Node node) {
            nodes.put(node.hostname(), node);
        }

        /** Returns all nodes touched by this patch. */
        public List<Node> nodes() {
            return List.copyOf(nodes.values());
        }
    }
}
To spread out retiring in time? OK, I buy that.
/**
 * Compares this candidate to another by allocation preference.
 * Earlier checks dominate later ones; ties fall through to the next criterion.
 *
 * @return negative if this candidate should be preferred over the other
 * @throws IllegalStateException if the two candidates are in different, non-ordered node states
 */
public int compareTo(NodeCandidate other) {
    // Spare-policy violations rank last
    if (!this.violatesSpares && other.violatesSpares) return -1;
    if (!other.violatesSpares && this.violatesSpares) return 1;

    // Prefer candidates on an exclusive network switch
    if (this.exclusiveSwitch && !other.exclusiveSwitch) return -1;
    if (other.exclusiveSwitch && !this.exclusiveSwitch) return 1;

    // Prefer node states in order: active, reserved (in node repo), inactive, ready
    if (this.node.state() == Node.State.active && other.node.state() != Node.State.active) return -1;
    if (other.node.state() == Node.State.active && this.node.state() != Node.State.active) return 1;

    // Prefer nodes that are not surplus to the new deployment
    if (!this.isSurplusNode && other.isSurplusNode) return -1;
    if (!other.isSurplusNode && this.isSurplusNode) return 1;

    if (this.isInNodeRepoAndReserved() && ! other.isInNodeRepoAndReserved()) return -1;
    if (other.isInNodeRepoAndReserved() && ! this.isInNodeRepoAndReserved()) return 1;

    if (this.node.state() == Node.State.inactive && other.node.state() != Node.State.inactive) return -1;
    if (other.node.state() == Node.State.inactive && this.node.state() != Node.State.inactive) return 1;

    if (this.node.state() == Node.State.ready && other.node.state() != Node.State.ready) return -1;
    if (other.node.state() == Node.State.ready && this.node.state() != Node.State.ready) return 1;

    if (this.node.state() != other.node.state())
        throw new IllegalStateException("Nodes " + this.node + " and " + other.node + " have different states");

    if (this.parent.isPresent() && other.parent.isPresent()) {
        // Prefer reserved hosts, then cheaper disk speed and storage type,
        // then hosts where this node would use less than half the capacity
        if ( this.parent.get().reservedTo().isPresent() && ! other.parent.get().reservedTo().isPresent()) return -1;
        if ( ! this.parent.get().reservedTo().isPresent() && other.parent.get().reservedTo().isPresent()) return 1;

        int diskCostDifference = NodeResources.DiskSpeed.compare(this.parent.get().flavor().resources().diskSpeed(),
                                                                 other.parent.get().flavor().resources().diskSpeed());
        if (diskCostDifference != 0)
            return diskCostDifference;

        int storageCostDifference = NodeResources.StorageType.compare(this.parent.get().flavor().resources().storageType(),
                                                                      other.parent.get().flavor().resources().storageType());
        if (storageCostDifference != 0)
            return storageCostDifference;

        if ( lessThanHalfTheHost(this) && ! lessThanHalfTheHost(other)) return -1;
        if ( ! lessThanHalfTheHost(this) && lessThanHalfTheHost(other)) return 1;
    }

    // Prefer the candidate whose placement increases allocation skew the least
    int hostPriority = Double.compare(this.skewWithThis() - this.skewWithoutThis(),
                                      other.skewWithThis() - other.skewWithoutThis());
    if (hostPriority != 0) return hostPriority;

    // Prefer cheaper flavors
    if (this.node.flavor().cost() < other.node.flavor().cost()) return -1;
    if (other.node.flavor().cost() < this.node.flavor().cost()) return 1;

    // Prefer hosts in a more preferred state (higher index in HOST_STATE_PRIORITY);
    // a missing parent ranks below any listed or unlisted (-1) state
    int thisHostStatePri = this.parent.map(host -> HOST_STATE_PRIORITY.indexOf(host.state())).orElse(-2);
    int otherHostStatePri = other.parent.map(host -> HOST_STATE_PRIORITY.indexOf(host.state())).orElse(-2);
    if (thisHostStatePri != otherHostStatePri)
        // Integer.compare instead of raw subtraction: same ordering, no overflow risk
        return Integer.compare(otherHostStatePri, thisHostStatePri);

    // Prefer lower membership index, then stable hostname order as the final tie breaker
    if (this.node.allocation().isPresent() && other.node.allocation().isPresent())
        return Integer.compare(this.node.allocation().get().membership().index(),
                               other.node.allocation().get().membership().index());

    return this.node.hostname().compareTo(other.node.hostname());
}
/**
 * Compares this candidate to another by allocation preference.
 * Earlier checks dominate later ones; ties fall through to the next criterion.
 *
 * @return negative if this candidate should be preferred over the other
 * @throws IllegalStateException if the two candidates are in different, non-ordered node states
 */
public int compareTo(NodeCandidate other) {
    // Spare-policy violations rank last
    if (!this.violatesSpares && other.violatesSpares) return -1;
    if (!other.violatesSpares && this.violatesSpares) return 1;

    // Prefer node states in order: active, reserved (in node repo), inactive, ready
    if (this.node.state() == Node.State.active && other.node.state() != Node.State.active) return -1;
    if (other.node.state() == Node.State.active && this.node.state() != Node.State.active) return 1;

    // Prefer nodes that are not surplus to the new deployment
    if (!this.isSurplusNode && other.isSurplusNode) return -1;
    if (!other.isSurplusNode && this.isSurplusNode) return 1;

    if (this.isInNodeRepoAndReserved() && ! other.isInNodeRepoAndReserved()) return -1;
    if (other.isInNodeRepoAndReserved() && ! this.isInNodeRepoAndReserved()) return 1;

    if (this.node.state() == Node.State.inactive && other.node.state() != Node.State.inactive) return -1;
    if (other.node.state() == Node.State.inactive && this.node.state() != Node.State.inactive) return 1;

    if (this.node.state() == Node.State.ready && other.node.state() != Node.State.ready) return -1;
    if (other.node.state() == Node.State.ready && this.node.state() != Node.State.ready) return 1;

    if (this.node.state() != other.node.state())
        throw new IllegalStateException("Nodes " + this.node + " and " + other.node + " have different states");

    if (this.parent.isPresent() && other.parent.isPresent()) {
        // Prefer reserved hosts, then cheaper disk speed and storage type,
        // then hosts where this node would use less than half the capacity
        if ( this.parent.get().reservedTo().isPresent() && ! other.parent.get().reservedTo().isPresent()) return -1;
        if ( ! this.parent.get().reservedTo().isPresent() && other.parent.get().reservedTo().isPresent()) return 1;

        int diskCostDifference = NodeResources.DiskSpeed.compare(this.parent.get().flavor().resources().diskSpeed(),
                                                                 other.parent.get().flavor().resources().diskSpeed());
        if (diskCostDifference != 0)
            return diskCostDifference;

        int storageCostDifference = NodeResources.StorageType.compare(this.parent.get().flavor().resources().storageType(),
                                                                      other.parent.get().flavor().resources().storageType());
        if (storageCostDifference != 0)
            return storageCostDifference;

        if ( lessThanHalfTheHost(this) && ! lessThanHalfTheHost(other)) return -1;
        if ( ! lessThanHalfTheHost(this) && lessThanHalfTheHost(other)) return 1;
    }

    // Prefer the candidate whose placement increases allocation skew the least
    int hostPriority = Double.compare(this.skewWithThis() - this.skewWithoutThis(),
                                      other.skewWithThis() - other.skewWithoutThis());
    if (hostPriority != 0) return hostPriority;

    // Prefer cheaper flavors
    if (this.node.flavor().cost() < other.node.flavor().cost()) return -1;
    if (other.node.flavor().cost() < this.node.flavor().cost()) return 1;

    // Prefer hosts in a more preferred state (higher index in HOST_STATE_PRIORITY);
    // a missing parent ranks below any listed or unlisted (-1) state
    int thisHostStatePri = this.parent.map(host -> HOST_STATE_PRIORITY.indexOf(host.state())).orElse(-2);
    int otherHostStatePri = other.parent.map(host -> HOST_STATE_PRIORITY.indexOf(host.state())).orElse(-2);
    if (thisHostStatePri != otherHostStatePri)
        // Integer.compare instead of raw subtraction: same ordering, no overflow risk
        return Integer.compare(otherHostStatePri, thisHostStatePri);

    // Prefer lower membership index, then stable hostname order as the final tie breaker
    if (this.node.allocation().isPresent() && other.node.allocation().isPresent())
        return Integer.compare(this.node.allocation().get().membership().index(),
                               other.node.allocation().get().membership().index());

    return this.node.hostname().compareTo(other.node.hostname());
}
/**
 * A candidate node for an allocation, with the context needed to rank it against other candidates.
 * Instances are immutable; equality and hash are delegated to the wrapped node.
 */
class NodeCandidate implements Comparable<NodeCandidate> {

    /** List of host states ordered by preference (ascending) */
    private static final List<Node.State> HOST_STATE_PRIORITY =
            List.of(Node.State.provisioned, Node.State.ready, Node.State.active);

    private static final NodeResources zeroResources =
            new NodeResources(0, 0, 0, 0, NodeResources.DiskSpeed.any, NodeResources.StorageType.any);

    final Node node;

    /** The free capacity on the parent of this node, before adding this node to it */
    private final NodeResources freeParentCapacity;

    /** The parent host (docker or hypervisor) */
    final Optional<Node> parent;

    /** True if this node is allocated on a host that should be dedicated as a spare */
    final boolean violatesSpares;

    /** True if this node is allocated on an exclusive network switch in its cluster */
    final boolean exclusiveSwitch;

    /** True if this node belongs to a group which will not be needed after this deployment */
    final boolean isSurplusNode;

    /** This node does not exist in the node repository yet */
    final boolean isNewNode;

    /** This node can be resized to the new NodeResources */
    final boolean isResizable;

    NodeCandidate(Node node, NodeResources freeParentCapacity, Optional<Node> parent, boolean violatesSpares,
                  boolean exclusiveSwitch, boolean isSurplusNode, boolean isNewNode, boolean isResizeable) {
        // A node not yet in the repository has nothing to resize from
        if (isResizeable && isNewNode) {
            throw new IllegalArgumentException("A new node cannot be resizable");
        }
        this.node = node;
        this.freeParentCapacity = freeParentCapacity;
        this.parent = parent;
        this.violatesSpares = violatesSpares;
        this.exclusiveSwitch = exclusiveSwitch;
        this.isSurplusNode = isSurplusNode;
        this.isNewNode = isNewNode;
        this.isResizable = isResizeable;
    }

    /**
     * Compare this candidate to another
     *
     * @return negative if this should be preferred over other
     */
    @Override
    // NOTE(review): the compareTo implementation this @Override annotates is defined outside this view
    /** Returns the allocation skew of the parent of this before adding this node to it */
    double skewWithoutThis() { return skewWith(zeroResources); }

    /** Returns the allocation skew of the parent of this after adding this node to it */
    double skewWithThis() { return skewWith(node.resources()); }

    /** Returns a copy of this with node set to given value */
    NodeCandidate withNode(Node node) {
        return new NodeCandidate(node, freeParentCapacity, parent, violatesSpares, exclusiveSwitch,
                                 isSurplusNode, isNewNode, isResizable);
    }

    /** Returns whether the candidate would occupy less than half of its parent's total resources. */
    private boolean lessThanHalfTheHost(NodeCandidate node) {
        var n = node.node.resources();
        var h = node.parent.get().resources();
        if (h.vcpu() < n.vcpu() * 2) return false;
        if (h.memoryGb() < n.memoryGb() * 2) return false;
        if (h.diskGb() < n.diskGb() * 2) return false;
        return true;
    }

    /** Returns the parent's allocation skew after subtracting the given resources from its free capacity. */
    private double skewWith(NodeResources resources) {
        if (parent.isEmpty()) return 0; // no parent: skew is not meaningful
        NodeResources free = freeParentCapacity.justNumbers().subtract(resources.justNumbers());
        return Node.skew(parent.get().flavor().resources(), free);
    }

    /** Returns whether this candidate already exists in the node repository in state reserved. */
    private boolean isInNodeRepoAndReserved() {
        if (isNewNode) return false;
        return node.state().equals(Node.State.reserved);
    }

    @Override
    public String toString() { return node.id(); }

    @Override
    public int hashCode() { return node.hashCode(); }

    @Override
    public boolean equals(Object other) {
        if (other == this) return true;
        if ( ! (other instanceof NodeCandidate)) return false;
        return this.node.equals(((NodeCandidate)other).node);
    }

    /** Builder for NodeCandidate; freeParentCapacity defaults to the node's own flavor resources. */
    static class Builder {

        public final Node node;
        private NodeResources freeParentCapacity;
        private Optional<Node> parent = Optional.empty();
        private boolean violatesSpares;
        private boolean isSurplusNode;
        private boolean isNewNode;
        private boolean isResizable;
        private boolean exclusiveSwitch;

        Builder(Node node) {
            this.node = node;
            this.freeParentCapacity = node.flavor().resources();
        }

        /** The free capacity of the parent, before adding this node to it */
        Builder freeParentCapacity(NodeResources freeParentCapacity) {
            this.freeParentCapacity = freeParentCapacity;
            return this;
        }

        Builder parent(Node parent) {
            this.parent = Optional.of(parent);
            return this;
        }

        Builder violatesSpares(boolean violatesSpares) {
            this.violatesSpares = violatesSpares;
            return this;
        }

        Builder exclusiveSwitch(boolean exclusiveSwitch) {
            this.exclusiveSwitch = exclusiveSwitch;
            return this;
        }

        Builder surplusNode(boolean surplusNode) {
            isSurplusNode = surplusNode;
            return this;
        }

        Builder newNode(boolean newNode) {
            isNewNode = newNode;
            return this;
        }

        Builder resizable(boolean resizable) {
            isResizable = resizable;
            return this;
        }

        NodeCandidate build() {
            return new NodeCandidate(node, freeParentCapacity, parent, violatesSpares, exclusiveSwitch,
                                     isSurplusNode, isNewNode, isResizable);
        }
    }
}
/**
 * A candidate node for an allocation, with the context needed to rank it against other candidates.
 * Instances are immutable; equality and hash are delegated to the wrapped node.
 */
class NodeCandidate implements Comparable<NodeCandidate> {

    /** List of host states ordered by preference (ascending) */
    private static final List<Node.State> HOST_STATE_PRIORITY =
            List.of(Node.State.provisioned, Node.State.ready, Node.State.active);

    private static final NodeResources zeroResources =
            new NodeResources(0, 0, 0, 0, NodeResources.DiskSpeed.any, NodeResources.StorageType.any);

    final Node node;

    /** The free capacity on the parent of this node, before adding this node to it */
    private final NodeResources freeParentCapacity;

    /** The parent host (docker or hypervisor) */
    final Optional<Node> parent;

    /** True if the node is allocated to a host that should be dedicated as a spare */
    final boolean violatesSpares;

    /** True if this node belongs to a group which will not be needed after this deployment */
    final boolean isSurplusNode;

    /** This node does not exist in the node repository yet */
    final boolean isNewNode;

    /** This node can be resized to the new NodeResources */
    final boolean isResizable;

    NodeCandidate(Node node, NodeResources freeParentCapacity, Optional<Node> parent, boolean violatesSpares,
                  boolean isSurplusNode, boolean isNewNode, boolean isResizeable) {
        // A node not yet in the repository has nothing to resize from
        if (isResizeable && isNewNode)
            throw new IllegalArgumentException("A new node cannot be resizable");
        this.node = node;
        this.freeParentCapacity = freeParentCapacity;
        this.parent = parent;
        this.violatesSpares = violatesSpares;
        this.isSurplusNode = isSurplusNode;
        this.isNewNode = isNewNode;
        this.isResizable = isResizeable;
    }

    /**
     * Compare this candidate to another
     *
     * @return negative if first priority is higher than second node
     */
    @Override
    // NOTE(review): the compareTo implementation this @Override annotates is defined outside this view
    /** Returns the allocation skew of the parent of this before adding this node to it */
    double skewWithoutThis() { return skewWith(zeroResources); }

    /** Returns the allocation skew of the parent of this after adding this node to it */
    double skewWithThis() { return skewWith(node.resources()); }

    /** Returns a copy of this with node set to given value */
    NodeCandidate withNode(Node node) {
        return new NodeCandidate(node, freeParentCapacity, parent, violatesSpares,
                                 isSurplusNode, isNewNode, isResizable);
    }

    /** Returns whether the candidate would occupy less than half of its parent's total resources. */
    private boolean lessThanHalfTheHost(NodeCandidate node) {
        var n = node.node.resources();
        var h = node.parent.get().resources();
        if (h.vcpu() < n.vcpu() * 2) return false;
        if (h.memoryGb() < n.memoryGb() * 2) return false;
        if (h.diskGb() < n.diskGb() * 2) return false;
        return true;
    }

    /** Returns the parent's allocation skew after subtracting the given resources from its free capacity. */
    private double skewWith(NodeResources resources) {
        if (parent.isEmpty()) return 0; // no parent: skew is not meaningful
        NodeResources free = freeParentCapacity.justNumbers().subtract(resources.justNumbers());
        return Node.skew(parent.get().flavor().resources(), free);
    }

    /** Returns whether this candidate already exists in the node repository in state reserved. */
    private boolean isInNodeRepoAndReserved() {
        if (isNewNode) return false;
        return node.state().equals(Node.State.reserved);
    }

    @Override
    public String toString() { return node.id(); }

    @Override
    public int hashCode() { return node.hashCode(); }

    @Override
    public boolean equals(Object other) {
        if (other == this) return true;
        if ( ! (other instanceof NodeCandidate)) return false;
        return this.node.equals(((NodeCandidate)other).node);
    }

    /** Builder for NodeCandidate; freeParentCapacity defaults to the node's own flavor resources. */
    static class Builder {

        public final Node node;
        private NodeResources freeParentCapacity;
        private Optional<Node> parent = Optional.empty();
        private boolean violatesSpares;
        private boolean isSurplusNode;
        private boolean isNewNode;
        private boolean isResizable;

        Builder(Node node) {
            this.node = node;
            this.freeParentCapacity = node.flavor().resources();
        }

        /** The free capacity of the parent, before adding this node to it */
        Builder freeParentCapacity(NodeResources freeParentCapacity) {
            this.freeParentCapacity = freeParentCapacity;
            return this;
        }

        Builder parent(Node parent) {
            this.parent = Optional.of(parent);
            return this;
        }

        Builder violatesSpares(boolean violatesSpares) {
            this.violatesSpares = violatesSpares;
            return this;
        }

        Builder surplusNode(boolean surplusNode) {
            isSurplusNode = surplusNode;
            return this;
        }

        Builder newNode(boolean newNode) {
            isNewNode = newNode;
            return this;
        }

        Builder resizable(boolean resizable) {
            isResizable = resizable;
            return this;
        }

        NodeCandidate build() {
            return new NodeCandidate(node, freeParentCapacity, parent, violatesSpares,
                                     isSurplusNode, isNewNode, isResizable);
        }
    }
}
Same as above.
/** Reads a TenantInfoAddress from the given slime object; absent fields become empty strings. */
private TenantInfoAddress tenantInfoAddressFromSlime(Inspector addressObject) {
    TenantInfoAddress address = TenantInfoAddress.EmptyAddress;
    address = address.withAddressLines(addressObject.field("addressLines").asString());
    address = address.withPostalCodeOrZip(addressObject.field("postalCodeOrZip").asString());
    address = address.withCity(addressObject.field("city").asString());
    address = address.withStateRegionProvince(addressObject.field("stateRegionProvince").asString());
    address = address.withCountry(addressObject.field("country").asString());
    return address;
}
return TenantInfoAddress.EmptyAddress
/** Reads a TenantInfoAddress from the given slime object; absent fields become empty strings. */
private TenantInfoAddress tenantInfoAddressFromSlime(Inspector addressObject) {
    TenantInfoAddress address = TenantInfoAddress.EMPTY;
    address = address.withAddressLines(addressObject.field("addressLines").asString());
    address = address.withPostalCodeOrZip(addressObject.field("postalCodeOrZip").asString());
    address = address.withCity(addressObject.field("city").asString());
    address = address.withStateRegionProvince(addressObject.field("stateRegionProvince").asString());
    address = address.withCountry(addressObject.field("country").asString());
    return address;
}
/**
 * Serializes {@link Tenant} instances to and from Slime.
 *
 * Fix in this revision: the billing-contact writer stored its fields under an object
 * named "address" while the reader ({@code tenantInfoBillingContactFromSlime}) looks
 * them up under "billingContact" — so billing contact data was lost on round-trip and
 * could collide with the tenant address object. It is now written under "billingContact".
 *
 * NOTE(review): {@code tenantInfoAddressFromSlime} is referenced below but defined
 * outside this excerpt. The constant {@code tenantInfo} breaks the {@code ...Field}
 * naming convention of its siblings.
 */
class TenantSerializer {

    private static final String nameField = "name";
    private static final String typeField = "type";
    private static final String athenzDomainField = "athenzDomain";
    private static final String propertyField = "property";
    private static final String propertyIdField = "propertyId";
    private static final String creatorField = "creator";
    private static final String createdAtField = "createdAt";
    private static final String contactField = "contact";
    private static final String contactUrlField = "contactUrl";
    private static final String propertyUrlField = "propertyUrl";
    private static final String issueTrackerUrlField = "issueTrackerUrl";
    private static final String personsField = "persons";
    private static final String personField = "person";
    private static final String queueField = "queue";
    private static final String componentField = "component";
    private static final String billingInfoField = "billingInfo";
    private static final String customerIdField = "customerId";
    private static final String productCodeField = "productCode";
    private static final String pemDeveloperKeysField = "pemDeveloperKeys";
    private static final String tenantInfo = "info";

    /** Serializes the given tenant to a new Slime tree, dispatching on its type. */
    public Slime toSlime(Tenant tenant) {
        Slime slime = new Slime();
        Cursor tenantObject = slime.setObject();
        tenantObject.setString(nameField, tenant.name().value());
        tenantObject.setString(typeField, valueOf(tenant.type()));
        switch (tenant.type()) {
            case athenz: toSlime((AthenzTenant) tenant, tenantObject); break;
            case cloud: toSlime((CloudTenant) tenant, tenantObject); break;
            default: throw new IllegalArgumentException("Unexpected tenant type '" + tenant.type() + "'.");
        }
        return slime;
    }

    private void toSlime(AthenzTenant tenant, Cursor tenantObject) {
        tenantObject.setString(athenzDomainField, tenant.domain().getName());
        tenantObject.setString(propertyField, tenant.property().id());
        tenant.propertyId().ifPresent(propertyId -> tenantObject.setString(propertyIdField, propertyId.id()));
        tenant.contact().ifPresent(contact -> {
            Cursor contactCursor = tenantObject.setObject(contactField);
            writeContact(contact, contactCursor);
        });
    }

    private void toSlime(CloudTenant tenant, Cursor root) {
        // Presumably kept for readers still expecting a billingInfo object — TODO confirm.
        var legacyBillingInfo = new BillingInfo("customer", "Vespa");
        tenant.creator().ifPresent(creator -> root.setString(creatorField, creator.getName()));
        developerKeysToSlime(tenant.developerKeys(), root.setArray(pemDeveloperKeysField));
        toSlime(legacyBillingInfo, root.setObject(billingInfoField));
        toSlime(tenant.info(), root);
    }

    private void developerKeysToSlime(BiMap<PublicKey, Principal> keys, Cursor array) {
        keys.forEach((key, user) -> {
            Cursor object = array.addObject();
            object.setString("key", KeyUtils.toPem(key));
            object.setString("user", user.getName());
        });
    }

    private void toSlime(BillingInfo billingInfo, Cursor billingInfoObject) {
        billingInfoObject.setString(customerIdField, billingInfo.customerId());
        billingInfoObject.setString(productCodeField, billingInfo.productCode());
    }

    /** Deserializes a tenant from the given Slime tree, dispatching on the stored type field. */
    public Tenant tenantFrom(Slime slime) {
        Inspector tenantObject = slime.get();
        Tenant.Type type;
        type = typeOf(tenantObject.field(typeField).asString());
        switch (type) {
            case athenz: return athenzTenantFrom(tenantObject);
            case cloud: return cloudTenantFrom(tenantObject);
            default: throw new IllegalArgumentException("Unexpected tenant type '" + type + "'.");
        }
    }

    private AthenzTenant athenzTenantFrom(Inspector tenantObject) {
        TenantName name = TenantName.from(tenantObject.field(nameField).asString());
        AthenzDomain domain = new AthenzDomain(tenantObject.field(athenzDomainField).asString());
        Property property = new Property(tenantObject.field(propertyField).asString());
        Optional<PropertyId> propertyId = SlimeUtils.optionalString(tenantObject.field(propertyIdField)).map(PropertyId::new);
        Optional<Contact> contact = contactFrom(tenantObject.field(contactField));
        return new AthenzTenant(name, domain, property, propertyId, contact);
    }

    private CloudTenant cloudTenantFrom(Inspector tenantObject) {
        TenantName name = TenantName.from(tenantObject.field(nameField).asString());
        Optional<Principal> creator = SlimeUtils.optionalString(tenantObject.field(creatorField)).map(SimplePrincipal::new);
        BiMap<PublicKey, Principal> developerKeys = developerKeysFromSlime(tenantObject.field(pemDeveloperKeysField));
        TenantInfo info = tenantInfoFromSlime(tenantObject.field(tenantInfo));
        return new CloudTenant(name, creator, developerKeys, info);
    }

    private BiMap<PublicKey, Principal> developerKeysFromSlime(Inspector array) {
        ImmutableBiMap.Builder<PublicKey, Principal> keys = ImmutableBiMap.builder();
        array.traverse((ArrayTraverser) (__, keyObject) ->
                keys.put(KeyUtils.fromPemEncodedPublicKey(keyObject.field("key").asString()),
                         new SimplePrincipal(keyObject.field("user").asString())));
        return keys.build();
    }

    private TenantInfo tenantInfoFromSlime(Inspector infoObject) {
        return TenantInfo.EmptyInfo
                .withName(infoObject.field("name").asString())
                .withEmail(infoObject.field("email").asString())
                .withWebsite(infoObject.field("website").asString())
                .withContactName(infoObject.field("contactName").asString())
                .withContactEmail(infoObject.field("contactEmail").asString())
                .withAddress(tenantInfoAddressFromSlime(infoObject.field("address")))
                .withBillingContact(tenantInfoBillingContactFromSlime(infoObject.field("billingContact")));
    }

    private TenantInfoBillingContact tenantInfoBillingContactFromSlime(Inspector billingObject) {
        return TenantInfoBillingContact.EmptyBillingContact
                .withName(billingObject.field("name").asString())
                .withEmail(billingObject.field("email").asString())
                .withPhone(billingObject.field("phone").asString())
                .withAddress(tenantInfoAddressFromSlime(billingObject.field("address")));
    }

    public void toSlime(TenantInfo info, Cursor parentCursor) {
        if (info.isEmpty()) return; // empty info is serialized as absence of the "info" object
        Cursor infoCursor = parentCursor.setObject("info");
        infoCursor.setString("name", info.name());
        infoCursor.setString("email", info.email());
        infoCursor.setString("website", info.website());
        infoCursor.setString("contactName", info.contactName());
        infoCursor.setString("contactEmail", info.contactEmail());
        toSlime(info.address(), infoCursor);
        toSlime(info.billingContact(), infoCursor);
    }

    private void toSlime(TenantInfoAddress address, Cursor parentCursor) {
        if (address.isEmpty()) return;
        Cursor addressCursor = parentCursor.setObject("address");
        addressCursor.setString("addressLines", address.addressLines());
        addressCursor.setString("postalCodeOrZip", address.postalCodeOrZip());
        addressCursor.setString("city", address.city());
        addressCursor.setString("stateRegionProvince", address.stateRegionProvince());
        addressCursor.setString("country", address.country());
    }

    private void toSlime(TenantInfoBillingContact billingContact, Cursor parentCursor) {
        if (billingContact.isEmpty()) return;
        // Fix: write under "billingContact" — the key the reader uses — not "address",
        // which collided with the tenant address object.
        Cursor billingCursor = parentCursor.setObject("billingContact");
        billingCursor.setString("name", billingContact.name());
        billingCursor.setString("email", billingContact.email());
        billingCursor.setString("phone", billingContact.phone());
        toSlime(billingContact.address(), billingCursor);
    }

    private Optional<Contact> contactFrom(Inspector object) {
        if ( ! object.valid()) return Optional.empty();
        URI contactUrl = URI.create(object.field(contactUrlField).asString());
        URI propertyUrl = URI.create(object.field(propertyUrlField).asString());
        URI issueTrackerUrl = URI.create(object.field(issueTrackerUrlField).asString());
        List<List<String>> persons = personsFrom(object.field(personsField));
        String queue = object.field(queueField).asString();
        Optional<String> component = object.field(componentField).valid() ?
                                     Optional.of(object.field(componentField).asString()) : Optional.empty();
        return Optional.of(new Contact(contactUrl, propertyUrl, issueTrackerUrl, persons, queue, component));
    }

    private void writeContact(Contact contact, Cursor contactCursor) {
        contactCursor.setString(contactUrlField, contact.url().toString());
        contactCursor.setString(propertyUrlField, contact.propertyUrl().toString());
        contactCursor.setString(issueTrackerUrlField, contact.issueTrackerUrl().toString());
        Cursor personsArray = contactCursor.setArray(personsField);
        contact.persons().forEach(personList -> {
            Cursor personArray = personsArray.addArray();
            personList.forEach(person -> {
                Cursor personObject = personArray.addObject();
                personObject.setString(personField, person);
            });
        });
        contactCursor.setString(queueField, contact.queue());
        contact.component().ifPresent(component -> contactCursor.setString(componentField, component));
    }

    private List<List<String>> personsFrom(Inspector array) {
        List<List<String>> personLists = new ArrayList<>();
        array.traverse((ArrayTraverser) (i, personArray) -> {
            List<String> persons = new ArrayList<>();
            personArray.traverse((ArrayTraverser) (j, inspector) -> persons.add(inspector.field("person").asString()));
            personLists.add(persons);
        });
        return personLists;
    }

    private BillingInfo billingInfoFrom(Inspector billingInfoObject) {
        return new BillingInfo(billingInfoObject.field(customerIdField).asString(),
                               billingInfoObject.field(productCodeField).asString());
    }

    private static Tenant.Type typeOf(String value) {
        switch (value) {
            case "athenz": return Tenant.Type.athenz;
            case "cloud": return Tenant.Type.cloud;
            default: throw new IllegalArgumentException("Unknown tenant type '" + value + "'.");
        }
    }

    private static String valueOf(Tenant.Type type) {
        switch (type) {
            case athenz: return "athenz";
            case cloud: return "cloud";
            default: throw new IllegalArgumentException("Unexpected tenant type '" + type + "'.");
        }
    }

}
/**
 * Serializes {@link Tenant} instances to and from Slime.
 *
 * This is the revised serializer: the billing contact is written under the
 * "billingContact" object (matching the reader), and the info object carries
 * an "invoiceEmail" field and an explicit validity guard in
 * {@code tenantInfoFromSlime}.
 *
 * NOTE(review): {@code tenantInfoAddressFromSlime} is referenced below but is
 * defined outside this excerpt — presumably nearby in the same class; verify.
 * {@code createdAtField} and {@code billingInfoFrom} appear unused here, but
 * may be used by code outside this view.
 */
class TenantSerializer { private static final String nameField = "name"; private static final String typeField = "type"; private static final String athenzDomainField = "athenzDomain"; private static final String propertyField = "property"; private static final String propertyIdField = "propertyId"; private static final String creatorField = "creator"; private static final String createdAtField = "createdAt"; private static final String contactField = "contact"; private static final String contactUrlField = "contactUrl"; private static final String propertyUrlField = "propertyUrl"; private static final String issueTrackerUrlField = "issueTrackerUrl"; private static final String personsField = "persons"; private static final String personField = "person"; private static final String queueField = "queue"; private static final String componentField = "component"; private static final String billingInfoField = "billingInfo"; private static final String customerIdField = "customerId"; private static final String productCodeField = "productCode"; private static final String pemDeveloperKeysField = "pemDeveloperKeys"; private static final String tenantInfoField = "info"; public Slime toSlime(Tenant tenant) { Slime slime = new Slime(); Cursor tenantObject = slime.setObject(); tenantObject.setString(nameField, tenant.name().value()); tenantObject.setString(typeField, valueOf(tenant.type())); switch (tenant.type()) { case athenz: toSlime((AthenzTenant) tenant, tenantObject); break; case cloud: toSlime((CloudTenant) tenant, tenantObject); break; default: throw new IllegalArgumentException("Unexpected tenant type '" + tenant.type() + "'."); } return slime; } private void toSlime(AthenzTenant tenant, Cursor tenantObject) { tenantObject.setString(athenzDomainField, tenant.domain().getName()); tenantObject.setString(propertyField, tenant.property().id()); tenant.propertyId().ifPresent(propertyId -> tenantObject.setString(propertyIdField, propertyId.id())); 
tenant.contact().ifPresent(contact -> { Cursor contactCursor = tenantObject.setObject(contactField); writeContact(contact, contactCursor); }); } private void toSlime(CloudTenant tenant, Cursor root) { var legacyBillingInfo = new BillingInfo("customer", "Vespa"); tenant.creator().ifPresent(creator -> root.setString(creatorField, creator.getName())); developerKeysToSlime(tenant.developerKeys(), root.setArray(pemDeveloperKeysField)); toSlime(legacyBillingInfo, root.setObject(billingInfoField)); toSlime(tenant.info(), root); } private void developerKeysToSlime(BiMap<PublicKey, Principal> keys, Cursor array) { keys.forEach((key, user) -> { Cursor object = array.addObject(); object.setString("key", KeyUtils.toPem(key)); object.setString("user", user.getName()); }); } private void toSlime(BillingInfo billingInfo, Cursor billingInfoObject) { billingInfoObject.setString(customerIdField, billingInfo.customerId()); billingInfoObject.setString(productCodeField, billingInfo.productCode()); } public Tenant tenantFrom(Slime slime) { Inspector tenantObject = slime.get(); Tenant.Type type; type = typeOf(tenantObject.field(typeField).asString()); switch (type) { case athenz: return athenzTenantFrom(tenantObject); case cloud: return cloudTenantFrom(tenantObject); default: throw new IllegalArgumentException("Unexpected tenant type '" + type + "'."); } } private AthenzTenant athenzTenantFrom(Inspector tenantObject) { TenantName name = TenantName.from(tenantObject.field(nameField).asString()); AthenzDomain domain = new AthenzDomain(tenantObject.field(athenzDomainField).asString()); Property property = new Property(tenantObject.field(propertyField).asString()); Optional<PropertyId> propertyId = SlimeUtils.optionalString(tenantObject.field(propertyIdField)).map(PropertyId::new); Optional<Contact> contact = contactFrom(tenantObject.field(contactField)); return new AthenzTenant(name, domain, property, propertyId, contact); } private CloudTenant cloudTenantFrom(Inspector tenantObject) { 
TenantName name = TenantName.from(tenantObject.field(nameField).asString()); Optional<Principal> creator = SlimeUtils.optionalString(tenantObject.field(creatorField)).map(SimplePrincipal::new); BiMap<PublicKey, Principal> developerKeys = developerKeysFromSlime(tenantObject.field(pemDeveloperKeysField)); TenantInfo info = tenantInfoFromSlime(tenantObject.field(tenantInfoField)); return new CloudTenant(name, creator, developerKeys, info); } private BiMap<PublicKey, Principal> developerKeysFromSlime(Inspector array) { ImmutableBiMap.Builder<PublicKey, Principal> keys = ImmutableBiMap.builder(); array.traverse((ArrayTraverser) (__, keyObject) -> keys.put(KeyUtils.fromPemEncodedPublicKey(keyObject.field("key").asString()), new SimplePrincipal(keyObject.field("user").asString()))); return keys.build(); } TenantInfo tenantInfoFromSlime(Inspector infoObject) { if (!infoObject.valid()) return TenantInfo.EMPTY; return TenantInfo.EMPTY .withName(infoObject.field("name").asString()) .withEmail(infoObject.field("email").asString()) .withWebsite(infoObject.field("website").asString()) .withContactName(infoObject.field("contactName").asString()) .withContactEmail(infoObject.field("contactEmail").asString()) .withInvoiceEmail(infoObject.field("invoiceEmail").asString()) .withAddress(tenantInfoAddressFromSlime(infoObject.field("address"))) .withBillingContact(tenantInfoBillingContactFromSlime(infoObject.field("billingContact"))); } private TenantInfoBillingContact tenantInfoBillingContactFromSlime(Inspector billingObject) { return TenantInfoBillingContact.EMPTY .withName(billingObject.field("name").asString()) .withEmail(billingObject.field("email").asString()) .withPhone(billingObject.field("phone").asString()) .withAddress(tenantInfoAddressFromSlime(billingObject.field("address"))); } void toSlime(TenantInfo info, Cursor parentCursor) { if (info.isEmpty()) return; Cursor infoCursor = parentCursor.setObject("info"); infoCursor.setString("name", info.name()); 
infoCursor.setString("email", info.email()); infoCursor.setString("website", info.website()); infoCursor.setString("invoiceEmail", info.invoiceEmail()); infoCursor.setString("contactName", info.contactName()); infoCursor.setString("contactEmail", info.contactEmail()); toSlime(info.address(), infoCursor); toSlime(info.billingContact(), infoCursor); } private void toSlime(TenantInfoAddress address, Cursor parentCursor) { if (address.isEmpty()) return; Cursor addressCursor = parentCursor.setObject("address"); addressCursor.setString("addressLines", address.addressLines()); addressCursor.setString("postalCodeOrZip", address.postalCodeOrZip()); addressCursor.setString("city", address.city()); addressCursor.setString("stateRegionProvince", address.stateRegionProvince()); addressCursor.setString("country", address.country()); } private void toSlime(TenantInfoBillingContact billingContact, Cursor parentCursor) { if (billingContact.isEmpty()) return; Cursor addressCursor = parentCursor.setObject("billingContact"); addressCursor.setString("name", billingContact.name()); addressCursor.setString("email", billingContact.email()); addressCursor.setString("phone", billingContact.phone()); toSlime(billingContact.address(), addressCursor); } private Optional<Contact> contactFrom(Inspector object) { if ( ! object.valid()) return Optional.empty(); URI contactUrl = URI.create(object.field(contactUrlField).asString()); URI propertyUrl = URI.create(object.field(propertyUrlField).asString()); URI issueTrackerUrl = URI.create(object.field(issueTrackerUrlField).asString()); List<List<String>> persons = personsFrom(object.field(personsField)); String queue = object.field(queueField).asString(); Optional<String> component = object.field(componentField).valid() ? 
Optional.of(object.field(componentField).asString()) : Optional.empty(); return Optional.of(new Contact(contactUrl, propertyUrl, issueTrackerUrl, persons, queue, component)); } private void writeContact(Contact contact, Cursor contactCursor) { contactCursor.setString(contactUrlField, contact.url().toString()); contactCursor.setString(propertyUrlField, contact.propertyUrl().toString()); contactCursor.setString(issueTrackerUrlField, contact.issueTrackerUrl().toString()); Cursor personsArray = contactCursor.setArray(personsField); contact.persons().forEach(personList -> { Cursor personArray = personsArray.addArray(); personList.forEach(person -> { Cursor personObject = personArray.addObject(); personObject.setString(personField, person); }); }); contactCursor.setString(queueField, contact.queue()); contact.component().ifPresent(component -> contactCursor.setString(componentField, component)); } private List<List<String>> personsFrom(Inspector array) { List<List<String>> personLists = new ArrayList<>(); array.traverse((ArrayTraverser) (i, personArray) -> { List<String> persons = new ArrayList<>(); personArray.traverse((ArrayTraverser) (j, inspector) -> persons.add(inspector.field("person").asString())); personLists.add(persons); }); return personLists; } private BillingInfo billingInfoFrom(Inspector billingInfoObject) { return new BillingInfo(billingInfoObject.field(customerIdField).asString(), billingInfoObject.field(productCodeField).asString()); } private static Tenant.Type typeOf(String value) { switch (value) { case "athenz": return Tenant.Type.athenz; case "cloud": return Tenant.Type.cloud; default: throw new IllegalArgumentException("Unknown tenant type '" + value + "'."); } } private static String valueOf(Tenant.Type type) { switch (type) { case athenz: return "athenz"; case cloud: return "cloud"; default: throw new IllegalArgumentException("Unexpected tenant type '" + type + "'."); } } }
Why create it at all if shutDown is set? Could you just add that condition to the outer if, as '&& !shutDown'?
public DocumentAccess get() { synchronized (monitor) { if (access == null) { access = new MessageBusDocumentAccess((MessageBusParams) new MessageBusParams(new LoadTypeSet(loadTypeConfig)).setDocumentmanagerConfig(documentmanagerConfig)); if (shutDown) access.shutdown(); } return access; } }
if (shutDown)
public DocumentAccess get() { synchronized (monitor) { if (access == null) { access = new MessageBusDocumentAccess((MessageBusParams) new MessageBusParams(new LoadTypeSet(loadTypeConfig)).setDocumentmanagerConfig(documentmanagerConfig)); if (shutDown) access.shutdown(); } return access; } }
class MessageBusDocumentAccessProvider extends AbstractComponent implements Provider<DocumentAccess> { private final DocumentmanagerConfig documentmanagerConfig; private final LoadTypeConfig loadTypeConfig; private final Object monitor = new Object(); private boolean shutDown = false; private DocumentAccess access = null; @Inject public MessageBusDocumentAccessProvider(DocumentmanagerConfig documentmanagerConfig, LoadTypeConfig loadTypeConfig) { this.documentmanagerConfig = documentmanagerConfig; this.loadTypeConfig = loadTypeConfig; } @Override @Override public void deconstruct() { synchronized (monitor) { if ( ! shutDown) { shutDown = true; if (access != null) access.shutdown(); } } } }
class MessageBusDocumentAccessProvider extends AbstractComponent implements Provider<DocumentAccess> { private final DocumentmanagerConfig documentmanagerConfig; private final LoadTypeConfig loadTypeConfig; private final Object monitor = new Object(); private boolean shutDown = false; private DocumentAccess access = null; @Inject public MessageBusDocumentAccessProvider(DocumentmanagerConfig documentmanagerConfig, LoadTypeConfig loadTypeConfig) { this.documentmanagerConfig = documentmanagerConfig; this.loadTypeConfig = loadTypeConfig; } @Override @Override public void deconstruct() { synchronized (monitor) { if ( ! shutDown) { shutDown = true; if (access != null) access.shutdown(); } } } }
Then you'd return `null`. I thought it better to preserve the existing error message you'd get when trying to use a shut down document access.
public DocumentAccess get() { synchronized (monitor) { if (access == null) { access = new MessageBusDocumentAccess((MessageBusParams) new MessageBusParams(new LoadTypeSet(loadTypeConfig)).setDocumentmanagerConfig(documentmanagerConfig)); if (shutDown) access.shutdown(); } return access; } }
if (shutDown)
public DocumentAccess get() { synchronized (monitor) { if (access == null) { access = new MessageBusDocumentAccess((MessageBusParams) new MessageBusParams(new LoadTypeSet(loadTypeConfig)).setDocumentmanagerConfig(documentmanagerConfig)); if (shutDown) access.shutdown(); } return access; } }
class MessageBusDocumentAccessProvider extends AbstractComponent implements Provider<DocumentAccess> { private final DocumentmanagerConfig documentmanagerConfig; private final LoadTypeConfig loadTypeConfig; private final Object monitor = new Object(); private boolean shutDown = false; private DocumentAccess access = null; @Inject public MessageBusDocumentAccessProvider(DocumentmanagerConfig documentmanagerConfig, LoadTypeConfig loadTypeConfig) { this.documentmanagerConfig = documentmanagerConfig; this.loadTypeConfig = loadTypeConfig; } @Override @Override public void deconstruct() { synchronized (monitor) { if ( ! shutDown) { shutDown = true; if (access != null) access.shutdown(); } } } }
class MessageBusDocumentAccessProvider extends AbstractComponent implements Provider<DocumentAccess> { private final DocumentmanagerConfig documentmanagerConfig; private final LoadTypeConfig loadTypeConfig; private final Object monitor = new Object(); private boolean shutDown = false; private DocumentAccess access = null; @Inject public MessageBusDocumentAccessProvider(DocumentmanagerConfig documentmanagerConfig, LoadTypeConfig loadTypeConfig) { this.documentmanagerConfig = documentmanagerConfig; this.loadTypeConfig = loadTypeConfig; } @Override @Override public void deconstruct() { synchronized (monitor) { if ( ! shutDown) { shutDown = true; if (access != null) access.shutdown(); } } } }
Alternatively could throw something custom that was clear on this.
public DocumentAccess get() { synchronized (monitor) { if (access == null) { access = new MessageBusDocumentAccess((MessageBusParams) new MessageBusParams(new LoadTypeSet(loadTypeConfig)).setDocumentmanagerConfig(documentmanagerConfig)); if (shutDown) access.shutdown(); } return access; } }
if (shutDown)
public DocumentAccess get() { synchronized (monitor) { if (access == null) { access = new MessageBusDocumentAccess((MessageBusParams) new MessageBusParams(new LoadTypeSet(loadTypeConfig)).setDocumentmanagerConfig(documentmanagerConfig)); if (shutDown) access.shutdown(); } return access; } }
class MessageBusDocumentAccessProvider extends AbstractComponent implements Provider<DocumentAccess> { private final DocumentmanagerConfig documentmanagerConfig; private final LoadTypeConfig loadTypeConfig; private final Object monitor = new Object(); private boolean shutDown = false; private DocumentAccess access = null; @Inject public MessageBusDocumentAccessProvider(DocumentmanagerConfig documentmanagerConfig, LoadTypeConfig loadTypeConfig) { this.documentmanagerConfig = documentmanagerConfig; this.loadTypeConfig = loadTypeConfig; } @Override @Override public void deconstruct() { synchronized (monitor) { if ( ! shutDown) { shutDown = true; if (access != null) access.shutdown(); } } } }
class MessageBusDocumentAccessProvider extends AbstractComponent implements Provider<DocumentAccess> { private final DocumentmanagerConfig documentmanagerConfig; private final LoadTypeConfig loadTypeConfig; private final Object monitor = new Object(); private boolean shutDown = false; private DocumentAccess access = null; @Inject public MessageBusDocumentAccessProvider(DocumentmanagerConfig documentmanagerConfig, LoadTypeConfig loadTypeConfig) { this.documentmanagerConfig = documentmanagerConfig; this.loadTypeConfig = loadTypeConfig; } @Override @Override public void deconstruct() { synchronized (monitor) { if ( ! shutDown) { shutDown = true; if (access != null) access.shutdown(); } } } }
Throw instead — there is no point in constructing the mbus access and shutting it down just to preserve the error message in this case. This should never happen, but throwing is probably a good idea.
public DocumentAccess get() { synchronized (monitor) { if (access == null) { access = new MessageBusDocumentAccess((MessageBusParams) new MessageBusParams(new LoadTypeSet(loadTypeConfig)).setDocumentmanagerConfig(documentmanagerConfig)); if (shutDown) access.shutdown(); } return access; } }
if (shutDown)
public DocumentAccess get() { synchronized (monitor) { if (access == null) { access = new MessageBusDocumentAccess((MessageBusParams) new MessageBusParams(new LoadTypeSet(loadTypeConfig)).setDocumentmanagerConfig(documentmanagerConfig)); if (shutDown) access.shutdown(); } return access; } }
class MessageBusDocumentAccessProvider extends AbstractComponent implements Provider<DocumentAccess> { private final DocumentmanagerConfig documentmanagerConfig; private final LoadTypeConfig loadTypeConfig; private final Object monitor = new Object(); private boolean shutDown = false; private DocumentAccess access = null; @Inject public MessageBusDocumentAccessProvider(DocumentmanagerConfig documentmanagerConfig, LoadTypeConfig loadTypeConfig) { this.documentmanagerConfig = documentmanagerConfig; this.loadTypeConfig = loadTypeConfig; } @Override @Override public void deconstruct() { synchronized (monitor) { if ( ! shutDown) { shutDown = true; if (access != null) access.shutdown(); } } } }
class MessageBusDocumentAccessProvider extends AbstractComponent implements Provider<DocumentAccess> { private final DocumentmanagerConfig documentmanagerConfig; private final LoadTypeConfig loadTypeConfig; private final Object monitor = new Object(); private boolean shutDown = false; private DocumentAccess access = null; @Inject public MessageBusDocumentAccessProvider(DocumentmanagerConfig documentmanagerConfig, LoadTypeConfig loadTypeConfig) { this.documentmanagerConfig = documentmanagerConfig; this.loadTypeConfig = loadTypeConfig; } @Override @Override public void deconstruct() { synchronized (monitor) { if ( ! shutDown) { shutDown = true; if (access != null) access.shutdown(); } } } }
Can do. 'twas my first impulse as well. And, yes, this should never happen, unless someone breaks DI.
public DocumentAccess get() { synchronized (monitor) { if (access == null) { access = new MessageBusDocumentAccess((MessageBusParams) new MessageBusParams(new LoadTypeSet(loadTypeConfig)).setDocumentmanagerConfig(documentmanagerConfig)); if (shutDown) access.shutdown(); } return access; } }
if (shutDown)
public DocumentAccess get() { synchronized (monitor) { if (access == null) { access = new MessageBusDocumentAccess((MessageBusParams) new MessageBusParams(new LoadTypeSet(loadTypeConfig)).setDocumentmanagerConfig(documentmanagerConfig)); if (shutDown) access.shutdown(); } return access; } }
class MessageBusDocumentAccessProvider extends AbstractComponent implements Provider<DocumentAccess> { private final DocumentmanagerConfig documentmanagerConfig; private final LoadTypeConfig loadTypeConfig; private final Object monitor = new Object(); private boolean shutDown = false; private DocumentAccess access = null; @Inject public MessageBusDocumentAccessProvider(DocumentmanagerConfig documentmanagerConfig, LoadTypeConfig loadTypeConfig) { this.documentmanagerConfig = documentmanagerConfig; this.loadTypeConfig = loadTypeConfig; } @Override @Override public void deconstruct() { synchronized (monitor) { if ( ! shutDown) { shutDown = true; if (access != null) access.shutdown(); } } } }
class MessageBusDocumentAccessProvider extends AbstractComponent implements Provider<DocumentAccess> { private final DocumentmanagerConfig documentmanagerConfig; private final LoadTypeConfig loadTypeConfig; private final Object monitor = new Object(); private boolean shutDown = false; private DocumentAccess access = null; @Inject public MessageBusDocumentAccessProvider(DocumentmanagerConfig documentmanagerConfig, LoadTypeConfig loadTypeConfig) { this.documentmanagerConfig = documentmanagerConfig; this.loadTypeConfig = loadTypeConfig; } @Override @Override public void deconstruct() { synchronized (monitor) { if ( ! shutDown) { shutDown = true; if (access != null) access.shutdown(); } } } }
IMHO, throwing an exception for duplicate calls to `deconstruct()` is ok - it should not happen. This is too defensive.
public void deconstruct() { synchronized (monitor) { if ( ! shutDown) { shutDown = true; if (access != null) access.shutdown(); } } }
if ( ! shutDown) {
public void deconstruct() { synchronized (monitor) { if ( ! shutDown) { shutDown = true; if (access != null) access.shutdown(); } } }
class MessageBusDocumentAccessProvider extends AbstractComponent implements Provider<DocumentAccess> { private final DocumentmanagerConfig documentmanagerConfig; private final LoadTypeConfig loadTypeConfig; private final Object monitor = new Object(); private boolean shutDown = false; private DocumentAccess access = null; @Inject public MessageBusDocumentAccessProvider(DocumentmanagerConfig documentmanagerConfig, LoadTypeConfig loadTypeConfig) { this.documentmanagerConfig = documentmanagerConfig; this.loadTypeConfig = loadTypeConfig; } @Override public DocumentAccess get() { synchronized (monitor) { if (access == null) { access = new MessageBusDocumentAccess((MessageBusParams) new MessageBusParams(new LoadTypeSet(loadTypeConfig)).setDocumentmanagerConfig(documentmanagerConfig)); if (shutDown) access.shutdown(); } return access; } } @Override }
class MessageBusDocumentAccessProvider extends AbstractComponent implements Provider<DocumentAccess> { private final DocumentmanagerConfig documentmanagerConfig; private final LoadTypeConfig loadTypeConfig; private final Object monitor = new Object(); private boolean shutDown = false; private DocumentAccess access = null; @Inject public MessageBusDocumentAccessProvider(DocumentmanagerConfig documentmanagerConfig, LoadTypeConfig loadTypeConfig) { this.documentmanagerConfig = documentmanagerConfig; this.loadTypeConfig = loadTypeConfig; } @Override public DocumentAccess get() { synchronized (monitor) { if (access == null) { access = new MessageBusDocumentAccess((MessageBusParams) new MessageBusParams(new LoadTypeSet(loadTypeConfig)).setDocumentmanagerConfig(documentmanagerConfig)); if (shutDown) access.shutdown(); } return access; } } @Override }
Should this component be available for non-application-container-clusters?
public ContainerCluster(AbstractConfigProducer<?> parent, String subId, String name, DeployState deployState) { super(parent, subId); this.name = name; this.isHostedVespa = stateIsHosted(deployState); this.zone = (deployState != null) ? deployState.zone() : Zone.defaultZone(); componentGroup = new ComponentGroup<>(this, "component"); addComponent(new StatisticsComponent()); addSimpleComponent(AccessLog.class); addSimpleComponent(ThreadPoolProvider.class); addSimpleComponent(com.yahoo.concurrent.classlock.ClassLocking.class); addSimpleComponent(SecurityFilterInvoker.class); addSimpleComponent("com.yahoo.container.jdisc.metric.MetricConsumerProviderProvider"); addSimpleComponent("com.yahoo.container.jdisc.metric.MetricProvider"); addSimpleComponent("com.yahoo.container.jdisc.metric.MetricUpdater"); addSimpleComponent(com.yahoo.container.jdisc.LoggingRequestHandler.Context.class); addSimpleComponent(com.yahoo.metrics.simple.MetricManager.class.getName(), null, MetricProperties.BUNDLE_SYMBOLIC_NAME); addSimpleComponent(com.yahoo.metrics.simple.jdisc.JdiscMetricsFactory.class.getName(), null, MetricProperties.BUNDLE_SYMBOLIC_NAME); addSimpleComponent("com.yahoo.container.jdisc.state.StateMonitor"); addSimpleComponent("com.yahoo.container.jdisc.ContainerThreadFactory"); addSimpleComponent(com.yahoo.container.core.documentapi.MessageBusDocumentAccessProvider.class.getName()); addSimpleComponent("com.yahoo.container.handler.VipStatus"); addSimpleComponent(com.yahoo.container.handler.ClustersStatus.class.getName()); addJaxProviders(); }
addSimpleComponent(com.yahoo.container.core.documentapi.MessageBusDocumentAccessProvider.class.getName());
public ContainerCluster(AbstractConfigProducer<?> parent, String subId, String name, DeployState deployState) { super(parent, subId); this.name = name; this.isHostedVespa = stateIsHosted(deployState); this.zone = (deployState != null) ? deployState.zone() : Zone.defaultZone(); componentGroup = new ComponentGroup<>(this, "component"); addComponent(new StatisticsComponent()); addSimpleComponent(AccessLog.class); addSimpleComponent(ThreadPoolProvider.class); addSimpleComponent(com.yahoo.concurrent.classlock.ClassLocking.class); addSimpleComponent(SecurityFilterInvoker.class); addSimpleComponent("com.yahoo.container.jdisc.metric.MetricConsumerProviderProvider"); addSimpleComponent("com.yahoo.container.jdisc.metric.MetricProvider"); addSimpleComponent("com.yahoo.container.jdisc.metric.MetricUpdater"); addSimpleComponent(com.yahoo.container.jdisc.LoggingRequestHandler.Context.class); addSimpleComponent(com.yahoo.metrics.simple.MetricManager.class.getName(), null, MetricProperties.BUNDLE_SYMBOLIC_NAME); addSimpleComponent(com.yahoo.metrics.simple.jdisc.JdiscMetricsFactory.class.getName(), null, MetricProperties.BUNDLE_SYMBOLIC_NAME); addSimpleComponent("com.yahoo.container.jdisc.state.StateMonitor"); addSimpleComponent("com.yahoo.container.jdisc.ContainerThreadFactory"); addSimpleComponent(com.yahoo.container.core.documentapi.MessageBusDocumentAccessProvider.class.getName()); addSimpleComponent("com.yahoo.container.handler.VipStatus"); addSimpleComponent(com.yahoo.container.handler.ClustersStatus.class.getName()); addJaxProviders(); }
class ContainerCluster<CONTAINER extends Container> extends AbstractConfigProducer<AbstractConfigProducer<?>> implements ComponentsConfig.Producer, JdiscBindingsConfig.Producer, DocumentmanagerConfig.Producer, ContainerDocumentConfig.Producer, HealthMonitorConfig.Producer, ApplicationMetadataConfig.Producer, PlatformBundlesConfig.Producer, IndexInfoConfig.Producer, IlscriptsConfig.Producer, SchemamappingConfig.Producer, QrSearchersConfig.Producer, QrStartConfig.Producer, QueryProfilesConfig.Producer, PageTemplatesConfig.Producer, SemanticRulesConfig.Producer, DocprocConfig.Producer, ClusterInfoConfig.Producer, RoutingProviderConfig.Producer, ConfigserverConfig.Producer, ThreadpoolConfig.Producer { /** * URI prefix used for internal, usually programmatic, APIs. URIs using this * prefix should never considered available for direct use by customers, and * normal compatibility concerns only applies to libraries using the URIs in * question, not contents served from the URIs themselves. */ public static final String RESERVED_URI_PREFIX = "/reserved-for-internal-use"; public static final String APPLICATION_STATUS_HANDLER_CLASS = "com.yahoo.container.handler.observability.ApplicationStatusHandler"; public static final String BINDINGS_OVERVIEW_HANDLER_CLASS = BindingsOverviewHandler.class.getName(); public static final String LOG_HANDLER_CLASS = com.yahoo.container.handler.LogHandler.class.getName(); public static final String DEFAULT_LINGUISTICS_PROVIDER = "com.yahoo.language.provider.DefaultLinguisticsProvider"; public static final String CMS = "-XX:+UseConcMarkSweepGC -XX:MaxTenuringThreshold=15 -XX:NewRatio=1"; public static final String G1GC = "-XX:+UseG1GC -XX:MaxTenuringThreshold=15"; public static final String STATE_HANDLER_CLASS = "com.yahoo.container.jdisc.state.StateHandler"; public static final BindingPattern STATE_HANDLER_BINDING_1 = SystemBindingPattern.fromHttpPath(StateHandler.STATE_API_ROOT); public static final BindingPattern STATE_HANDLER_BINDING_2 = 
SystemBindingPattern.fromHttpPath(StateHandler.STATE_API_ROOT + "/*"); public static final String ROOT_HANDLER_PATH = "/"; public static final BindingPattern ROOT_HANDLER_BINDING = SystemBindingPattern.fromHttpPath(ROOT_HANDLER_PATH); public static final BindingPattern VIP_HANDLER_BINDING = SystemBindingPattern.fromHttpPath("/status.html"); private final String name; protected List<CONTAINER> containers = new ArrayList<>(); private Http http; private ProcessingChains processingChains; private ContainerSearch containerSearch; private ContainerDocproc containerDocproc; private ContainerDocumentApi containerDocumentApi; private SecretStore secretStore; private boolean rpcServerEnabled = true; private boolean httpServerEnabled = true; private final Set<Path> platformBundles = new LinkedHashSet<>(); private final List<String> serviceAliases = new ArrayList<>(); private final List<String> endpointAliases = new ArrayList<>(); private final ComponentGroup<Component<?, ?>> componentGroup; private final boolean isHostedVespa; private Map<String, String> concreteDocumentTypes = new LinkedHashMap<>(); private ApplicationMetaData applicationMetaData = null; /** The zone this is deployed in, or the default zone if not on hosted Vespa */ private Zone zone; private String hostClusterId = null; private String jvmGCOptions = null; private String environmentVars = null; public void setZone(Zone zone) { this.zone = zone; } public Zone getZone() { return zone; } public void addDefaultHandlersWithVip() { addDefaultHandlersExceptStatus(); addVipHandler(); } public final void addDefaultHandlersExceptStatus() { addDefaultRootHandler(); addMetricStateHandler(); addApplicationStatusHandler(); } public void addMetricStateHandler() { Handler<AbstractConfigProducer<?>> stateHandler = new Handler<>( new ComponentModel(STATE_HANDLER_CLASS, null, null, null)); stateHandler.addServerBindings(STATE_HANDLER_BINDING_1, STATE_HANDLER_BINDING_2); addComponent(stateHandler); } public void 
addDefaultRootHandler() { Handler<AbstractConfigProducer<?>> handler = new Handler<>( new ComponentModel(BundleInstantiationSpecification.getFromStrings( BINDINGS_OVERVIEW_HANDLER_CLASS, null, null), null)); handler.addServerBindings(ROOT_HANDLER_BINDING); addComponent(handler); } public void addApplicationStatusHandler() { Handler<AbstractConfigProducer<?>> statusHandler = new Handler<>( new ComponentModel(BundleInstantiationSpecification.getInternalHandlerSpecificationFromStrings( APPLICATION_STATUS_HANDLER_CLASS, null), null)); statusHandler.addServerBindings(SystemBindingPattern.fromHttpPath("/ApplicationStatus")); addComponent(statusHandler); } public void addVipHandler() { Handler<?> vipHandler = Handler.fromClassName(FileStatusHandlerComponent.CLASS); vipHandler.addServerBindings(VIP_HANDLER_BINDING); addComponent(vipHandler); } @SuppressWarnings("deprecation") private void addJaxProviders() { addSimpleComponent(com.yahoo.container.xml.providers.DatatypeFactoryProvider.class); addSimpleComponent(com.yahoo.container.xml.providers.DocumentBuilderFactoryProvider.class); addSimpleComponent(com.yahoo.container.xml.providers.SAXParserFactoryProvider.class); addSimpleComponent(com.yahoo.container.xml.providers.SchemaFactoryProvider.class); addSimpleComponent(com.yahoo.container.xml.providers.TransformerFactoryProvider.class); addSimpleComponent(com.yahoo.container.xml.providers.XMLEventFactoryProvider.class); addSimpleComponent(com.yahoo.container.xml.providers.XMLInputFactoryProvider.class); addSimpleComponent(com.yahoo.container.xml.providers.XMLOutputFactoryProvider.class); addSimpleComponent(com.yahoo.container.xml.providers.XPathFactoryProvider.class); } public final void addComponent(Component<?, ?> component) { componentGroup.addComponent(component); } public final void addSimpleComponent(String idSpec, String classSpec, String bundleSpec) { addComponent(new SimpleComponent(new ComponentModel(idSpec, classSpec, bundleSpec))); } /** * Removes a component by 
id * * @return the removed component, or null if it was not present */ @SuppressWarnings("unused") public Component removeComponent(ComponentId componentId) { return componentGroup.removeComponent(componentId); } private void addSimpleComponent(Class<?> clazz) { addSimpleComponent(clazz.getName()); } protected void addSimpleComponent(String className) { addComponent(new SimpleComponent(className)); } public void prepare(DeployState deployState) { applicationMetaData = deployState.getApplicationPackage().getMetaData(); doPrepare(deployState); } protected abstract void doPrepare(DeployState deployState); public String getName() { return name; } public List<CONTAINER> getContainers() { return Collections.unmodifiableList(containers); } public void addContainer(CONTAINER container) { container.setClusterName(name); container.setProp("clustername", name) .setProp("index", this.containers.size()); containers.add(container); } public void addContainers(Collection<CONTAINER> containers) { containers.forEach(this::addContainer); } public void setProcessingChains(ProcessingChains processingChains, BindingPattern... 
serverBindings) { if (this.processingChains != null) throw new IllegalStateException("ProcessingChains should only be set once."); this.processingChains = processingChains; ProcessingHandler<?> processingHandler = new ProcessingHandler<>( processingChains, "com.yahoo.processing.handler.ProcessingHandler"); for (BindingPattern binding: serverBindings) processingHandler.addServerBindings(binding); addComponent(processingHandler); } ProcessingChains getProcessingChains() { return processingChains; } public SearchChains getSearchChains() { if (containerSearch == null) throw new IllegalStateException("Search components not found in container cluster '" + getSubId() + "': Add <search/> to the cluster in services.xml"); return containerSearch.getChains(); } public ContainerSearch getSearch() { return containerSearch; } public void setSearch(ContainerSearch containerSearch) { this.containerSearch = containerSearch; } public void setHttp(Http http) { this.http = http; addChild(http); } public Http getHttp() { return http; } public ContainerDocproc getDocproc() { return containerDocproc; } public void setDocproc(ContainerDocproc containerDocproc) { this.containerDocproc = containerDocproc; } public ContainerDocumentApi getDocumentApi() { return containerDocumentApi; } public void setDocumentApi(ContainerDocumentApi containerDocumentApi) { this.containerDocumentApi = containerDocumentApi; } public DocprocChains getDocprocChains() { if (containerDocproc == null) throw new IllegalStateException("Document processing components not found in container cluster '" + getSubId() + "': Add <document-processing/> to the cluster in services.xml"); return containerDocproc.getChains(); } @SuppressWarnings("unchecked") public Collection<Handler<?>> getHandlers() { return (Collection<Handler<?>>)(Collection)componentGroup.getComponents(Handler.class); } public void setSecretStore(SecretStore secretStore) { this.secretStore = secretStore; } public Optional<SecretStore> getSecretStore() { 
return Optional.ofNullable(secretStore); } public Map<ComponentId, Component<?, ?>> getComponentsMap() { return componentGroup.getComponentMap(); } /** Returns all components in this cluster (generic, handlers, chained) */ public Collection<Component<?, ?>> getAllComponents() { List<Component<?, ?>> allComponents = new ArrayList<>(); recursivelyFindAllComponents(allComponents, this); Collections.sort(allComponents); return Collections.unmodifiableCollection(allComponents); } private void recursivelyFindAllComponents(Collection<Component<?, ?>> allComponents, AbstractConfigProducer<?> current) { for (AbstractConfigProducer<?> child: current.getChildren().values()) { if (child instanceof Component) allComponents.add((Component<?, ?>) child); if (!(child instanceof Container)) recursivelyFindAllComponents(allComponents, child); } } @Override public void getConfig(ComponentsConfig.Builder builder) { builder.components.addAll(ComponentsConfigGenerator.generate(getAllComponents())); builder.components(new ComponentsConfig.Components.Builder().id("com.yahoo.container.core.config.HandlersConfigurerDi$RegistriesHack")); } @Override public void getConfig(JdiscBindingsConfig.Builder builder) { builder.handlers.putAll(DiscBindingsConfigGenerator.generate(getHandlers())); } @Override public void getConfig(DocumentmanagerConfig.Builder builder) { if (containerDocproc != null && containerDocproc.isCompressDocuments()) builder.enablecompression(true); } @Override public void getConfig(ContainerDocumentConfig.Builder builder) { for (Map.Entry<String, String> e : concreteDocumentTypes.entrySet()) { ContainerDocumentConfig.Doctype.Builder dtb = new ContainerDocumentConfig.Doctype.Builder(); dtb.type(e.getKey()); dtb.factorycomponent(e.getValue()); builder.doctype(dtb); } } @Override public void getConfig(HealthMonitorConfig.Builder builder) { Monitoring monitoring = getMonitoringService(); if (monitoring != null) { builder.snapshot_interval(monitoring.getIntervalSeconds()); } } 
@Override public void getConfig(ApplicationMetadataConfig.Builder builder) { if (applicationMetaData != null) { builder.name(applicationMetaData.getApplicationId().application().value()). user(applicationMetaData.getDeployedByUser()). path(applicationMetaData.getDeployPath()). timestamp(applicationMetaData.getDeployTimestamp()). checksum(applicationMetaData.getChecksum()). generation(applicationMetaData.getGeneration()); } } /** * Adds a bundle present at a known location at the target container nodes. * Note that the set of platform bundles cannot change during the jdisc container's lifetime. * * @param bundlePath usually an absolute path, e.g. '$VESPA_HOME/lib/jars/foo.jar' */ public final void addPlatformBundle(Path bundlePath) { platformBundles.add(bundlePath); } @Override public void getConfig(PlatformBundlesConfig.Builder builder) { platformBundles.stream() .map(Path::toString) .forEach(builder::bundlePaths); } @Override public void getConfig(QrSearchersConfig.Builder builder) { if (containerSearch != null) containerSearch.getConfig(builder); } @Override public void getConfig(QrStartConfig.Builder builder) { builder.jvm .verbosegc(false) .availableProcessors(2) .compressedClassSpaceSize(32) .minHeapsize(32) .heapsize(512) .heapSizeAsPercentageOfPhysicalMemory(0) .gcopts(Objects.requireNonNullElse(jvmGCOptions, G1GC)); if (environmentVars != null) { builder.qrs.env(environmentVars); } } @Override public void getConfig(DocprocConfig.Builder builder) { if (containerDocproc != null) containerDocproc.getConfig(builder); } @Override public void getConfig(PageTemplatesConfig.Builder builder) { if (containerSearch != null) containerSearch.getConfig(builder); } @Override public void getConfig(SemanticRulesConfig.Builder builder) { if (containerSearch != null) containerSearch.getConfig(builder); } @Override public void getConfig(QueryProfilesConfig.Builder builder) { if (containerSearch != null) containerSearch.getConfig(builder); } @Override public void 
getConfig(SchemamappingConfig.Builder builder) { if (containerDocproc != null) containerDocproc.getConfig(builder); } @Override public void getConfig(IndexInfoConfig.Builder builder) { if (containerSearch != null) containerSearch.getConfig(builder); } public void initialize(Map<String, AbstractSearchCluster> clusterMap) { if (containerSearch != null) containerSearch.connectSearchClusters(clusterMap); } public void addDefaultSearchAccessLog() { addComponent(new AccessLogComponent(AccessLogComponent.AccessLogType.jsonAccessLog, getName(), isHostedVespa)); } @Override public void getConfig(IlscriptsConfig.Builder builder) { List<AbstractSearchCluster> searchClusters = new ArrayList<>(); searchClusters.addAll(Content.getSearchClusters(getRoot().configModelRepo())); for (AbstractSearchCluster searchCluster : searchClusters) { searchCluster.getConfig(builder); } } @Override public void getConfig(ClusterInfoConfig.Builder builder) { builder.clusterId(name); builder.nodeCount(containers.size()); for (Service service : getDescendantServices()) { builder.services.add(new ClusterInfoConfig.Services.Builder() .index(Integer.parseInt(service.getServicePropertyString("index", "99999"))) .hostname(service.getHostName()) .ports(getPorts(service))); } } /** * Returns a config server config containing the right zone settings (and defaults for the rest). * This is useful to allow applications to find out in which zone they are runnung by having the Zone * object (which is constructed from this config) injected. 
*/ @Override public void getConfig(ConfigserverConfig.Builder builder) { builder.system(zone.system().value()); builder.environment(zone.environment().value()); builder.region(zone.region().value()); } private List<ClusterInfoConfig.Services.Ports.Builder> getPorts(Service service) { List<ClusterInfoConfig.Services.Ports.Builder> builders = new ArrayList<>(); PortsMeta portsMeta = service.getPortsMeta(); for (int i = 0; i < portsMeta.getNumPorts(); i++) { builders.add(new ClusterInfoConfig.Services.Ports.Builder() .number(service.getRelativePort(i)) .tags(ApplicationConfigProducerRoot.getPortTags(portsMeta, i)) ); } return builders; } public boolean isHostedVespa() { return isHostedVespa; } @Override public void getConfig(RoutingProviderConfig.Builder builder) { builder.enabled(isHostedVespa); } public Map<String, String> concreteDocumentTypes() { return concreteDocumentTypes; } /** The configured service aliases for the service in this cluster */ public List<String> serviceAliases() { return serviceAliases; } /** The configured endpoint aliases (fqdn) for the service in this cluster */ public List<String> endpointAliases() { return endpointAliases; } public void setHostClusterId(String clusterId) { hostClusterId = clusterId; } /** * Returns the id of the content cluster which hosts this container cluster, if any. * This is only set with hosted clusters where this container cluster is set up to run on the nodes * of a content cluster. 
*/ public Optional<String> getHostClusterId() { return Optional.ofNullable(hostClusterId); } public void setJvmGCOptions(String opts) { this.jvmGCOptions = opts; } public void setEnvironmentVars(String environmentVars) { this.environmentVars = environmentVars; } public Optional<String> getJvmGCOptions() { return Optional.ofNullable(jvmGCOptions); } public final void setRpcServerEnabled(boolean rpcServerEnabled) { this.rpcServerEnabled = rpcServerEnabled; } boolean rpcServerEnabled() { return rpcServerEnabled; } boolean httpServerEnabled() { return httpServerEnabled; } public void setHttpServerEnabled(boolean httpServerEnabled) { this.httpServerEnabled = httpServerEnabled; } @Override public String toString() { return "container cluster '" + getName() + "'"; } protected abstract boolean messageBusEnabled(); }
class ContainerCluster<CONTAINER extends Container> extends AbstractConfigProducer<AbstractConfigProducer<?>> implements ComponentsConfig.Producer, JdiscBindingsConfig.Producer, DocumentmanagerConfig.Producer, ContainerDocumentConfig.Producer, HealthMonitorConfig.Producer, ApplicationMetadataConfig.Producer, PlatformBundlesConfig.Producer, IndexInfoConfig.Producer, IlscriptsConfig.Producer, SchemamappingConfig.Producer, QrSearchersConfig.Producer, QrStartConfig.Producer, QueryProfilesConfig.Producer, PageTemplatesConfig.Producer, SemanticRulesConfig.Producer, DocprocConfig.Producer, ClusterInfoConfig.Producer, RoutingProviderConfig.Producer, ConfigserverConfig.Producer, ThreadpoolConfig.Producer { /** * URI prefix used for internal, usually programmatic, APIs. URIs using this * prefix should never considered available for direct use by customers, and * normal compatibility concerns only applies to libraries using the URIs in * question, not contents served from the URIs themselves. */ public static final String RESERVED_URI_PREFIX = "/reserved-for-internal-use"; public static final String APPLICATION_STATUS_HANDLER_CLASS = "com.yahoo.container.handler.observability.ApplicationStatusHandler"; public static final String BINDINGS_OVERVIEW_HANDLER_CLASS = BindingsOverviewHandler.class.getName(); public static final String LOG_HANDLER_CLASS = com.yahoo.container.handler.LogHandler.class.getName(); public static final String DEFAULT_LINGUISTICS_PROVIDER = "com.yahoo.language.provider.DefaultLinguisticsProvider"; public static final String CMS = "-XX:+UseConcMarkSweepGC -XX:MaxTenuringThreshold=15 -XX:NewRatio=1"; public static final String G1GC = "-XX:+UseG1GC -XX:MaxTenuringThreshold=15"; public static final String STATE_HANDLER_CLASS = "com.yahoo.container.jdisc.state.StateHandler"; public static final BindingPattern STATE_HANDLER_BINDING_1 = SystemBindingPattern.fromHttpPath(StateHandler.STATE_API_ROOT); public static final BindingPattern STATE_HANDLER_BINDING_2 = 
SystemBindingPattern.fromHttpPath(StateHandler.STATE_API_ROOT + "/*"); public static final String ROOT_HANDLER_PATH = "/"; public static final BindingPattern ROOT_HANDLER_BINDING = SystemBindingPattern.fromHttpPath(ROOT_HANDLER_PATH); public static final BindingPattern VIP_HANDLER_BINDING = SystemBindingPattern.fromHttpPath("/status.html"); private final String name; protected List<CONTAINER> containers = new ArrayList<>(); private Http http; private ProcessingChains processingChains; private ContainerSearch containerSearch; private ContainerDocproc containerDocproc; private ContainerDocumentApi containerDocumentApi; private SecretStore secretStore; private boolean rpcServerEnabled = true; private boolean httpServerEnabled = true; private final Set<Path> platformBundles = new LinkedHashSet<>(); private final List<String> serviceAliases = new ArrayList<>(); private final List<String> endpointAliases = new ArrayList<>(); private final ComponentGroup<Component<?, ?>> componentGroup; private final boolean isHostedVespa; private Map<String, String> concreteDocumentTypes = new LinkedHashMap<>(); private ApplicationMetaData applicationMetaData = null; /** The zone this is deployed in, or the default zone if not on hosted Vespa */ private Zone zone; private String hostClusterId = null; private String jvmGCOptions = null; private String environmentVars = null; public void setZone(Zone zone) { this.zone = zone; } public Zone getZone() { return zone; } public void addDefaultHandlersWithVip() { addDefaultHandlersExceptStatus(); addVipHandler(); } public final void addDefaultHandlersExceptStatus() { addDefaultRootHandler(); addMetricStateHandler(); addApplicationStatusHandler(); } public void addMetricStateHandler() { Handler<AbstractConfigProducer<?>> stateHandler = new Handler<>( new ComponentModel(STATE_HANDLER_CLASS, null, null, null)); stateHandler.addServerBindings(STATE_HANDLER_BINDING_1, STATE_HANDLER_BINDING_2); addComponent(stateHandler); } public void 
addDefaultRootHandler() { Handler<AbstractConfigProducer<?>> handler = new Handler<>( new ComponentModel(BundleInstantiationSpecification.getFromStrings( BINDINGS_OVERVIEW_HANDLER_CLASS, null, null), null)); handler.addServerBindings(ROOT_HANDLER_BINDING); addComponent(handler); } public void addApplicationStatusHandler() { Handler<AbstractConfigProducer<?>> statusHandler = new Handler<>( new ComponentModel(BundleInstantiationSpecification.getInternalHandlerSpecificationFromStrings( APPLICATION_STATUS_HANDLER_CLASS, null), null)); statusHandler.addServerBindings(SystemBindingPattern.fromHttpPath("/ApplicationStatus")); addComponent(statusHandler); } public void addVipHandler() { Handler<?> vipHandler = Handler.fromClassName(FileStatusHandlerComponent.CLASS); vipHandler.addServerBindings(VIP_HANDLER_BINDING); addComponent(vipHandler); } @SuppressWarnings("deprecation") private void addJaxProviders() { addSimpleComponent(com.yahoo.container.xml.providers.DatatypeFactoryProvider.class); addSimpleComponent(com.yahoo.container.xml.providers.DocumentBuilderFactoryProvider.class); addSimpleComponent(com.yahoo.container.xml.providers.SAXParserFactoryProvider.class); addSimpleComponent(com.yahoo.container.xml.providers.SchemaFactoryProvider.class); addSimpleComponent(com.yahoo.container.xml.providers.TransformerFactoryProvider.class); addSimpleComponent(com.yahoo.container.xml.providers.XMLEventFactoryProvider.class); addSimpleComponent(com.yahoo.container.xml.providers.XMLInputFactoryProvider.class); addSimpleComponent(com.yahoo.container.xml.providers.XMLOutputFactoryProvider.class); addSimpleComponent(com.yahoo.container.xml.providers.XPathFactoryProvider.class); } public final void addComponent(Component<?, ?> component) { componentGroup.addComponent(component); } public final void addSimpleComponent(String idSpec, String classSpec, String bundleSpec) { addComponent(new SimpleComponent(new ComponentModel(idSpec, classSpec, bundleSpec))); } /** * Removes a component by 
id * * @return the removed component, or null if it was not present */ @SuppressWarnings("unused") public Component removeComponent(ComponentId componentId) { return componentGroup.removeComponent(componentId); } private void addSimpleComponent(Class<?> clazz) { addSimpleComponent(clazz.getName()); } protected void addSimpleComponent(String className) { addComponent(new SimpleComponent(className)); } public void prepare(DeployState deployState) { applicationMetaData = deployState.getApplicationPackage().getMetaData(); doPrepare(deployState); } protected abstract void doPrepare(DeployState deployState); public String getName() { return name; } public List<CONTAINER> getContainers() { return Collections.unmodifiableList(containers); } public void addContainer(CONTAINER container) { container.setClusterName(name); container.setProp("clustername", name) .setProp("index", this.containers.size()); containers.add(container); } public void addContainers(Collection<CONTAINER> containers) { containers.forEach(this::addContainer); } public void setProcessingChains(ProcessingChains processingChains, BindingPattern... 
serverBindings) { if (this.processingChains != null) throw new IllegalStateException("ProcessingChains should only be set once."); this.processingChains = processingChains; ProcessingHandler<?> processingHandler = new ProcessingHandler<>( processingChains, "com.yahoo.processing.handler.ProcessingHandler"); for (BindingPattern binding: serverBindings) processingHandler.addServerBindings(binding); addComponent(processingHandler); } ProcessingChains getProcessingChains() { return processingChains; } public SearchChains getSearchChains() { if (containerSearch == null) throw new IllegalStateException("Search components not found in container cluster '" + getSubId() + "': Add <search/> to the cluster in services.xml"); return containerSearch.getChains(); } public ContainerSearch getSearch() { return containerSearch; } public void setSearch(ContainerSearch containerSearch) { this.containerSearch = containerSearch; } public void setHttp(Http http) { this.http = http; addChild(http); } public Http getHttp() { return http; } public ContainerDocproc getDocproc() { return containerDocproc; } public void setDocproc(ContainerDocproc containerDocproc) { this.containerDocproc = containerDocproc; } public ContainerDocumentApi getDocumentApi() { return containerDocumentApi; } public void setDocumentApi(ContainerDocumentApi containerDocumentApi) { this.containerDocumentApi = containerDocumentApi; } public DocprocChains getDocprocChains() { if (containerDocproc == null) throw new IllegalStateException("Document processing components not found in container cluster '" + getSubId() + "': Add <document-processing/> to the cluster in services.xml"); return containerDocproc.getChains(); } @SuppressWarnings("unchecked") public Collection<Handler<?>> getHandlers() { return (Collection<Handler<?>>)(Collection)componentGroup.getComponents(Handler.class); } public void setSecretStore(SecretStore secretStore) { this.secretStore = secretStore; } public Optional<SecretStore> getSecretStore() { 
return Optional.ofNullable(secretStore); } public Map<ComponentId, Component<?, ?>> getComponentsMap() { return componentGroup.getComponentMap(); } /** Returns all components in this cluster (generic, handlers, chained) */ public Collection<Component<?, ?>> getAllComponents() { List<Component<?, ?>> allComponents = new ArrayList<>(); recursivelyFindAllComponents(allComponents, this); Collections.sort(allComponents); return Collections.unmodifiableCollection(allComponents); } private void recursivelyFindAllComponents(Collection<Component<?, ?>> allComponents, AbstractConfigProducer<?> current) { for (AbstractConfigProducer<?> child: current.getChildren().values()) { if (child instanceof Component) allComponents.add((Component<?, ?>) child); if (!(child instanceof Container)) recursivelyFindAllComponents(allComponents, child); } } @Override public void getConfig(ComponentsConfig.Builder builder) { builder.components.addAll(ComponentsConfigGenerator.generate(getAllComponents())); builder.components(new ComponentsConfig.Components.Builder().id("com.yahoo.container.core.config.HandlersConfigurerDi$RegistriesHack")); } @Override public void getConfig(JdiscBindingsConfig.Builder builder) { builder.handlers.putAll(DiscBindingsConfigGenerator.generate(getHandlers())); } @Override public void getConfig(DocumentmanagerConfig.Builder builder) { if (containerDocproc != null && containerDocproc.isCompressDocuments()) builder.enablecompression(true); } @Override public void getConfig(ContainerDocumentConfig.Builder builder) { for (Map.Entry<String, String> e : concreteDocumentTypes.entrySet()) { ContainerDocumentConfig.Doctype.Builder dtb = new ContainerDocumentConfig.Doctype.Builder(); dtb.type(e.getKey()); dtb.factorycomponent(e.getValue()); builder.doctype(dtb); } } @Override public void getConfig(HealthMonitorConfig.Builder builder) { Monitoring monitoring = getMonitoringService(); if (monitoring != null) { builder.snapshot_interval(monitoring.getIntervalSeconds()); } } 
@Override public void getConfig(ApplicationMetadataConfig.Builder builder) { if (applicationMetaData != null) { builder.name(applicationMetaData.getApplicationId().application().value()). user(applicationMetaData.getDeployedByUser()). path(applicationMetaData.getDeployPath()). timestamp(applicationMetaData.getDeployTimestamp()). checksum(applicationMetaData.getChecksum()). generation(applicationMetaData.getGeneration()); } } /** * Adds a bundle present at a known location at the target container nodes. * Note that the set of platform bundles cannot change during the jdisc container's lifetime. * * @param bundlePath usually an absolute path, e.g. '$VESPA_HOME/lib/jars/foo.jar' */ public final void addPlatformBundle(Path bundlePath) { platformBundles.add(bundlePath); } @Override public void getConfig(PlatformBundlesConfig.Builder builder) { platformBundles.stream() .map(Path::toString) .forEach(builder::bundlePaths); } @Override public void getConfig(QrSearchersConfig.Builder builder) { if (containerSearch != null) containerSearch.getConfig(builder); } @Override public void getConfig(QrStartConfig.Builder builder) { builder.jvm .verbosegc(false) .availableProcessors(2) .compressedClassSpaceSize(32) .minHeapsize(32) .heapsize(512) .heapSizeAsPercentageOfPhysicalMemory(0) .gcopts(Objects.requireNonNullElse(jvmGCOptions, G1GC)); if (environmentVars != null) { builder.qrs.env(environmentVars); } } @Override public void getConfig(DocprocConfig.Builder builder) { if (containerDocproc != null) containerDocproc.getConfig(builder); } @Override public void getConfig(PageTemplatesConfig.Builder builder) { if (containerSearch != null) containerSearch.getConfig(builder); } @Override public void getConfig(SemanticRulesConfig.Builder builder) { if (containerSearch != null) containerSearch.getConfig(builder); } @Override public void getConfig(QueryProfilesConfig.Builder builder) { if (containerSearch != null) containerSearch.getConfig(builder); } @Override public void 
getConfig(SchemamappingConfig.Builder builder) { if (containerDocproc != null) containerDocproc.getConfig(builder); } @Override public void getConfig(IndexInfoConfig.Builder builder) { if (containerSearch != null) containerSearch.getConfig(builder); } public void initialize(Map<String, AbstractSearchCluster> clusterMap) { if (containerSearch != null) containerSearch.connectSearchClusters(clusterMap); } public void addDefaultSearchAccessLog() { addComponent(new AccessLogComponent(AccessLogComponent.AccessLogType.jsonAccessLog, getName(), isHostedVespa)); } @Override public void getConfig(IlscriptsConfig.Builder builder) { List<AbstractSearchCluster> searchClusters = new ArrayList<>(); searchClusters.addAll(Content.getSearchClusters(getRoot().configModelRepo())); for (AbstractSearchCluster searchCluster : searchClusters) { searchCluster.getConfig(builder); } } @Override public void getConfig(ClusterInfoConfig.Builder builder) { builder.clusterId(name); builder.nodeCount(containers.size()); for (Service service : getDescendantServices()) { builder.services.add(new ClusterInfoConfig.Services.Builder() .index(Integer.parseInt(service.getServicePropertyString("index", "99999"))) .hostname(service.getHostName()) .ports(getPorts(service))); } } /** * Returns a config server config containing the right zone settings (and defaults for the rest). * This is useful to allow applications to find out in which zone they are runnung by having the Zone * object (which is constructed from this config) injected. 
*/ @Override public void getConfig(ConfigserverConfig.Builder builder) { builder.system(zone.system().value()); builder.environment(zone.environment().value()); builder.region(zone.region().value()); } private List<ClusterInfoConfig.Services.Ports.Builder> getPorts(Service service) { List<ClusterInfoConfig.Services.Ports.Builder> builders = new ArrayList<>(); PortsMeta portsMeta = service.getPortsMeta(); for (int i = 0; i < portsMeta.getNumPorts(); i++) { builders.add(new ClusterInfoConfig.Services.Ports.Builder() .number(service.getRelativePort(i)) .tags(ApplicationConfigProducerRoot.getPortTags(portsMeta, i)) ); } return builders; } public boolean isHostedVespa() { return isHostedVespa; } @Override public void getConfig(RoutingProviderConfig.Builder builder) { builder.enabled(isHostedVespa); } public Map<String, String> concreteDocumentTypes() { return concreteDocumentTypes; } /** The configured service aliases for the service in this cluster */ public List<String> serviceAliases() { return serviceAliases; } /** The configured endpoint aliases (fqdn) for the service in this cluster */ public List<String> endpointAliases() { return endpointAliases; } public void setHostClusterId(String clusterId) { hostClusterId = clusterId; } /** * Returns the id of the content cluster which hosts this container cluster, if any. * This is only set with hosted clusters where this container cluster is set up to run on the nodes * of a content cluster. 
*/ public Optional<String> getHostClusterId() { return Optional.ofNullable(hostClusterId); } public void setJvmGCOptions(String opts) { this.jvmGCOptions = opts; } public void setEnvironmentVars(String environmentVars) { this.environmentVars = environmentVars; } public Optional<String> getJvmGCOptions() { return Optional.ofNullable(jvmGCOptions); } public final void setRpcServerEnabled(boolean rpcServerEnabled) { this.rpcServerEnabled = rpcServerEnabled; } boolean rpcServerEnabled() { return rpcServerEnabled; } boolean httpServerEnabled() { return httpServerEnabled; } public void setHttpServerEnabled(boolean httpServerEnabled) { this.httpServerEnabled = httpServerEnabled; } @Override public String toString() { return "container cluster '" + getName() + "'"; } protected abstract boolean messageBusEnabled(); }
Remove these debug `System.out.println` statements, or replace them with proper logging at an appropriate level.
public Version storedVersion() { if (distributeApplicationPackage.value()) { Optional<byte[]> version = curator.getData(versionPath); if(version.isPresent()) { System.out.println("Found version in zk "); try { return Version.fromString(Utf8.toString(version.get())); } catch (Exception e) { return new Version(0, 0, 0); } } } try (FileReader reader = new FileReader(versionFile)) { System.out.println("Found version in file "); return Version.fromString(IOUtils.readAll(reader)); } catch (Exception e) { return new Version(0, 0, 0); } }
System.out.println("Found version in zk ");
public Version storedVersion() { if (distributeApplicationPackage.value()) { Optional<byte[]> version = curator.getData(versionPath); if(version.isPresent()) { try { return Version.fromString(Utf8.toString(version.get())); } catch (Exception e) { } } } try (FileReader reader = new FileReader(versionFile)) { return Version.fromString(IOUtils.readAll(reader)); } catch (Exception e) { return new Version(0, 0, 0); } }
class VersionState { static final Path versionPath = Path.fromString("/config/v2/vespa_version"); private final File versionFile; private final Curator curator; private final BooleanFlag distributeApplicationPackage; @Inject public VersionState(ConfigserverConfig config, Curator curator, FlagSource flagsource) { this(new File(Defaults.getDefaults().underVespaHome(config.configServerDBDir()), "vespa_version"), curator, flagsource); } public VersionState(File versionFile, Curator curator, FlagSource flagSource) { this.versionFile = versionFile; this.curator = curator; this.distributeApplicationPackage = Flags.CONFIGSERVER_DISTRIBUTE_APPLICATION_PACKAGE.bindTo(flagSource); } public boolean isUpgraded() { System.out.println("current version: " + currentVersion() + ", stored version: " + storedVersion()); return currentVersion().compareTo(storedVersion()) > 0; } public void saveNewVersion() { saveNewVersion(currentVersion().toFullString()); } public void saveNewVersion(String vespaVersion) { curator.set(versionPath, Utf8.toBytes(vespaVersion)); try (FileWriter writer = new FileWriter(versionFile)) { writer.write(vespaVersion); } catch (IOException e) { throw new RuntimeException(e); } } public Version currentVersion() { return new Version(VespaVersion.major, VespaVersion.minor, VespaVersion.micro); } File versionFile() { return versionFile; } @Override public String toString() { return String.format("Current version:%s, stored version:%s", currentVersion(), storedVersion()); } }
class VersionState { static final Path versionPath = Path.fromString("/config/v2/vespa_version"); private final File versionFile; private final Curator curator; private final BooleanFlag distributeApplicationPackage; @Inject public VersionState(ConfigserverConfig config, Curator curator, FlagSource flagsource) { this(new File(Defaults.getDefaults().underVespaHome(config.configServerDBDir()), "vespa_version"), curator, flagsource); } public VersionState(File versionFile, Curator curator, FlagSource flagSource) { this.versionFile = versionFile; this.curator = curator; this.distributeApplicationPackage = Flags.CONFIGSERVER_DISTRIBUTE_APPLICATION_PACKAGE.bindTo(flagSource); } public boolean isUpgraded() { return currentVersion().compareTo(storedVersion()) > 0; } public void saveNewVersion() { saveNewVersion(currentVersion().toFullString()); } public void saveNewVersion(String vespaVersion) { curator.set(versionPath, Utf8.toBytes(vespaVersion)); try (FileWriter writer = new FileWriter(versionFile)) { writer.write(vespaVersion); } catch (IOException e) { throw new RuntimeException(e); } } public Version currentVersion() { return new Version(VespaVersion.major, VespaVersion.minor, VespaVersion.micro); } File versionFile() { return versionFile; } @Override public String toString() { return String.format("Current version:%s, stored version:%s", currentVersion(), storedVersion()); } }
Same as above: use the `EMPTY` constant here as well, instead of `EmptyBillingContact`.
private TenantInfoBillingContact tenantInfoBillingContactFromSlime(Inspector billingObject) { return TenantInfoBillingContact.EmptyBillingContact .withName(billingObject.field("name").asString()) .withEmail(billingObject.field("email").asString()) .withPhone(billingObject.field("phone").asString()) .withAddress(tenantInfoAddressFromSlime(billingObject.field("address"))); }
return TenantInfoBillingContact.EmptyBillingContact
private TenantInfoBillingContact tenantInfoBillingContactFromSlime(Inspector billingObject) { return TenantInfoBillingContact.EMPTY .withName(billingObject.field("name").asString()) .withEmail(billingObject.field("email").asString()) .withPhone(billingObject.field("phone").asString()) .withAddress(tenantInfoAddressFromSlime(billingObject.field("address"))); }
class TenantSerializer { private static final String nameField = "name"; private static final String typeField = "type"; private static final String athenzDomainField = "athenzDomain"; private static final String propertyField = "property"; private static final String propertyIdField = "propertyId"; private static final String creatorField = "creator"; private static final String createdAtField = "createdAt"; private static final String contactField = "contact"; private static final String contactUrlField = "contactUrl"; private static final String propertyUrlField = "propertyUrl"; private static final String issueTrackerUrlField = "issueTrackerUrl"; private static final String personsField = "persons"; private static final String personField = "person"; private static final String queueField = "queue"; private static final String componentField = "component"; private static final String billingInfoField = "billingInfo"; private static final String customerIdField = "customerId"; private static final String productCodeField = "productCode"; private static final String pemDeveloperKeysField = "pemDeveloperKeys"; private static final String tenantInfo = "info"; public Slime toSlime(Tenant tenant) { Slime slime = new Slime(); Cursor tenantObject = slime.setObject(); tenantObject.setString(nameField, tenant.name().value()); tenantObject.setString(typeField, valueOf(tenant.type())); switch (tenant.type()) { case athenz: toSlime((AthenzTenant) tenant, tenantObject); break; case cloud: toSlime((CloudTenant) tenant, tenantObject); break; default: throw new IllegalArgumentException("Unexpected tenant type '" + tenant.type() + "'."); } return slime; } private void toSlime(AthenzTenant tenant, Cursor tenantObject) { tenantObject.setString(athenzDomainField, tenant.domain().getName()); tenantObject.setString(propertyField, tenant.property().id()); tenant.propertyId().ifPresent(propertyId -> tenantObject.setString(propertyIdField, propertyId.id())); 
tenant.contact().ifPresent(contact -> { Cursor contactCursor = tenantObject.setObject(contactField); writeContact(contact, contactCursor); }); } private void toSlime(CloudTenant tenant, Cursor root) { var legacyBillingInfo = new BillingInfo("customer", "Vespa"); tenant.creator().ifPresent(creator -> root.setString(creatorField, creator.getName())); developerKeysToSlime(tenant.developerKeys(), root.setArray(pemDeveloperKeysField)); toSlime(legacyBillingInfo, root.setObject(billingInfoField)); toSlime(tenant.info(), root); } private void developerKeysToSlime(BiMap<PublicKey, Principal> keys, Cursor array) { keys.forEach((key, user) -> { Cursor object = array.addObject(); object.setString("key", KeyUtils.toPem(key)); object.setString("user", user.getName()); }); } private void toSlime(BillingInfo billingInfo, Cursor billingInfoObject) { billingInfoObject.setString(customerIdField, billingInfo.customerId()); billingInfoObject.setString(productCodeField, billingInfo.productCode()); } public Tenant tenantFrom(Slime slime) { Inspector tenantObject = slime.get(); Tenant.Type type; type = typeOf(tenantObject.field(typeField).asString()); switch (type) { case athenz: return athenzTenantFrom(tenantObject); case cloud: return cloudTenantFrom(tenantObject); default: throw new IllegalArgumentException("Unexpected tenant type '" + type + "'."); } } private AthenzTenant athenzTenantFrom(Inspector tenantObject) { TenantName name = TenantName.from(tenantObject.field(nameField).asString()); AthenzDomain domain = new AthenzDomain(tenantObject.field(athenzDomainField).asString()); Property property = new Property(tenantObject.field(propertyField).asString()); Optional<PropertyId> propertyId = SlimeUtils.optionalString(tenantObject.field(propertyIdField)).map(PropertyId::new); Optional<Contact> contact = contactFrom(tenantObject.field(contactField)); return new AthenzTenant(name, domain, property, propertyId, contact); } private CloudTenant cloudTenantFrom(Inspector tenantObject) { 
TenantName name = TenantName.from(tenantObject.field(nameField).asString()); Optional<Principal> creator = SlimeUtils.optionalString(tenantObject.field(creatorField)).map(SimplePrincipal::new); BiMap<PublicKey, Principal> developerKeys = developerKeysFromSlime(tenantObject.field(pemDeveloperKeysField)); TenantInfo info = tenantInfoFromSlime(tenantObject.field(tenantInfo)); return new CloudTenant(name, creator, developerKeys, info); } private BiMap<PublicKey, Principal> developerKeysFromSlime(Inspector array) { ImmutableBiMap.Builder<PublicKey, Principal> keys = ImmutableBiMap.builder(); array.traverse((ArrayTraverser) (__, keyObject) -> keys.put(KeyUtils.fromPemEncodedPublicKey(keyObject.field("key").asString()), new SimplePrincipal(keyObject.field("user").asString()))); return keys.build(); } private TenantInfo tenantInfoFromSlime(Inspector infoObject) { return TenantInfo.EmptyInfo .withName(infoObject.field("name").asString()) .withEmail(infoObject.field("email").asString()) .withWebsite(infoObject.field("website").asString()) .withContactName(infoObject.field("contactName").asString()) .withContactEmail(infoObject.field("contactEmail").asString()) .withAddress(tenantInfoAddressFromSlime(infoObject.field("address"))) .withBillingContact(tenantInfoBillingContactFromSlime(infoObject.field("billingContact"))); } private TenantInfoAddress tenantInfoAddressFromSlime(Inspector addressObject) { return TenantInfoAddress.EmptyAddress .withAddressLines(addressObject.field("addressLines").asString()) .withPostalCodeOrZip(addressObject.field("postalCodeOrZip").asString()) .withCity(addressObject.field("city").asString()) .withStateRegionProvince(addressObject.field("stateRegionProvince").asString()) .withCountry(addressObject.field("country").asString()); } public void toSlime(TenantInfo info, Cursor parentCursor) { if (info.isEmpty()) return; Cursor infoCursor = parentCursor.setObject("info"); infoCursor.setString("name", info.name()); infoCursor.setString("email", 
info.email()); infoCursor.setString("website", info.website()); infoCursor.setString("contactName", info.contactName()); infoCursor.setString("contactEmail", info.contactEmail()); toSlime(info.address(), infoCursor); toSlime(info.billingContact(), infoCursor); } private void toSlime(TenantInfoAddress address, Cursor parentCursor) { if (address.isEmpty()) return; Cursor addressCursor = parentCursor.setObject("address"); addressCursor.setString("addressLines", address.addressLines()); addressCursor.setString("postalCodeOrZip", address.postalCodeOrZip()); addressCursor.setString("city", address.city()); addressCursor.setString("stateRegionProvince", address.stateRegionProvince()); addressCursor.setString("country", address.country()); } private void toSlime(TenantInfoBillingContact billingContact, Cursor parentCursor) { if (billingContact.isEmpty()) return; Cursor addressCursor = parentCursor.setObject("address"); addressCursor.setString("name", billingContact.name()); addressCursor.setString("email", billingContact.email()); addressCursor.setString("phone", billingContact.phone()); toSlime(billingContact.address(), addressCursor); } private Optional<Contact> contactFrom(Inspector object) { if ( ! object.valid()) return Optional.empty(); URI contactUrl = URI.create(object.field(contactUrlField).asString()); URI propertyUrl = URI.create(object.field(propertyUrlField).asString()); URI issueTrackerUrl = URI.create(object.field(issueTrackerUrlField).asString()); List<List<String>> persons = personsFrom(object.field(personsField)); String queue = object.field(queueField).asString(); Optional<String> component = object.field(componentField).valid() ? 
Optional.of(object.field(componentField).asString()) : Optional.empty(); return Optional.of(new Contact(contactUrl, propertyUrl, issueTrackerUrl, persons, queue, component)); } private void writeContact(Contact contact, Cursor contactCursor) { contactCursor.setString(contactUrlField, contact.url().toString()); contactCursor.setString(propertyUrlField, contact.propertyUrl().toString()); contactCursor.setString(issueTrackerUrlField, contact.issueTrackerUrl().toString()); Cursor personsArray = contactCursor.setArray(personsField); contact.persons().forEach(personList -> { Cursor personArray = personsArray.addArray(); personList.forEach(person -> { Cursor personObject = personArray.addObject(); personObject.setString(personField, person); }); }); contactCursor.setString(queueField, contact.queue()); contact.component().ifPresent(component -> contactCursor.setString(componentField, component)); } private List<List<String>> personsFrom(Inspector array) { List<List<String>> personLists = new ArrayList<>(); array.traverse((ArrayTraverser) (i, personArray) -> { List<String> persons = new ArrayList<>(); personArray.traverse((ArrayTraverser) (j, inspector) -> persons.add(inspector.field("person").asString())); personLists.add(persons); }); return personLists; } private BillingInfo billingInfoFrom(Inspector billingInfoObject) { return new BillingInfo(billingInfoObject.field(customerIdField).asString(), billingInfoObject.field(productCodeField).asString()); } private static Tenant.Type typeOf(String value) { switch (value) { case "athenz": return Tenant.Type.athenz; case "cloud": return Tenant.Type.cloud; default: throw new IllegalArgumentException("Unknown tenant type '" + value + "'."); } } private static String valueOf(Tenant.Type type) { switch (type) { case athenz: return "athenz"; case cloud: return "cloud"; default: throw new IllegalArgumentException("Unexpected tenant type '" + type + "'."); } } }
/**
 * Serializes {@link Tenant} objects to and from Slime.
 *
 * Keys written by the toSlime methods are the same keys read back by the
 * corresponding *FromSlime methods, so tenants round-trip through storage.
 */
class TenantSerializer {

    private static final String nameField = "name";
    private static final String typeField = "type";
    private static final String athenzDomainField = "athenzDomain";
    private static final String propertyField = "property";
    private static final String propertyIdField = "propertyId";
    private static final String creatorField = "creator";
    private static final String createdAtField = "createdAt";
    private static final String contactField = "contact";
    private static final String contactUrlField = "contactUrl";
    private static final String propertyUrlField = "propertyUrl";
    private static final String issueTrackerUrlField = "issueTrackerUrl";
    private static final String personsField = "persons";
    private static final String personField = "person";
    private static final String queueField = "queue";
    private static final String componentField = "component";
    private static final String billingInfoField = "billingInfo";
    private static final String customerIdField = "customerId";
    private static final String productCodeField = "productCode";
    private static final String pemDeveloperKeysField = "pemDeveloperKeys";
    private static final String tenantInfoField = "info";

    /** Serializes the given tenant, dispatching on its type. */
    public Slime toSlime(Tenant tenant) {
        Slime slime = new Slime();
        Cursor tenantObject = slime.setObject();
        tenantObject.setString(nameField, tenant.name().value());
        tenantObject.setString(typeField, valueOf(tenant.type()));
        switch (tenant.type()) {
            case athenz: toSlime((AthenzTenant) tenant, tenantObject); break;
            case cloud: toSlime((CloudTenant) tenant, tenantObject); break;
            default: throw new IllegalArgumentException("Unexpected tenant type '" + tenant.type() + "'.");
        }
        return slime;
    }

    private void toSlime(AthenzTenant tenant, Cursor tenantObject) {
        tenantObject.setString(athenzDomainField, tenant.domain().getName());
        tenantObject.setString(propertyField, tenant.property().id());
        tenant.propertyId().ifPresent(propertyId -> tenantObject.setString(propertyIdField, propertyId.id()));
        tenant.contact().ifPresent(contact -> {
            Cursor contactCursor = tenantObject.setObject(contactField);
            writeContact(contact, contactCursor);
        });
    }

    private void toSlime(CloudTenant tenant, Cursor root) {
        // Billing info is not yet modeled on the tenant; a fixed legacy value is stored.
        var legacyBillingInfo = new BillingInfo("customer", "Vespa");
        tenant.creator().ifPresent(creator -> root.setString(creatorField, creator.getName()));
        developerKeysToSlime(tenant.developerKeys(), root.setArray(pemDeveloperKeysField));
        toSlime(legacyBillingInfo, root.setObject(billingInfoField));
        toSlime(tenant.info(), root);
    }

    private void developerKeysToSlime(BiMap<PublicKey, Principal> keys, Cursor array) {
        keys.forEach((key, user) -> {
            Cursor object = array.addObject();
            object.setString("key", KeyUtils.toPem(key));
            object.setString("user", user.getName());
        });
    }

    private void toSlime(BillingInfo billingInfo, Cursor billingInfoObject) {
        billingInfoObject.setString(customerIdField, billingInfo.customerId());
        billingInfoObject.setString(productCodeField, billingInfo.productCode());
    }

    /** Deserializes a tenant, dispatching on its stored type field. */
    public Tenant tenantFrom(Slime slime) {
        Inspector tenantObject = slime.get();
        Tenant.Type type;
        type = typeOf(tenantObject.field(typeField).asString());
        switch (type) {
            case athenz: return athenzTenantFrom(tenantObject);
            case cloud: return cloudTenantFrom(tenantObject);
            default: throw new IllegalArgumentException("Unexpected tenant type '" + type + "'.");
        }
    }

    private AthenzTenant athenzTenantFrom(Inspector tenantObject) {
        TenantName name = TenantName.from(tenantObject.field(nameField).asString());
        AthenzDomain domain = new AthenzDomain(tenantObject.field(athenzDomainField).asString());
        Property property = new Property(tenantObject.field(propertyField).asString());
        Optional<PropertyId> propertyId = SlimeUtils.optionalString(tenantObject.field(propertyIdField)).map(PropertyId::new);
        Optional<Contact> contact = contactFrom(tenantObject.field(contactField));
        return new AthenzTenant(name, domain, property, propertyId, contact);
    }

    private CloudTenant cloudTenantFrom(Inspector tenantObject) {
        TenantName name = TenantName.from(tenantObject.field(nameField).asString());
        Optional<Principal> creator = SlimeUtils.optionalString(tenantObject.field(creatorField)).map(SimplePrincipal::new);
        BiMap<PublicKey, Principal> developerKeys = developerKeysFromSlime(tenantObject.field(pemDeveloperKeysField));
        TenantInfo info = tenantInfoFromSlime(tenantObject.field(tenantInfoField));
        return new CloudTenant(name, creator, developerKeys, info);
    }

    private BiMap<PublicKey, Principal> developerKeysFromSlime(Inspector array) {
        ImmutableBiMap.Builder<PublicKey, Principal> keys = ImmutableBiMap.builder();
        array.traverse((ArrayTraverser) (__, keyObject) ->
                keys.put(KeyUtils.fromPemEncodedPublicKey(keyObject.field("key").asString()),
                         new SimplePrincipal(keyObject.field("user").asString())));
        return keys.build();
    }

    /** Reads tenant info; an absent/invalid "info" field yields the empty info. */
    TenantInfo tenantInfoFromSlime(Inspector infoObject) {
        if (!infoObject.valid()) return TenantInfo.EMPTY;
        return TenantInfo.EMPTY
                .withName(infoObject.field("name").asString())
                .withEmail(infoObject.field("email").asString())
                .withWebsite(infoObject.field("website").asString())
                .withContactName(infoObject.field("contactName").asString())
                .withContactEmail(infoObject.field("contactEmail").asString())
                .withInvoiceEmail(infoObject.field("invoiceEmail").asString())
                .withAddress(tenantInfoAddressFromSlime(infoObject.field("address")))
                .withBillingContact(tenantInfoBillingContactFromSlime(infoObject.field("billingContact")));
    }

    private TenantInfoAddress tenantInfoAddressFromSlime(Inspector addressObject) {
        return TenantInfoAddress.EMPTY
                .withAddressLines(addressObject.field("addressLines").asString())
                .withPostalCodeOrZip(addressObject.field("postalCodeOrZip").asString())
                .withCity(addressObject.field("city").asString())
                .withStateRegionProvince(addressObject.field("stateRegionProvince").asString())
                .withCountry(addressObject.field("country").asString());
    }

    /** Writes tenant info under "info"; empty info writes nothing. */
    void toSlime(TenantInfo info, Cursor parentCursor) {
        if (info.isEmpty()) return;
        Cursor infoCursor = parentCursor.setObject("info");
        infoCursor.setString("name", info.name());
        infoCursor.setString("email", info.email());
        infoCursor.setString("website", info.website());
        infoCursor.setString("invoiceEmail", info.invoiceEmail());
        infoCursor.setString("contactName", info.contactName());
        infoCursor.setString("contactEmail", info.contactEmail());
        toSlime(info.address(), infoCursor);
        toSlime(info.billingContact(), infoCursor);
    }

    private void toSlime(TenantInfoAddress address, Cursor parentCursor) {
        if (address.isEmpty()) return;
        Cursor addressCursor = parentCursor.setObject("address");
        addressCursor.setString("addressLines", address.addressLines());
        addressCursor.setString("postalCodeOrZip", address.postalCodeOrZip());
        addressCursor.setString("city", address.city());
        addressCursor.setString("stateRegionProvince", address.stateRegionProvince());
        addressCursor.setString("country", address.country());
    }

    private void toSlime(TenantInfoBillingContact billingContact, Cursor parentCursor) {
        if (billingContact.isEmpty()) return;
        // Written under "billingContact" — the key tenantInfoFromSlime reads back
        Cursor addressCursor = parentCursor.setObject("billingContact");
        addressCursor.setString("name", billingContact.name());
        addressCursor.setString("email", billingContact.email());
        addressCursor.setString("phone", billingContact.phone());
        toSlime(billingContact.address(), addressCursor);
    }

    private Optional<Contact> contactFrom(Inspector object) {
        if ( ! object.valid()) return Optional.empty();
        URI contactUrl = URI.create(object.field(contactUrlField).asString());
        URI propertyUrl = URI.create(object.field(propertyUrlField).asString());
        URI issueTrackerUrl = URI.create(object.field(issueTrackerUrlField).asString());
        List<List<String>> persons = personsFrom(object.field(personsField));
        String queue = object.field(queueField).asString();
        Optional<String> component = object.field(componentField).valid() ?
                                     Optional.of(object.field(componentField).asString()) :
                                     Optional.empty();
        return Optional.of(new Contact(contactUrl, propertyUrl, issueTrackerUrl, persons, queue, component));
    }

    private void writeContact(Contact contact, Cursor contactCursor) {
        contactCursor.setString(contactUrlField, contact.url().toString());
        contactCursor.setString(propertyUrlField, contact.propertyUrl().toString());
        contactCursor.setString(issueTrackerUrlField, contact.issueTrackerUrl().toString());
        Cursor personsArray = contactCursor.setArray(personsField);
        contact.persons().forEach(personList -> {
            Cursor personArray = personsArray.addArray();
            personList.forEach(person -> {
                Cursor personObject = personArray.addObject();
                personObject.setString(personField, person);
            });
        });
        contactCursor.setString(queueField, contact.queue());
        contact.component().ifPresent(component -> contactCursor.setString(componentField, component));
    }

    private List<List<String>> personsFrom(Inspector array) {
        List<List<String>> personLists = new ArrayList<>();
        array.traverse((ArrayTraverser) (i, personArray) -> {
            List<String> persons = new ArrayList<>();
            personArray.traverse((ArrayTraverser) (j, inspector) -> persons.add(inspector.field("person").asString()));
            personLists.add(persons);
        });
        return personLists;
    }

    private BillingInfo billingInfoFrom(Inspector billingInfoObject) {
        return new BillingInfo(billingInfoObject.field(customerIdField).asString(),
                               billingInfoObject.field(productCodeField).asString());
    }

    private static Tenant.Type typeOf(String value) {
        switch (value) {
            case "athenz": return Tenant.Type.athenz;
            case "cloud": return Tenant.Type.cloud;
            default: throw new IllegalArgumentException("Unknown tenant type '" + value + "'.");
        }
    }

    private static String valueOf(Tenant.Type type) {
        switch (type) {
            case athenz: return "athenz";
            case cloud: return "cloud";
            default: throw new IllegalArgumentException("Unexpected tenant type '" + type + "'.");
        }
    }
}
Consider falling back to reading the version from the file instead of returning 0.0.0 when the value stored in ZooKeeper cannot be parsed.
public Version storedVersion() { if (distributeApplicationPackage.value()) { Optional<byte[]> version = curator.getData(versionPath); if(version.isPresent()) { System.out.println("Found version in zk "); try { return Version.fromString(Utf8.toString(version.get())); } catch (Exception e) { return new Version(0, 0, 0); } } } try (FileReader reader = new FileReader(versionFile)) { System.out.println("Found version in file "); return Version.fromString(IOUtils.readAll(reader)); } catch (Exception e) { return new Version(0, 0, 0); } }
return new Version(0, 0, 0);
public Version storedVersion() { if (distributeApplicationPackage.value()) { Optional<byte[]> version = curator.getData(versionPath); if(version.isPresent()) { try { return Version.fromString(Utf8.toString(version.get())); } catch (Exception e) { } } } try (FileReader reader = new FileReader(versionFile)) { return Version.fromString(IOUtils.readAll(reader)); } catch (Exception e) { return new Version(0, 0, 0); } }
class VersionState { static final Path versionPath = Path.fromString("/config/v2/vespa_version"); private final File versionFile; private final Curator curator; private final BooleanFlag distributeApplicationPackage; @Inject public VersionState(ConfigserverConfig config, Curator curator, FlagSource flagsource) { this(new File(Defaults.getDefaults().underVespaHome(config.configServerDBDir()), "vespa_version"), curator, flagsource); } public VersionState(File versionFile, Curator curator, FlagSource flagSource) { this.versionFile = versionFile; this.curator = curator; this.distributeApplicationPackage = Flags.CONFIGSERVER_DISTRIBUTE_APPLICATION_PACKAGE.bindTo(flagSource); } public boolean isUpgraded() { System.out.println("current version: " + currentVersion() + ", stored version: " + storedVersion()); return currentVersion().compareTo(storedVersion()) > 0; } public void saveNewVersion() { saveNewVersion(currentVersion().toFullString()); } public void saveNewVersion(String vespaVersion) { curator.set(versionPath, Utf8.toBytes(vespaVersion)); try (FileWriter writer = new FileWriter(versionFile)) { writer.write(vespaVersion); } catch (IOException e) { throw new RuntimeException(e); } } public Version currentVersion() { return new Version(VespaVersion.major, VespaVersion.minor, VespaVersion.micro); } File versionFile() { return versionFile; } @Override public String toString() { return String.format("Current version:%s, stored version:%s", currentVersion(), storedVersion()); } }
class VersionState { static final Path versionPath = Path.fromString("/config/v2/vespa_version"); private final File versionFile; private final Curator curator; private final BooleanFlag distributeApplicationPackage; @Inject public VersionState(ConfigserverConfig config, Curator curator, FlagSource flagsource) { this(new File(Defaults.getDefaults().underVespaHome(config.configServerDBDir()), "vespa_version"), curator, flagsource); } public VersionState(File versionFile, Curator curator, FlagSource flagSource) { this.versionFile = versionFile; this.curator = curator; this.distributeApplicationPackage = Flags.CONFIGSERVER_DISTRIBUTE_APPLICATION_PACKAGE.bindTo(flagSource); } public boolean isUpgraded() { return currentVersion().compareTo(storedVersion()) > 0; } public void saveNewVersion() { saveNewVersion(currentVersion().toFullString()); } public void saveNewVersion(String vespaVersion) { curator.set(versionPath, Utf8.toBytes(vespaVersion)); try (FileWriter writer = new FileWriter(versionFile)) { writer.write(vespaVersion); } catch (IOException e) { throw new RuntimeException(e); } } public Version currentVersion() { return new Version(VespaVersion.major, VespaVersion.minor, VespaVersion.micro); } File versionFile() { return versionFile; } @Override public String toString() { return String.format("Current version:%s, stored version:%s", currentVersion(), storedVersion()); } }
Remove or log
public Version storedVersion() { if (distributeApplicationPackage.value()) { Optional<byte[]> version = curator.getData(versionPath); if(version.isPresent()) { System.out.println("Found version in zk "); try { return Version.fromString(Utf8.toString(version.get())); } catch (Exception e) { return new Version(0, 0, 0); } } } try (FileReader reader = new FileReader(versionFile)) { System.out.println("Found version in file "); return Version.fromString(IOUtils.readAll(reader)); } catch (Exception e) { return new Version(0, 0, 0); } }
System.out.println("Found version in file ");
public Version storedVersion() { if (distributeApplicationPackage.value()) { Optional<byte[]> version = curator.getData(versionPath); if(version.isPresent()) { try { return Version.fromString(Utf8.toString(version.get())); } catch (Exception e) { } } } try (FileReader reader = new FileReader(versionFile)) { return Version.fromString(IOUtils.readAll(reader)); } catch (Exception e) { return new Version(0, 0, 0); } }
class VersionState { static final Path versionPath = Path.fromString("/config/v2/vespa_version"); private final File versionFile; private final Curator curator; private final BooleanFlag distributeApplicationPackage; @Inject public VersionState(ConfigserverConfig config, Curator curator, FlagSource flagsource) { this(new File(Defaults.getDefaults().underVespaHome(config.configServerDBDir()), "vespa_version"), curator, flagsource); } public VersionState(File versionFile, Curator curator, FlagSource flagSource) { this.versionFile = versionFile; this.curator = curator; this.distributeApplicationPackage = Flags.CONFIGSERVER_DISTRIBUTE_APPLICATION_PACKAGE.bindTo(flagSource); } public boolean isUpgraded() { System.out.println("current version: " + currentVersion() + ", stored version: " + storedVersion()); return currentVersion().compareTo(storedVersion()) > 0; } public void saveNewVersion() { saveNewVersion(currentVersion().toFullString()); } public void saveNewVersion(String vespaVersion) { curator.set(versionPath, Utf8.toBytes(vespaVersion)); try (FileWriter writer = new FileWriter(versionFile)) { writer.write(vespaVersion); } catch (IOException e) { throw new RuntimeException(e); } } public Version currentVersion() { return new Version(VespaVersion.major, VespaVersion.minor, VespaVersion.micro); } File versionFile() { return versionFile; } @Override public String toString() { return String.format("Current version:%s, stored version:%s", currentVersion(), storedVersion()); } }
class VersionState { static final Path versionPath = Path.fromString("/config/v2/vespa_version"); private final File versionFile; private final Curator curator; private final BooleanFlag distributeApplicationPackage; @Inject public VersionState(ConfigserverConfig config, Curator curator, FlagSource flagsource) { this(new File(Defaults.getDefaults().underVespaHome(config.configServerDBDir()), "vespa_version"), curator, flagsource); } public VersionState(File versionFile, Curator curator, FlagSource flagSource) { this.versionFile = versionFile; this.curator = curator; this.distributeApplicationPackage = Flags.CONFIGSERVER_DISTRIBUTE_APPLICATION_PACKAGE.bindTo(flagSource); } public boolean isUpgraded() { return currentVersion().compareTo(storedVersion()) > 0; } public void saveNewVersion() { saveNewVersion(currentVersion().toFullString()); } public void saveNewVersion(String vespaVersion) { curator.set(versionPath, Utf8.toBytes(vespaVersion)); try (FileWriter writer = new FileWriter(versionFile)) { writer.write(vespaVersion); } catch (IOException e) { throw new RuntimeException(e); } } public Version currentVersion() { return new Version(VespaVersion.major, VespaVersion.minor, VespaVersion.micro); } File versionFile() { return versionFile; } @Override public String toString() { return String.format("Current version:%s, stored version:%s", currentVersion(), storedVersion()); } }
Good point, fixed
public Version storedVersion() { if (distributeApplicationPackage.value()) { Optional<byte[]> version = curator.getData(versionPath); if(version.isPresent()) { System.out.println("Found version in zk "); try { return Version.fromString(Utf8.toString(version.get())); } catch (Exception e) { return new Version(0, 0, 0); } } } try (FileReader reader = new FileReader(versionFile)) { System.out.println("Found version in file "); return Version.fromString(IOUtils.readAll(reader)); } catch (Exception e) { return new Version(0, 0, 0); } }
return new Version(0, 0, 0);
public Version storedVersion() { if (distributeApplicationPackage.value()) { Optional<byte[]> version = curator.getData(versionPath); if(version.isPresent()) { try { return Version.fromString(Utf8.toString(version.get())); } catch (Exception e) { } } } try (FileReader reader = new FileReader(versionFile)) { return Version.fromString(IOUtils.readAll(reader)); } catch (Exception e) { return new Version(0, 0, 0); } }
class VersionState { static final Path versionPath = Path.fromString("/config/v2/vespa_version"); private final File versionFile; private final Curator curator; private final BooleanFlag distributeApplicationPackage; @Inject public VersionState(ConfigserverConfig config, Curator curator, FlagSource flagsource) { this(new File(Defaults.getDefaults().underVespaHome(config.configServerDBDir()), "vespa_version"), curator, flagsource); } public VersionState(File versionFile, Curator curator, FlagSource flagSource) { this.versionFile = versionFile; this.curator = curator; this.distributeApplicationPackage = Flags.CONFIGSERVER_DISTRIBUTE_APPLICATION_PACKAGE.bindTo(flagSource); } public boolean isUpgraded() { System.out.println("current version: " + currentVersion() + ", stored version: " + storedVersion()); return currentVersion().compareTo(storedVersion()) > 0; } public void saveNewVersion() { saveNewVersion(currentVersion().toFullString()); } public void saveNewVersion(String vespaVersion) { curator.set(versionPath, Utf8.toBytes(vespaVersion)); try (FileWriter writer = new FileWriter(versionFile)) { writer.write(vespaVersion); } catch (IOException e) { throw new RuntimeException(e); } } public Version currentVersion() { return new Version(VespaVersion.major, VespaVersion.minor, VespaVersion.micro); } File versionFile() { return versionFile; } @Override public String toString() { return String.format("Current version:%s, stored version:%s", currentVersion(), storedVersion()); } }
class VersionState { static final Path versionPath = Path.fromString("/config/v2/vespa_version"); private final File versionFile; private final Curator curator; private final BooleanFlag distributeApplicationPackage; @Inject public VersionState(ConfigserverConfig config, Curator curator, FlagSource flagsource) { this(new File(Defaults.getDefaults().underVespaHome(config.configServerDBDir()), "vespa_version"), curator, flagsource); } public VersionState(File versionFile, Curator curator, FlagSource flagSource) { this.versionFile = versionFile; this.curator = curator; this.distributeApplicationPackage = Flags.CONFIGSERVER_DISTRIBUTE_APPLICATION_PACKAGE.bindTo(flagSource); } public boolean isUpgraded() { return currentVersion().compareTo(storedVersion()) > 0; } public void saveNewVersion() { saveNewVersion(currentVersion().toFullString()); } public void saveNewVersion(String vespaVersion) { curator.set(versionPath, Utf8.toBytes(vespaVersion)); try (FileWriter writer = new FileWriter(versionFile)) { writer.write(vespaVersion); } catch (IOException e) { throw new RuntimeException(e); } } public Version currentVersion() { return new Version(VespaVersion.major, VespaVersion.minor, VespaVersion.micro); } File versionFile() { return versionFile; } @Override public String toString() { return String.format("Current version:%s, stored version:%s", currentVersion(), storedVersion()); } }
Fixed
public Version storedVersion() { if (distributeApplicationPackage.value()) { Optional<byte[]> version = curator.getData(versionPath); if(version.isPresent()) { System.out.println("Found version in zk "); try { return Version.fromString(Utf8.toString(version.get())); } catch (Exception e) { return new Version(0, 0, 0); } } } try (FileReader reader = new FileReader(versionFile)) { System.out.println("Found version in file "); return Version.fromString(IOUtils.readAll(reader)); } catch (Exception e) { return new Version(0, 0, 0); } }
System.out.println("Found version in zk ");
public Version storedVersion() { if (distributeApplicationPackage.value()) { Optional<byte[]> version = curator.getData(versionPath); if(version.isPresent()) { try { return Version.fromString(Utf8.toString(version.get())); } catch (Exception e) { } } } try (FileReader reader = new FileReader(versionFile)) { return Version.fromString(IOUtils.readAll(reader)); } catch (Exception e) { return new Version(0, 0, 0); } }
class VersionState { static final Path versionPath = Path.fromString("/config/v2/vespa_version"); private final File versionFile; private final Curator curator; private final BooleanFlag distributeApplicationPackage; @Inject public VersionState(ConfigserverConfig config, Curator curator, FlagSource flagsource) { this(new File(Defaults.getDefaults().underVespaHome(config.configServerDBDir()), "vespa_version"), curator, flagsource); } public VersionState(File versionFile, Curator curator, FlagSource flagSource) { this.versionFile = versionFile; this.curator = curator; this.distributeApplicationPackage = Flags.CONFIGSERVER_DISTRIBUTE_APPLICATION_PACKAGE.bindTo(flagSource); } public boolean isUpgraded() { System.out.println("current version: " + currentVersion() + ", stored version: " + storedVersion()); return currentVersion().compareTo(storedVersion()) > 0; } public void saveNewVersion() { saveNewVersion(currentVersion().toFullString()); } public void saveNewVersion(String vespaVersion) { curator.set(versionPath, Utf8.toBytes(vespaVersion)); try (FileWriter writer = new FileWriter(versionFile)) { writer.write(vespaVersion); } catch (IOException e) { throw new RuntimeException(e); } } public Version currentVersion() { return new Version(VespaVersion.major, VespaVersion.minor, VespaVersion.micro); } File versionFile() { return versionFile; } @Override public String toString() { return String.format("Current version:%s, stored version:%s", currentVersion(), storedVersion()); } }
class VersionState { static final Path versionPath = Path.fromString("/config/v2/vespa_version"); private final File versionFile; private final Curator curator; private final BooleanFlag distributeApplicationPackage; @Inject public VersionState(ConfigserverConfig config, Curator curator, FlagSource flagsource) { this(new File(Defaults.getDefaults().underVespaHome(config.configServerDBDir()), "vespa_version"), curator, flagsource); } public VersionState(File versionFile, Curator curator, FlagSource flagSource) { this.versionFile = versionFile; this.curator = curator; this.distributeApplicationPackage = Flags.CONFIGSERVER_DISTRIBUTE_APPLICATION_PACKAGE.bindTo(flagSource); } public boolean isUpgraded() { return currentVersion().compareTo(storedVersion()) > 0; } public void saveNewVersion() { saveNewVersion(currentVersion().toFullString()); } public void saveNewVersion(String vespaVersion) { curator.set(versionPath, Utf8.toBytes(vespaVersion)); try (FileWriter writer = new FileWriter(versionFile)) { writer.write(vespaVersion); } catch (IOException e) { throw new RuntimeException(e); } } public Version currentVersion() { return new Version(VespaVersion.major, VespaVersion.minor, VespaVersion.micro); } File versionFile() { return versionFile; } @Override public String toString() { return String.format("Current version:%s, stored version:%s", currentVersion(), storedVersion()); } }
Remove the old key name in the hashmap. Since catalog operation is a very low frequency event, I only keep the name to DatasourceIf mapping.
public void replayAlterCatalogName(CatalogLog log) { writeLock(); try { DataSourceIf ds = nameToCatalogs.remove(log.getCatalogName()); ds.modifyDatasourceName(log.getNewCatalogName()); nameToCatalogs.put(ds.getName(), ds); } finally { writeUnlock(); } }
nameToCatalogs.put(ds.getName(), ds);
public void replayAlterCatalogName(CatalogLog log) { writeLock(); try { DataSourceIf ds = nameToCatalogs.remove(log.getCatalogName()); ds.modifyDatasourceName(log.getNewCatalogName()); nameToCatalogs.put(ds.getName(), ds); } finally { writeUnlock(); } }
class DataSourceMgr implements Writable { private static final Logger LOG = LogManager.getLogger(DataSourceMgr.class); private final ReentrantReadWriteLock lock = new ReentrantReadWriteLock(true); private final Map<String, DataSourceIf> nameToCatalogs = Maps.newConcurrentMap(); private InternalDataSource internalDataSource; public DataSourceMgr() { initInternalDataSource(); } private void initInternalDataSource() { internalDataSource = new InternalDataSource(); nameToCatalogs.put(internalDataSource.getName(), internalDataSource); } public InternalDataSource getInternalDataSource() { return internalDataSource; } private void writeLock() { lock.writeLock().lock(); } private void writeUnlock() { lock.writeLock().unlock(); } private void readLock() { lock.readLock().lock(); } private void readUnlock() { lock.readLock().unlock(); } /** * Create and hold the catalog instance and write the meta log. */ public void createCatalog(CreateCatalogStmt stmt) throws UserException { if (stmt.isSetIfNotExists() && nameToCatalogs.containsKey(stmt.getCatalogName())) { LOG.warn("Catalog {} is already exist.", stmt.getCatalogName()); return; } if (nameToCatalogs.containsKey(stmt.getCatalogName())) { throw new DdlException("Catalog had already exist with name: " + stmt.getCatalogName()); } CatalogLog log = CatalogFactory.constructorCatalogLog(stmt); replayCreateCatalog(log); Catalog.getCurrentCatalog().getEditLog().logDatasourceLog(OperationType.OP_CREATE_DS, log); } /** * Remove the catalog instance by name and write the meta log. 
*/ public void dropCatalog(DropCatalogStmt stmt) throws UserException { if (stmt.isSetIfExists() && !nameToCatalogs.containsKey(stmt.getCatalogName())) { LOG.warn("Non catalog {} is found.", stmt.getCatalogName()); return; } if (!nameToCatalogs.containsKey(stmt.getCatalogName())) { throw new DdlException("No catalog found with name: " + stmt.getCatalogName()); } CatalogLog log = CatalogFactory.constructorCatalogLog(stmt); replayDropCatalog(log); Catalog.getCurrentCatalog().getEditLog().logDatasourceLog(OperationType.OP_DROP_DS, log); } /** * Modify the catalog name into a new one and write the meta log. */ public void alterCatalogName(AlterCatalogNameStmt stmt) throws UserException { if (!nameToCatalogs.containsKey(stmt.getCatalogName())) { throw new DdlException("No catalog found with name: " + stmt.getCatalogName()); } CatalogLog log = CatalogFactory.constructorCatalogLog(stmt); replayAlterCatalogName(log); Catalog.getCurrentCatalog().getEditLog().logDatasourceLog(OperationType.OP_ALTER_DS_NAME, log); } /** * Modify the catalog property and write the meta log. */ public void alterCatalogProps(AlterCatalogPropertyStmt stmt) throws UserException { if (!nameToCatalogs.containsKey(stmt.getCatalogName())) { throw new DdlException("No catalog found with name: " + stmt.getCatalogName()); } if (!nameToCatalogs.get(stmt.getCatalogName()) .getType().equalsIgnoreCase(stmt.getNewProperties().get("type"))) { throw new DdlException("Can't modify the type of catalog property with name: " + stmt.getCatalogName()); } CatalogLog log = CatalogFactory.constructorCatalogLog(stmt); replayAlterCatalogProps(log); Catalog.getCurrentCatalog().getEditLog().logDatasourceLog(OperationType.OP_ALTER_DS_PROPS, log); } /** * List all catalog or get the special catalog with a name. 
*/ public ShowResultSet showCatalogs(ShowCatalogStmt showStmt) throws AnalysisException { List<List<String>> rows = Lists.newArrayList(); readLock(); try { if (showStmt.getCatalogName() == null) { for (DataSourceIf ds : nameToCatalogs.values()) { List<String> row = Lists.newArrayList(); row.add(ds.getName()); row.add(ds.getType()); rows.add(row); } } else { if (!nameToCatalogs.containsKey(showStmt.getCatalogName())) { throw new AnalysisException("No catalog found with name: " + showStmt.getCatalogName()); } DataSourceIf ds = nameToCatalogs.get(showStmt.getCatalogName()); for (Map.Entry<String, String> elem : ds.getProperties().entrySet()) { List<String> row = Lists.newArrayList(); row.add(elem.getKey()); row.add(elem.getValue()); rows.add(row); } } } finally { readUnlock(); } return new ShowResultSet(showStmt.getMetaData(), rows); } /** * Reply for create catalog event. */ public void replayCreateCatalog(CatalogLog log) { writeLock(); try { DataSourceIf ds = CatalogFactory.constructorFromLog(log); nameToCatalogs.put(ds.getName(), ds); } finally { writeUnlock(); } } /** * Reply for drop catalog event. */ public void replayDropCatalog(CatalogLog log) { writeLock(); try { nameToCatalogs.remove(log.getCatalogName()); } finally { writeUnlock(); } } /** * Reply for alter catalog name event. */ /** * Reply for alter catalog props event. */ public void replayAlterCatalogProps(CatalogLog log) { writeLock(); try { DataSourceIf ds = nameToCatalogs.remove(log.getCatalogName()); ds.modifyDatasourceProps(log.getNewProps()); nameToCatalogs.put(ds.getName(), ds); } finally { writeUnlock(); } } @Override public void write(DataOutput out) throws IOException { Text.writeString(out, GsonUtils.GSON.toJson(this)); } public static DataSourceMgr read(DataInput in) throws IOException { String json = Text.readString(in); return GsonUtils.GSON.fromJson(json, DataSourceMgr.class); } }
class DataSourceMgr implements Writable { private static final Logger LOG = LogManager.getLogger(DataSourceMgr.class); private final ReentrantReadWriteLock lock = new ReentrantReadWriteLock(true); private final Map<String, DataSourceIf> nameToCatalogs = Maps.newConcurrentMap(); private InternalDataSource internalDataSource; public DataSourceMgr() { initInternalDataSource(); } private void initInternalDataSource() { internalDataSource = new InternalDataSource(); nameToCatalogs.put(internalDataSource.getName(), internalDataSource); } public InternalDataSource getInternalDataSource() { return internalDataSource; } private void writeLock() { lock.writeLock().lock(); } private void writeUnlock() { lock.writeLock().unlock(); } private void readLock() { lock.readLock().lock(); } private void readUnlock() { lock.readLock().unlock(); } /** * Create and hold the catalog instance and write the meta log. */ public void createCatalog(CreateCatalogStmt stmt) throws UserException { if (stmt.isSetIfNotExists() && nameToCatalogs.containsKey(stmt.getCatalogName())) { LOG.warn("Catalog {} is already exist.", stmt.getCatalogName()); return; } if (nameToCatalogs.containsKey(stmt.getCatalogName())) { throw new DdlException("Catalog had already exist with name: " + stmt.getCatalogName()); } CatalogLog log = CatalogFactory.constructorCatalogLog(stmt); replayCreateCatalog(log); Catalog.getCurrentCatalog().getEditLog().logDatasourceLog(OperationType.OP_CREATE_DS, log); } /** * Remove the catalog instance by name and write the meta log. 
*/ public void dropCatalog(DropCatalogStmt stmt) throws UserException { if (stmt.isSetIfExists() && !nameToCatalogs.containsKey(stmt.getCatalogName())) { LOG.warn("Non catalog {} is found.", stmt.getCatalogName()); return; } if (!nameToCatalogs.containsKey(stmt.getCatalogName())) { throw new DdlException("No catalog found with name: " + stmt.getCatalogName()); } CatalogLog log = CatalogFactory.constructorCatalogLog(stmt); replayDropCatalog(log); Catalog.getCurrentCatalog().getEditLog().logDatasourceLog(OperationType.OP_DROP_DS, log); } /** * Modify the catalog name into a new one and write the meta log. */ public void alterCatalogName(AlterCatalogNameStmt stmt) throws UserException { if (!nameToCatalogs.containsKey(stmt.getCatalogName())) { throw new DdlException("No catalog found with name: " + stmt.getCatalogName()); } CatalogLog log = CatalogFactory.constructorCatalogLog(stmt); replayAlterCatalogName(log); Catalog.getCurrentCatalog().getEditLog().logDatasourceLog(OperationType.OP_ALTER_DS_NAME, log); } /** * Modify the catalog property and write the meta log. */ public void alterCatalogProps(AlterCatalogPropertyStmt stmt) throws UserException { if (!nameToCatalogs.containsKey(stmt.getCatalogName())) { throw new DdlException("No catalog found with name: " + stmt.getCatalogName()); } if (!nameToCatalogs.get(stmt.getCatalogName()) .getType().equalsIgnoreCase(stmt.getNewProperties().get("type"))) { throw new DdlException("Can't modify the type of catalog property with name: " + stmt.getCatalogName()); } CatalogLog log = CatalogFactory.constructorCatalogLog(stmt); replayAlterCatalogProps(log); Catalog.getCurrentCatalog().getEditLog().logDatasourceLog(OperationType.OP_ALTER_DS_PROPS, log); } /** * List all catalog or get the special catalog with a name. 
*/ public ShowResultSet showCatalogs(ShowCatalogStmt showStmt) throws AnalysisException { List<List<String>> rows = Lists.newArrayList(); readLock(); try { if (showStmt.getCatalogName() == null) { for (DataSourceIf ds : nameToCatalogs.values()) { List<String> row = Lists.newArrayList(); row.add(ds.getName()); row.add(ds.getType()); rows.add(row); } } else { if (!nameToCatalogs.containsKey(showStmt.getCatalogName())) { throw new AnalysisException("No catalog found with name: " + showStmt.getCatalogName()); } DataSourceIf ds = nameToCatalogs.get(showStmt.getCatalogName()); for (Map.Entry<String, String> elem : ds.getProperties().entrySet()) { List<String> row = Lists.newArrayList(); row.add(elem.getKey()); row.add(elem.getValue()); rows.add(row); } } } finally { readUnlock(); } return new ShowResultSet(showStmt.getMetaData(), rows); } /** * Reply for create catalog event. */ public void replayCreateCatalog(CatalogLog log) { writeLock(); try { DataSourceIf ds = CatalogFactory.constructorFromLog(log); nameToCatalogs.put(ds.getName(), ds); } finally { writeUnlock(); } } /** * Reply for drop catalog event. */ public void replayDropCatalog(CatalogLog log) { writeLock(); try { nameToCatalogs.remove(log.getCatalogName()); } finally { writeUnlock(); } } /** * Reply for alter catalog name event. */ /** * Reply for alter catalog props event. */ public void replayAlterCatalogProps(CatalogLog log) { writeLock(); try { DataSourceIf ds = nameToCatalogs.remove(log.getCatalogName()); ds.modifyDatasourceProps(log.getNewProps()); nameToCatalogs.put(ds.getName(), ds); } finally { writeUnlock(); } } @Override public void write(DataOutput out) throws IOException { Text.writeString(out, GsonUtils.GSON.toJson(this)); } public static DataSourceMgr read(DataInput in) throws IOException { String json = Text.readString(in); return GsonUtils.GSON.fromJson(json, DataSourceMgr.class); } }
Fixed
public Version storedVersion() { if (distributeApplicationPackage.value()) { Optional<byte[]> version = curator.getData(versionPath); if(version.isPresent()) { System.out.println("Found version in zk "); try { return Version.fromString(Utf8.toString(version.get())); } catch (Exception e) { return new Version(0, 0, 0); } } } try (FileReader reader = new FileReader(versionFile)) { System.out.println("Found version in file "); return Version.fromString(IOUtils.readAll(reader)); } catch (Exception e) { return new Version(0, 0, 0); } }
System.out.println("Found version in file ");
public Version storedVersion() { if (distributeApplicationPackage.value()) { Optional<byte[]> version = curator.getData(versionPath); if(version.isPresent()) { try { return Version.fromString(Utf8.toString(version.get())); } catch (Exception e) { } } } try (FileReader reader = new FileReader(versionFile)) { return Version.fromString(IOUtils.readAll(reader)); } catch (Exception e) { return new Version(0, 0, 0); } }
class VersionState { static final Path versionPath = Path.fromString("/config/v2/vespa_version"); private final File versionFile; private final Curator curator; private final BooleanFlag distributeApplicationPackage; @Inject public VersionState(ConfigserverConfig config, Curator curator, FlagSource flagsource) { this(new File(Defaults.getDefaults().underVespaHome(config.configServerDBDir()), "vespa_version"), curator, flagsource); } public VersionState(File versionFile, Curator curator, FlagSource flagSource) { this.versionFile = versionFile; this.curator = curator; this.distributeApplicationPackage = Flags.CONFIGSERVER_DISTRIBUTE_APPLICATION_PACKAGE.bindTo(flagSource); } public boolean isUpgraded() { System.out.println("current version: " + currentVersion() + ", stored version: " + storedVersion()); return currentVersion().compareTo(storedVersion()) > 0; } public void saveNewVersion() { saveNewVersion(currentVersion().toFullString()); } public void saveNewVersion(String vespaVersion) { curator.set(versionPath, Utf8.toBytes(vespaVersion)); try (FileWriter writer = new FileWriter(versionFile)) { writer.write(vespaVersion); } catch (IOException e) { throw new RuntimeException(e); } } public Version currentVersion() { return new Version(VespaVersion.major, VespaVersion.minor, VespaVersion.micro); } File versionFile() { return versionFile; } @Override public String toString() { return String.format("Current version:%s, stored version:%s", currentVersion(), storedVersion()); } }
class VersionState { static final Path versionPath = Path.fromString("/config/v2/vespa_version"); private final File versionFile; private final Curator curator; private final BooleanFlag distributeApplicationPackage; @Inject public VersionState(ConfigserverConfig config, Curator curator, FlagSource flagsource) { this(new File(Defaults.getDefaults().underVespaHome(config.configServerDBDir()), "vespa_version"), curator, flagsource); } public VersionState(File versionFile, Curator curator, FlagSource flagSource) { this.versionFile = versionFile; this.curator = curator; this.distributeApplicationPackage = Flags.CONFIGSERVER_DISTRIBUTE_APPLICATION_PACKAGE.bindTo(flagSource); } public boolean isUpgraded() { return currentVersion().compareTo(storedVersion()) > 0; } public void saveNewVersion() { saveNewVersion(currentVersion().toFullString()); } public void saveNewVersion(String vespaVersion) { curator.set(versionPath, Utf8.toBytes(vespaVersion)); try (FileWriter writer = new FileWriter(versionFile)) { writer.write(vespaVersion); } catch (IOException e) { throw new RuntimeException(e); } } public Version currentVersion() { return new Version(VespaVersion.major, VespaVersion.minor, VespaVersion.micro); } File versionFile() { return versionFile; } @Override public String toString() { return String.format("Current version:%s, stored version:%s", currentVersion(), storedVersion()); } }
Unintentional reformatting?
public void acquire(Duration timeout) throws UncheckedTimeoutException { ThreadLockInfo threadLockInfo = getThreadLockInfo(); threadLockInfo.invokingAcquire(timeout); try { if ( ! mutex.acquire(timeout.toMillis(), TimeUnit.MILLISECONDS)) { threadLockInfo.acquireTimedOut(); throw new UncheckedTimeoutException("Timed out after waiting " + timeout + " to acquire lock '" + lockPath + "'"); } threadLockInfo.lockAcquired(); if ( ! lock.tryLock()) { release(); threadLockInfo.failedToAcquireReentrantLock(); throw new IllegalStateException("InterProcessMutex acquired, but guarded lock held by someone else, for lock '" + lockPath + "'"); } } catch (UncheckedTimeoutException | IllegalStateException e) { throw e; } catch (Exception e) { throw new RuntimeException("Exception acquiring lock '" + lockPath + "'", e); } }
" to acquire lock '" + lockPath + "'");
public void acquire(Duration timeout) throws UncheckedTimeoutException { ThreadLockStats threadLockStats = ThreadLockStats.getCurrentThreadLockStats(); threadLockStats.invokingAcquire(lockPath, timeout); final boolean acquired; try { acquired = mutex.acquire(timeout.toMillis(), TimeUnit.MILLISECONDS); } catch (Exception e) { threadLockStats.acquireFailed(lockPath); throw new RuntimeException("Exception acquiring lock '" + lockPath + "'", e); } if (!acquired) { threadLockStats.acquireTimedOut(lockPath); throw new UncheckedTimeoutException("Timed out after waiting " + timeout + " to acquire lock '" + lockPath + "'"); } threadLockStats.lockAcquired(lockPath); }
class Lock implements Mutex { private final ReentrantLock lock; private final InterProcessLock mutex; private final String lockPath; public Lock(String lockPath, Curator curator) { this.lockPath = lockPath; this.lock = new ReentrantLock(true); mutex = curator.createMutex(lockPath); } /** Take the lock with the given timeout. This may be called multiple times from the same thread - each matched by a close */ @Override public void close() { try { lock.unlock(); } finally { release(); } } private void release() { getThreadLockInfo().lockReleased(); try { mutex.release(); } catch (Exception e) { throw new RuntimeException("Exception releasing lock '" + lockPath + "'"); } } private ThreadLockInfo getThreadLockInfo() { return ThreadLockInfo.getCurrentThreadLockInfo(lockPath, lock); } }
class Lock implements Mutex { private final InterProcessLock mutex; private final String lockPath; public Lock(String lockPath, Curator curator) { this(lockPath, curator.createMutex(lockPath)); } /** Public for testing only */ public Lock(String lockPath, InterProcessLock mutex) { this.lockPath = lockPath; this.mutex = mutex; } /** Take the lock with the given timeout. This may be called multiple times from the same thread - each matched by a close */ @Override public void close() { try { mutex.release(); ThreadLockStats.getCurrentThreadLockStats().lockReleased(lockPath); } catch (Exception e) { ThreadLockStats.getCurrentThreadLockStats().lockReleaseFailed(lockPath); throw new RuntimeException("Exception releasing lock '" + lockPath + "'"); } } }
Fixed
public void acquire(Duration timeout) throws UncheckedTimeoutException { ThreadLockInfo threadLockInfo = getThreadLockInfo(); threadLockInfo.invokingAcquire(timeout); try { if ( ! mutex.acquire(timeout.toMillis(), TimeUnit.MILLISECONDS)) { threadLockInfo.acquireTimedOut(); throw new UncheckedTimeoutException("Timed out after waiting " + timeout + " to acquire lock '" + lockPath + "'"); } threadLockInfo.lockAcquired(); if ( ! lock.tryLock()) { release(); threadLockInfo.failedToAcquireReentrantLock(); throw new IllegalStateException("InterProcessMutex acquired, but guarded lock held by someone else, for lock '" + lockPath + "'"); } } catch (UncheckedTimeoutException | IllegalStateException e) { throw e; } catch (Exception e) { throw new RuntimeException("Exception acquiring lock '" + lockPath + "'", e); } }
" to acquire lock '" + lockPath + "'");
public void acquire(Duration timeout) throws UncheckedTimeoutException { ThreadLockStats threadLockStats = ThreadLockStats.getCurrentThreadLockStats(); threadLockStats.invokingAcquire(lockPath, timeout); final boolean acquired; try { acquired = mutex.acquire(timeout.toMillis(), TimeUnit.MILLISECONDS); } catch (Exception e) { threadLockStats.acquireFailed(lockPath); throw new RuntimeException("Exception acquiring lock '" + lockPath + "'", e); } if (!acquired) { threadLockStats.acquireTimedOut(lockPath); throw new UncheckedTimeoutException("Timed out after waiting " + timeout + " to acquire lock '" + lockPath + "'"); } threadLockStats.lockAcquired(lockPath); }
class Lock implements Mutex { private final ReentrantLock lock; private final InterProcessLock mutex; private final String lockPath; public Lock(String lockPath, Curator curator) { this.lockPath = lockPath; this.lock = new ReentrantLock(true); mutex = curator.createMutex(lockPath); } /** Take the lock with the given timeout. This may be called multiple times from the same thread - each matched by a close */ @Override public void close() { try { lock.unlock(); } finally { release(); } } private void release() { getThreadLockInfo().lockReleased(); try { mutex.release(); } catch (Exception e) { throw new RuntimeException("Exception releasing lock '" + lockPath + "'"); } } private ThreadLockInfo getThreadLockInfo() { return ThreadLockInfo.getCurrentThreadLockInfo(lockPath, lock); } }
class Lock implements Mutex { private final InterProcessLock mutex; private final String lockPath; public Lock(String lockPath, Curator curator) { this(lockPath, curator.createMutex(lockPath)); } /** Public for testing only */ public Lock(String lockPath, InterProcessLock mutex) { this.lockPath = lockPath; this.mutex = mutex; } /** Take the lock with the given timeout. This may be called multiple times from the same thread - each matched by a close */ @Override public void close() { try { mutex.release(); ThreadLockStats.getCurrentThreadLockStats().lockReleased(lockPath); } catch (Exception e) { ThreadLockStats.getCurrentThreadLockStats().lockReleaseFailed(lockPath); throw new RuntimeException("Exception releasing lock '" + lockPath + "'"); } } }
Shouldn't this happen after the `mutex.release()` below?
public void close() { ThreadLockInfo.getCurrentThreadLockInfo().lockReleased(lockPath); try { mutex.release(); } catch (Exception e) { throw new RuntimeException("Exception releasing lock '" + lockPath + "'"); } }
ThreadLockInfo.getCurrentThreadLockInfo().lockReleased(lockPath);
public void close() { try { mutex.release(); ThreadLockStats.getCurrentThreadLockStats().lockReleased(lockPath); } catch (Exception e) { ThreadLockStats.getCurrentThreadLockStats().lockReleaseFailed(lockPath); throw new RuntimeException("Exception releasing lock '" + lockPath + "'"); } }
class Lock implements Mutex { private final InterProcessLock mutex; private final String lockPath; public Lock(String lockPath, Curator curator) { this(lockPath, curator.createMutex(lockPath)); } /** Public for testing only */ public Lock(String lockPath, InterProcessLock mutex) { this.lockPath = lockPath; this.mutex = mutex; } /** Take the lock with the given timeout. This may be called multiple times from the same thread - each matched by a close */ public void acquire(Duration timeout) throws UncheckedTimeoutException { ThreadLockInfo threadLockInfo = ThreadLockInfo.getCurrentThreadLockInfo(); threadLockInfo.invokingAcquire(lockPath, timeout); final boolean acquired; try { acquired = mutex.acquire(timeout.toMillis(), TimeUnit.MILLISECONDS); } catch (Exception e) { threadLockInfo.acquireFailed(lockPath); throw new RuntimeException("Exception acquiring lock '" + lockPath + "'", e); } if (!acquired) { threadLockInfo.acquireTimedOut(lockPath); throw new UncheckedTimeoutException("Timed out after waiting " + timeout + " to acquire lock '" + lockPath + "'"); } threadLockInfo.lockAcquired(lockPath); } @Override }
class Lock implements Mutex { private final InterProcessLock mutex; private final String lockPath; public Lock(String lockPath, Curator curator) { this(lockPath, curator.createMutex(lockPath)); } /** Public for testing only */ public Lock(String lockPath, InterProcessLock mutex) { this.lockPath = lockPath; this.mutex = mutex; } /** Take the lock with the given timeout. This may be called multiple times from the same thread - each matched by a close */ public void acquire(Duration timeout) throws UncheckedTimeoutException { ThreadLockStats threadLockStats = ThreadLockStats.getCurrentThreadLockStats(); threadLockStats.invokingAcquire(lockPath, timeout); final boolean acquired; try { acquired = mutex.acquire(timeout.toMillis(), TimeUnit.MILLISECONDS); } catch (Exception e) { threadLockStats.acquireFailed(lockPath); throw new RuntimeException("Exception acquiring lock '" + lockPath + "'", e); } if (!acquired) { threadLockStats.acquireTimedOut(lockPath); throw new UncheckedTimeoutException("Timed out after waiting " + timeout + " to acquire lock '" + lockPath + "'"); } threadLockStats.lockAcquired(lockPath); } @Override }
To get any Instant.now() to include the time in release?
public void close() { ThreadLockInfo.getCurrentThreadLockInfo().lockReleased(lockPath); try { mutex.release(); } catch (Exception e) { throw new RuntimeException("Exception releasing lock '" + lockPath + "'"); } }
ThreadLockInfo.getCurrentThreadLockInfo().lockReleased(lockPath);
public void close() { try { mutex.release(); ThreadLockStats.getCurrentThreadLockStats().lockReleased(lockPath); } catch (Exception e) { ThreadLockStats.getCurrentThreadLockStats().lockReleaseFailed(lockPath); throw new RuntimeException("Exception releasing lock '" + lockPath + "'"); } }
class Lock implements Mutex { private final InterProcessLock mutex; private final String lockPath; public Lock(String lockPath, Curator curator) { this(lockPath, curator.createMutex(lockPath)); } /** Public for testing only */ public Lock(String lockPath, InterProcessLock mutex) { this.lockPath = lockPath; this.mutex = mutex; } /** Take the lock with the given timeout. This may be called multiple times from the same thread - each matched by a close */ public void acquire(Duration timeout) throws UncheckedTimeoutException { ThreadLockInfo threadLockInfo = ThreadLockInfo.getCurrentThreadLockInfo(); threadLockInfo.invokingAcquire(lockPath, timeout); final boolean acquired; try { acquired = mutex.acquire(timeout.toMillis(), TimeUnit.MILLISECONDS); } catch (Exception e) { threadLockInfo.acquireFailed(lockPath); throw new RuntimeException("Exception acquiring lock '" + lockPath + "'", e); } if (!acquired) { threadLockInfo.acquireTimedOut(lockPath); throw new UncheckedTimeoutException("Timed out after waiting " + timeout + " to acquire lock '" + lockPath + "'"); } threadLockInfo.lockAcquired(lockPath); } @Override }
class Lock implements Mutex { private final InterProcessLock mutex; private final String lockPath; public Lock(String lockPath, Curator curator) { this(lockPath, curator.createMutex(lockPath)); } /** Public for testing only */ public Lock(String lockPath, InterProcessLock mutex) { this.lockPath = lockPath; this.mutex = mutex; } /** Take the lock with the given timeout. This may be called multiple times from the same thread - each matched by a close */ public void acquire(Duration timeout) throws UncheckedTimeoutException { ThreadLockStats threadLockStats = ThreadLockStats.getCurrentThreadLockStats(); threadLockStats.invokingAcquire(lockPath, timeout); final boolean acquired; try { acquired = mutex.acquire(timeout.toMillis(), TimeUnit.MILLISECONDS); } catch (Exception e) { threadLockStats.acquireFailed(lockPath); throw new RuntimeException("Exception acquiring lock '" + lockPath + "'", e); } if (!acquired) { threadLockStats.acquireTimedOut(lockPath); throw new UncheckedTimeoutException("Timed out after waiting " + timeout + " to acquire lock '" + lockPath + "'"); } threadLockStats.lockAcquired(lockPath); } @Override }
No, currently if `mutex.release()` fails you will still have tagged it as released in `ThreadLockInfo`.
public void close() { ThreadLockInfo.getCurrentThreadLockInfo().lockReleased(lockPath); try { mutex.release(); } catch (Exception e) { throw new RuntimeException("Exception releasing lock '" + lockPath + "'"); } }
ThreadLockInfo.getCurrentThreadLockInfo().lockReleased(lockPath);
public void close() { try { mutex.release(); ThreadLockStats.getCurrentThreadLockStats().lockReleased(lockPath); } catch (Exception e) { ThreadLockStats.getCurrentThreadLockStats().lockReleaseFailed(lockPath); throw new RuntimeException("Exception releasing lock '" + lockPath + "'"); } }
class Lock implements Mutex { private final InterProcessLock mutex; private final String lockPath; public Lock(String lockPath, Curator curator) { this(lockPath, curator.createMutex(lockPath)); } /** Public for testing only */ public Lock(String lockPath, InterProcessLock mutex) { this.lockPath = lockPath; this.mutex = mutex; } /** Take the lock with the given timeout. This may be called multiple times from the same thread - each matched by a close */ public void acquire(Duration timeout) throws UncheckedTimeoutException { ThreadLockInfo threadLockInfo = ThreadLockInfo.getCurrentThreadLockInfo(); threadLockInfo.invokingAcquire(lockPath, timeout); final boolean acquired; try { acquired = mutex.acquire(timeout.toMillis(), TimeUnit.MILLISECONDS); } catch (Exception e) { threadLockInfo.acquireFailed(lockPath); throw new RuntimeException("Exception acquiring lock '" + lockPath + "'", e); } if (!acquired) { threadLockInfo.acquireTimedOut(lockPath); throw new UncheckedTimeoutException("Timed out after waiting " + timeout + " to acquire lock '" + lockPath + "'"); } threadLockInfo.lockAcquired(lockPath); } @Override }
class Lock implements Mutex { private final InterProcessLock mutex; private final String lockPath; public Lock(String lockPath, Curator curator) { this(lockPath, curator.createMutex(lockPath)); } /** Public for testing only */ public Lock(String lockPath, InterProcessLock mutex) { this.lockPath = lockPath; this.mutex = mutex; } /** Take the lock with the given timeout. This may be called multiple times from the same thread - each matched by a close */ public void acquire(Duration timeout) throws UncheckedTimeoutException { ThreadLockStats threadLockStats = ThreadLockStats.getCurrentThreadLockStats(); threadLockStats.invokingAcquire(lockPath, timeout); final boolean acquired; try { acquired = mutex.acquire(timeout.toMillis(), TimeUnit.MILLISECONDS); } catch (Exception e) { threadLockStats.acquireFailed(lockPath); throw new RuntimeException("Exception acquiring lock '" + lockPath + "'", e); } if (!acquired) { threadLockStats.acquireTimedOut(lockPath); throw new UncheckedTimeoutException("Timed out after waiting " + timeout + " to acquire lock '" + lockPath + "'"); } threadLockStats.lockAcquired(lockPath); } @Override }
I could count an error, but the lock must be assumed to be released whether an exception is thrown or not
public void close() { ThreadLockInfo.getCurrentThreadLockInfo().lockReleased(lockPath); try { mutex.release(); } catch (Exception e) { throw new RuntimeException("Exception releasing lock '" + lockPath + "'"); } }
ThreadLockInfo.getCurrentThreadLockInfo().lockReleased(lockPath);
public void close() { try { mutex.release(); ThreadLockStats.getCurrentThreadLockStats().lockReleased(lockPath); } catch (Exception e) { ThreadLockStats.getCurrentThreadLockStats().lockReleaseFailed(lockPath); throw new RuntimeException("Exception releasing lock '" + lockPath + "'"); } }
class Lock implements Mutex { private final InterProcessLock mutex; private final String lockPath; public Lock(String lockPath, Curator curator) { this(lockPath, curator.createMutex(lockPath)); } /** Public for testing only */ public Lock(String lockPath, InterProcessLock mutex) { this.lockPath = lockPath; this.mutex = mutex; } /** Take the lock with the given timeout. This may be called multiple times from the same thread - each matched by a close */ public void acquire(Duration timeout) throws UncheckedTimeoutException { ThreadLockInfo threadLockInfo = ThreadLockInfo.getCurrentThreadLockInfo(); threadLockInfo.invokingAcquire(lockPath, timeout); final boolean acquired; try { acquired = mutex.acquire(timeout.toMillis(), TimeUnit.MILLISECONDS); } catch (Exception e) { threadLockInfo.acquireFailed(lockPath); throw new RuntimeException("Exception acquiring lock '" + lockPath + "'", e); } if (!acquired) { threadLockInfo.acquireTimedOut(lockPath); throw new UncheckedTimeoutException("Timed out after waiting " + timeout + " to acquire lock '" + lockPath + "'"); } threadLockInfo.lockAcquired(lockPath); } @Override }
class Lock implements Mutex { private final InterProcessLock mutex; private final String lockPath; public Lock(String lockPath, Curator curator) { this(lockPath, curator.createMutex(lockPath)); } /** Public for testing only */ public Lock(String lockPath, InterProcessLock mutex) { this.lockPath = lockPath; this.mutex = mutex; } /** Take the lock with the given timeout. This may be called multiple times from the same thread - each matched by a close */ public void acquire(Duration timeout) throws UncheckedTimeoutException { ThreadLockStats threadLockStats = ThreadLockStats.getCurrentThreadLockStats(); threadLockStats.invokingAcquire(lockPath, timeout); final boolean acquired; try { acquired = mutex.acquire(timeout.toMillis(), TimeUnit.MILLISECONDS); } catch (Exception e) { threadLockStats.acquireFailed(lockPath); throw new RuntimeException("Exception acquiring lock '" + lockPath + "'", e); } if (!acquired) { threadLockStats.acquireTimedOut(lockPath); throw new UncheckedTimeoutException("Timed out after waiting " + timeout + " to acquire lock '" + lockPath + "'"); } threadLockStats.lockAcquired(lockPath); } @Override }
Added count
public void close() { ThreadLockInfo.getCurrentThreadLockInfo().lockReleased(lockPath); try { mutex.release(); } catch (Exception e) { throw new RuntimeException("Exception releasing lock '" + lockPath + "'"); } }
ThreadLockInfo.getCurrentThreadLockInfo().lockReleased(lockPath);
public void close() { try { mutex.release(); ThreadLockStats.getCurrentThreadLockStats().lockReleased(lockPath); } catch (Exception e) { ThreadLockStats.getCurrentThreadLockStats().lockReleaseFailed(lockPath); throw new RuntimeException("Exception releasing lock '" + lockPath + "'"); } }
class Lock implements Mutex { private final InterProcessLock mutex; private final String lockPath; public Lock(String lockPath, Curator curator) { this(lockPath, curator.createMutex(lockPath)); } /** Public for testing only */ public Lock(String lockPath, InterProcessLock mutex) { this.lockPath = lockPath; this.mutex = mutex; } /** Take the lock with the given timeout. This may be called multiple times from the same thread - each matched by a close */ public void acquire(Duration timeout) throws UncheckedTimeoutException { ThreadLockInfo threadLockInfo = ThreadLockInfo.getCurrentThreadLockInfo(); threadLockInfo.invokingAcquire(lockPath, timeout); final boolean acquired; try { acquired = mutex.acquire(timeout.toMillis(), TimeUnit.MILLISECONDS); } catch (Exception e) { threadLockInfo.acquireFailed(lockPath); throw new RuntimeException("Exception acquiring lock '" + lockPath + "'", e); } if (!acquired) { threadLockInfo.acquireTimedOut(lockPath); throw new UncheckedTimeoutException("Timed out after waiting " + timeout + " to acquire lock '" + lockPath + "'"); } threadLockInfo.lockAcquired(lockPath); } @Override }
class Lock implements Mutex { private final InterProcessLock mutex; private final String lockPath; public Lock(String lockPath, Curator curator) { this(lockPath, curator.createMutex(lockPath)); } /** Public for testing only */ public Lock(String lockPath, InterProcessLock mutex) { this.lockPath = lockPath; this.mutex = mutex; } /** Take the lock with the given timeout. This may be called multiple times from the same thread - each matched by a close */ public void acquire(Duration timeout) throws UncheckedTimeoutException { ThreadLockStats threadLockStats = ThreadLockStats.getCurrentThreadLockStats(); threadLockStats.invokingAcquire(lockPath, timeout); final boolean acquired; try { acquired = mutex.acquire(timeout.toMillis(), TimeUnit.MILLISECONDS); } catch (Exception e) { threadLockStats.acquireFailed(lockPath); throw new RuntimeException("Exception acquiring lock '" + lockPath + "'", e); } if (!acquired) { threadLockStats.acquireTimedOut(lockPath); throw new UncheckedTimeoutException("Timed out after waiting " + timeout + " to acquire lock '" + lockPath + "'"); } threadLockStats.lockAcquired(lockPath); } @Override }
"file server pull"
private FileServer(ConnectionPool connectionPool, File rootDir) { this.downloader = new FileDownloader(connectionPool); this.root = new FileDirectory(rootDir); this.pushExecutor = Executors.newFixedThreadPool(Math.max(8, Runtime.getRuntime().availableProcessors()), new DaemonThreadFactory("file server push")); this.pullExecutor = Executors.newFixedThreadPool(Math.max(8, Runtime.getRuntime().availableProcessors()), new DaemonThreadFactory("file server push")); }
new DaemonThreadFactory("file server push"));
private FileServer(ConnectionPool connectionPool, File rootDir) { this.downloader = new FileDownloader(connectionPool); this.root = new FileDirectory(rootDir); this.pushExecutor = Executors.newFixedThreadPool(Math.max(8, Runtime.getRuntime().availableProcessors()), new DaemonThreadFactory("file server push")); this.pullExecutor = Executors.newFixedThreadPool(Math.max(8, Runtime.getRuntime().availableProcessors()), new DaemonThreadFactory("file server pull")); }
class ReplayStatus { private final int code; private final String description; ReplayStatus(int code, String description) { this.code = code; this.description = description; } public boolean ok() { return code == 0; } public int getCode() { return code; } public String getDescription() { return description; } }
class ReplayStatus { private final int code; private final String description; ReplayStatus(int code, String description) { this.code = code; this.description = description; } public boolean ok() { return code == 0; } public int getCode() { return code; } public String getDescription() { return description; } }
Fixed
private FileServer(ConnectionPool connectionPool, File rootDir) { this.downloader = new FileDownloader(connectionPool); this.root = new FileDirectory(rootDir); this.pushExecutor = Executors.newFixedThreadPool(Math.max(8, Runtime.getRuntime().availableProcessors()), new DaemonThreadFactory("file server push")); this.pullExecutor = Executors.newFixedThreadPool(Math.max(8, Runtime.getRuntime().availableProcessors()), new DaemonThreadFactory("file server push")); }
new DaemonThreadFactory("file server push"));
private FileServer(ConnectionPool connectionPool, File rootDir) { this.downloader = new FileDownloader(connectionPool); this.root = new FileDirectory(rootDir); this.pushExecutor = Executors.newFixedThreadPool(Math.max(8, Runtime.getRuntime().availableProcessors()), new DaemonThreadFactory("file server push")); this.pullExecutor = Executors.newFixedThreadPool(Math.max(8, Runtime.getRuntime().availableProcessors()), new DaemonThreadFactory("file server pull")); }
class ReplayStatus { private final int code; private final String description; ReplayStatus(int code, String description) { this.code = code; this.description = description; } public boolean ok() { return code == 0; } public int getCode() { return code; } public String getDescription() { return description; } }
class ReplayStatus { private final int code; private final String description; ReplayStatus(int code, String description) { this.code = code; this.description = description; } public boolean ok() { return code == 0; } public int getCode() { return code; } public String getDescription() { return description; } }
Explicit injection 😱
private static void addRestApiHandler(ContainerCluster<?> cluster, Options options) { String handlerClassName = options.useNewRestapiHandler ? "com.yahoo.document.restapi.resource.DocumentV1ApiHandler" : "com.yahoo.document.restapi.resource.RestApi"; var handler = newVespaClientHandler(handlerClassName, "/document/v1/*", options); cluster.addComponent(handler); if (!options.useNewRestapiHandler) { var executor = new Threadpool( "restapi-handler", cluster, options.restApiThreadpoolOptions, options.feedThreadPoolSizeFactor); handler.inject(executor); handler.addComponent(executor); } }
handler.inject(executor);
private static void addRestApiHandler(ContainerCluster<?> cluster, Options options) { String handlerClassName = options.useNewRestapiHandler ? "com.yahoo.document.restapi.resource.DocumentV1ApiHandler" : "com.yahoo.document.restapi.resource.RestApi"; var handler = newVespaClientHandler(handlerClassName, "/document/v1/*", options); cluster.addComponent(handler); if (!options.useNewRestapiHandler) { var executor = new Threadpool( "restapi-handler", cluster, options.restApiThreadpoolOptions, options.feedThreadPoolSizeFactor); handler.inject(executor); handler.addComponent(executor); } }
class ContainerDocumentApi { private static final int FALLBACK_MAX_POOL_SIZE = 0; private static final int FALLBACK_CORE_POOL_SIZE = 0; public ContainerDocumentApi(ContainerCluster<?> cluster, Options options) { addRestApiHandler(cluster, options); addFeedHandler(cluster, options); } private static void addFeedHandler(ContainerCluster<?> cluster, Options options) { String bindingSuffix = ContainerCluster.RESERVED_URI_PREFIX + "/feedapi"; var handler = newVespaClientHandler( "com.yahoo.vespa.http.server.FeedHandler", bindingSuffix, options); cluster.addComponent(handler); var executor = new Threadpool( "feedapi-handler", cluster, options.feedApiThreadpoolOptions, options.feedThreadPoolSizeFactor); handler.inject(executor); handler.addComponent(executor); } private static Handler<AbstractConfigProducer<?>> newVespaClientHandler( String componentId, String bindingSuffix, Options options) { Handler<AbstractConfigProducer<?>> handler = new Handler<>(new ComponentModel( BundleInstantiationSpecification.getFromStrings(componentId, null, "vespaclient-container-plugin"), "")); if (options.bindings.isEmpty()) { handler.addServerBindings( SystemBindingPattern.fromHttpPath(bindingSuffix), SystemBindingPattern.fromHttpPath(bindingSuffix + '/')); } else { for (String rootBinding : options.bindings) { String pathWithoutLeadingSlash = bindingSuffix.substring(1); handler.addServerBindings( UserBindingPattern.fromPattern(rootBinding + pathWithoutLeadingSlash), UserBindingPattern.fromPattern(rootBinding + pathWithoutLeadingSlash + '/')); } } return handler; } public static final class Options { private final Collection<String> bindings; private final ContainerThreadpool.UserOptions restApiThreadpoolOptions; private final ContainerThreadpool.UserOptions feedApiThreadpoolOptions; private final double feedThreadPoolSizeFactor; private final boolean useNewRestapiHandler; public Options(Collection<String> bindings, ContainerThreadpool.UserOptions restApiThreadpoolOptions, 
ContainerThreadpool.UserOptions feedApiThreadpoolOptions, double feedThreadPoolSizeFactor, boolean useNewRestapiHandler) { this.bindings = Collections.unmodifiableCollection(bindings); this.restApiThreadpoolOptions = restApiThreadpoolOptions; this.feedApiThreadpoolOptions = feedApiThreadpoolOptions; this.feedThreadPoolSizeFactor = feedThreadPoolSizeFactor; this.useNewRestapiHandler = useNewRestapiHandler; } } private static class Threadpool extends ContainerThreadpool { private final ContainerCluster<?> cluster; private final double feedThreadPoolSizeFactor; Threadpool(String name, ContainerCluster<?> cluster, ContainerThreadpool.UserOptions threadpoolOptions, double feedThreadPoolSizeFactor ) { super(name, threadpoolOptions); this.cluster = cluster; this.feedThreadPoolSizeFactor = feedThreadPoolSizeFactor; } @Override public void getConfig(ContainerThreadpoolConfig.Builder builder) { super.getConfig(builder); if (hasUserOptions()) return; builder.maxThreads(maxPoolSize()); builder.minThreads(minPoolSize()); builder.queueSize(500); } private int maxPoolSize() { double vcpu = vcpu(cluster); if (vcpu == 0) return FALLBACK_MAX_POOL_SIZE; return Math.max(2, (int)Math.ceil(vcpu * feedThreadPoolSizeFactor)); } private int minPoolSize() { double vcpu = vcpu(cluster); if (vcpu == 0) return FALLBACK_CORE_POOL_SIZE; return Math.max(1, (int)Math.ceil(vcpu * feedThreadPoolSizeFactor * 0.5)); } } }
class ContainerDocumentApi { private static final int FALLBACK_MAX_POOL_SIZE = 0; private static final int FALLBACK_CORE_POOL_SIZE = 0; public ContainerDocumentApi(ContainerCluster<?> cluster, Options options) { addRestApiHandler(cluster, options); addFeedHandler(cluster, options); } private static void addFeedHandler(ContainerCluster<?> cluster, Options options) { String bindingSuffix = ContainerCluster.RESERVED_URI_PREFIX + "/feedapi"; var handler = newVespaClientHandler( "com.yahoo.vespa.http.server.FeedHandler", bindingSuffix, options); cluster.addComponent(handler); var executor = new Threadpool( "feedapi-handler", cluster, options.feedApiThreadpoolOptions, options.feedThreadPoolSizeFactor); handler.inject(executor); handler.addComponent(executor); } private static Handler<AbstractConfigProducer<?>> newVespaClientHandler( String componentId, String bindingSuffix, Options options) { Handler<AbstractConfigProducer<?>> handler = new Handler<>(new ComponentModel( BundleInstantiationSpecification.getFromStrings(componentId, null, "vespaclient-container-plugin"), "")); if (options.bindings.isEmpty()) { handler.addServerBindings( SystemBindingPattern.fromHttpPath(bindingSuffix), SystemBindingPattern.fromHttpPath(bindingSuffix + '/')); } else { for (String rootBinding : options.bindings) { String pathWithoutLeadingSlash = bindingSuffix.substring(1); handler.addServerBindings( UserBindingPattern.fromPattern(rootBinding + pathWithoutLeadingSlash), UserBindingPattern.fromPattern(rootBinding + pathWithoutLeadingSlash + '/')); } } return handler; } public static final class Options { private final Collection<String> bindings; private final ContainerThreadpool.UserOptions restApiThreadpoolOptions; private final ContainerThreadpool.UserOptions feedApiThreadpoolOptions; private final double feedThreadPoolSizeFactor; private final boolean useNewRestapiHandler; public Options(Collection<String> bindings, ContainerThreadpool.UserOptions restApiThreadpoolOptions, 
ContainerThreadpool.UserOptions feedApiThreadpoolOptions, double feedThreadPoolSizeFactor, boolean useNewRestapiHandler) { this.bindings = Collections.unmodifiableCollection(bindings); this.restApiThreadpoolOptions = restApiThreadpoolOptions; this.feedApiThreadpoolOptions = feedApiThreadpoolOptions; this.feedThreadPoolSizeFactor = feedThreadPoolSizeFactor; this.useNewRestapiHandler = useNewRestapiHandler; } } private static class Threadpool extends ContainerThreadpool { private final ContainerCluster<?> cluster; private final double feedThreadPoolSizeFactor; Threadpool(String name, ContainerCluster<?> cluster, ContainerThreadpool.UserOptions threadpoolOptions, double feedThreadPoolSizeFactor ) { super(name, threadpoolOptions); this.cluster = cluster; this.feedThreadPoolSizeFactor = feedThreadPoolSizeFactor; } @Override public void getConfig(ContainerThreadpoolConfig.Builder builder) { super.getConfig(builder); if (hasUserOptions()) return; builder.maxThreads(maxPoolSize()); builder.minThreads(minPoolSize()); builder.queueSize(500); } private int maxPoolSize() { double vcpu = vcpu(cluster); if (vcpu == 0) return FALLBACK_MAX_POOL_SIZE; return Math.max(2, (int)Math.ceil(vcpu * feedThreadPoolSizeFactor)); } private int minPoolSize() { double vcpu = vcpu(cluster); if (vcpu == 0) return FALLBACK_CORE_POOL_SIZE; return Math.max(1, (int)Math.ceil(vcpu * feedThreadPoolSizeFactor * 0.5)); } } }
This is how the cool kids on the street do it
private static void addRestApiHandler(ContainerCluster<?> cluster, Options options) { String handlerClassName = options.useNewRestapiHandler ? "com.yahoo.document.restapi.resource.DocumentV1ApiHandler" : "com.yahoo.document.restapi.resource.RestApi"; var handler = newVespaClientHandler(handlerClassName, "/document/v1/*", options); cluster.addComponent(handler); if (!options.useNewRestapiHandler) { var executor = new Threadpool( "restapi-handler", cluster, options.restApiThreadpoolOptions, options.feedThreadPoolSizeFactor); handler.inject(executor); handler.addComponent(executor); } }
handler.inject(executor);
private static void addRestApiHandler(ContainerCluster<?> cluster, Options options) { String handlerClassName = options.useNewRestapiHandler ? "com.yahoo.document.restapi.resource.DocumentV1ApiHandler" : "com.yahoo.document.restapi.resource.RestApi"; var handler = newVespaClientHandler(handlerClassName, "/document/v1/*", options); cluster.addComponent(handler); if (!options.useNewRestapiHandler) { var executor = new Threadpool( "restapi-handler", cluster, options.restApiThreadpoolOptions, options.feedThreadPoolSizeFactor); handler.inject(executor); handler.addComponent(executor); } }
class ContainerDocumentApi { private static final int FALLBACK_MAX_POOL_SIZE = 0; private static final int FALLBACK_CORE_POOL_SIZE = 0; public ContainerDocumentApi(ContainerCluster<?> cluster, Options options) { addRestApiHandler(cluster, options); addFeedHandler(cluster, options); } private static void addFeedHandler(ContainerCluster<?> cluster, Options options) { String bindingSuffix = ContainerCluster.RESERVED_URI_PREFIX + "/feedapi"; var handler = newVespaClientHandler( "com.yahoo.vespa.http.server.FeedHandler", bindingSuffix, options); cluster.addComponent(handler); var executor = new Threadpool( "feedapi-handler", cluster, options.feedApiThreadpoolOptions, options.feedThreadPoolSizeFactor); handler.inject(executor); handler.addComponent(executor); } private static Handler<AbstractConfigProducer<?>> newVespaClientHandler( String componentId, String bindingSuffix, Options options) { Handler<AbstractConfigProducer<?>> handler = new Handler<>(new ComponentModel( BundleInstantiationSpecification.getFromStrings(componentId, null, "vespaclient-container-plugin"), "")); if (options.bindings.isEmpty()) { handler.addServerBindings( SystemBindingPattern.fromHttpPath(bindingSuffix), SystemBindingPattern.fromHttpPath(bindingSuffix + '/')); } else { for (String rootBinding : options.bindings) { String pathWithoutLeadingSlash = bindingSuffix.substring(1); handler.addServerBindings( UserBindingPattern.fromPattern(rootBinding + pathWithoutLeadingSlash), UserBindingPattern.fromPattern(rootBinding + pathWithoutLeadingSlash + '/')); } } return handler; } public static final class Options { private final Collection<String> bindings; private final ContainerThreadpool.UserOptions restApiThreadpoolOptions; private final ContainerThreadpool.UserOptions feedApiThreadpoolOptions; private final double feedThreadPoolSizeFactor; private final boolean useNewRestapiHandler; public Options(Collection<String> bindings, ContainerThreadpool.UserOptions restApiThreadpoolOptions, 
ContainerThreadpool.UserOptions feedApiThreadpoolOptions, double feedThreadPoolSizeFactor, boolean useNewRestapiHandler) { this.bindings = Collections.unmodifiableCollection(bindings); this.restApiThreadpoolOptions = restApiThreadpoolOptions; this.feedApiThreadpoolOptions = feedApiThreadpoolOptions; this.feedThreadPoolSizeFactor = feedThreadPoolSizeFactor; this.useNewRestapiHandler = useNewRestapiHandler; } } private static class Threadpool extends ContainerThreadpool { private final ContainerCluster<?> cluster; private final double feedThreadPoolSizeFactor; Threadpool(String name, ContainerCluster<?> cluster, ContainerThreadpool.UserOptions threadpoolOptions, double feedThreadPoolSizeFactor ) { super(name, threadpoolOptions); this.cluster = cluster; this.feedThreadPoolSizeFactor = feedThreadPoolSizeFactor; } @Override public void getConfig(ContainerThreadpoolConfig.Builder builder) { super.getConfig(builder); if (hasUserOptions()) return; builder.maxThreads(maxPoolSize()); builder.minThreads(minPoolSize()); builder.queueSize(500); } private int maxPoolSize() { double vcpu = vcpu(cluster); if (vcpu == 0) return FALLBACK_MAX_POOL_SIZE; return Math.max(2, (int)Math.ceil(vcpu * feedThreadPoolSizeFactor)); } private int minPoolSize() { double vcpu = vcpu(cluster); if (vcpu == 0) return FALLBACK_CORE_POOL_SIZE; return Math.max(1, (int)Math.ceil(vcpu * feedThreadPoolSizeFactor * 0.5)); } } }
class ContainerDocumentApi { private static final int FALLBACK_MAX_POOL_SIZE = 0; private static final int FALLBACK_CORE_POOL_SIZE = 0; public ContainerDocumentApi(ContainerCluster<?> cluster, Options options) { addRestApiHandler(cluster, options); addFeedHandler(cluster, options); } private static void addFeedHandler(ContainerCluster<?> cluster, Options options) { String bindingSuffix = ContainerCluster.RESERVED_URI_PREFIX + "/feedapi"; var handler = newVespaClientHandler( "com.yahoo.vespa.http.server.FeedHandler", bindingSuffix, options); cluster.addComponent(handler); var executor = new Threadpool( "feedapi-handler", cluster, options.feedApiThreadpoolOptions, options.feedThreadPoolSizeFactor); handler.inject(executor); handler.addComponent(executor); } private static Handler<AbstractConfigProducer<?>> newVespaClientHandler( String componentId, String bindingSuffix, Options options) { Handler<AbstractConfigProducer<?>> handler = new Handler<>(new ComponentModel( BundleInstantiationSpecification.getFromStrings(componentId, null, "vespaclient-container-plugin"), "")); if (options.bindings.isEmpty()) { handler.addServerBindings( SystemBindingPattern.fromHttpPath(bindingSuffix), SystemBindingPattern.fromHttpPath(bindingSuffix + '/')); } else { for (String rootBinding : options.bindings) { String pathWithoutLeadingSlash = bindingSuffix.substring(1); handler.addServerBindings( UserBindingPattern.fromPattern(rootBinding + pathWithoutLeadingSlash), UserBindingPattern.fromPattern(rootBinding + pathWithoutLeadingSlash + '/')); } } return handler; } public static final class Options { private final Collection<String> bindings; private final ContainerThreadpool.UserOptions restApiThreadpoolOptions; private final ContainerThreadpool.UserOptions feedApiThreadpoolOptions; private final double feedThreadPoolSizeFactor; private final boolean useNewRestapiHandler; public Options(Collection<String> bindings, ContainerThreadpool.UserOptions restApiThreadpoolOptions, 
ContainerThreadpool.UserOptions feedApiThreadpoolOptions, double feedThreadPoolSizeFactor, boolean useNewRestapiHandler) { this.bindings = Collections.unmodifiableCollection(bindings); this.restApiThreadpoolOptions = restApiThreadpoolOptions; this.feedApiThreadpoolOptions = feedApiThreadpoolOptions; this.feedThreadPoolSizeFactor = feedThreadPoolSizeFactor; this.useNewRestapiHandler = useNewRestapiHandler; } } private static class Threadpool extends ContainerThreadpool { private final ContainerCluster<?> cluster; private final double feedThreadPoolSizeFactor; Threadpool(String name, ContainerCluster<?> cluster, ContainerThreadpool.UserOptions threadpoolOptions, double feedThreadPoolSizeFactor ) { super(name, threadpoolOptions); this.cluster = cluster; this.feedThreadPoolSizeFactor = feedThreadPoolSizeFactor; } @Override public void getConfig(ContainerThreadpoolConfig.Builder builder) { super.getConfig(builder); if (hasUserOptions()) return; builder.maxThreads(maxPoolSize()); builder.minThreads(minPoolSize()); builder.queueSize(500); } private int maxPoolSize() { double vcpu = vcpu(cluster); if (vcpu == 0) return FALLBACK_MAX_POOL_SIZE; return Math.max(2, (int)Math.ceil(vcpu * feedThreadPoolSizeFactor)); } private int minPoolSize() { double vcpu = vcpu(cluster); if (vcpu == 0) return FALLBACK_CORE_POOL_SIZE; return Math.max(1, (int)Math.ceil(vcpu * feedThreadPoolSizeFactor * 0.5)); } } }
could we cut the stack trace?
private void addNewDockerNodesOn(LockedNodeList candidates) { for (Node host : candidates) { if ( ! capacity.hasCapacity(host, resources(requestedNodes))) continue; if ( ! allNodes.childrenOf(host).owner(application).cluster(clusterSpec.id()).isEmpty()) continue; Optional<IP.Allocation> allocation; try { allocation = host.ipConfig().pool().findAllocation(allNodes, nodeRepository.nameResolver()); if (allocation.isEmpty()) continue; } catch (Exception e) { log.log(Level.WARNING, "Failed allocating IP address on " + host.hostname() + " to " + application + ", cluster " + clusterSpec.id(), e); continue; } log.log(Level.FINE, "Creating new docker node on " + host); Node newNode = Node.createDockerNode(allocation.get().addresses(), allocation.get().hostname(), host.hostname(), resources(requestedNodes).with(host.flavor().resources().diskSpeed()) .with(host.flavor().resources().storageType()), NodeType.tenant); NodeCandidate nodePri = candidateFrom(newNode, false, true); if ( ! nodePri.violatesSpares || isAllocatingForReplacement) { log.log(Level.FINE, "Adding new Docker node " + newNode); nodes.put(newNode, nodePri); } } }
application + ", cluster " + clusterSpec.id(), e);
private void addNewDockerNodesOn(LockedNodeList candidates) { for (Node host : candidates) { if ( ! capacity.hasCapacity(host, resources(requestedNodes))) continue; if ( ! allNodes.childrenOf(host).owner(application).cluster(clusterSpec.id()).isEmpty()) continue; Optional<IP.Allocation> allocation; try { allocation = host.ipConfig().pool().findAllocation(allNodes, nodeRepository.nameResolver()); if (allocation.isEmpty()) continue; } catch (Exception e) { log.log(Level.WARNING, "Failed allocating IP address on " + host.hostname() + " to " + application + ", cluster " + clusterSpec.id() + ": " + Exceptions.toMessageString(e)); continue; } log.log(Level.FINE, "Creating new docker node on " + host); Node newNode = Node.createDockerNode(allocation.get().addresses(), allocation.get().hostname(), host.hostname(), resources(requestedNodes).with(host.flavor().resources().diskSpeed()) .with(host.flavor().resources().storageType()), NodeType.tenant); NodeCandidate nodePri = candidateFrom(newNode, false, true); if ( ! nodePri.violatesSpares || isAllocatingForReplacement) { log.log(Level.FINE, "Adding new Docker node " + newNode); nodes.put(newNode, nodePri); } } }
class NodePrioritizer { private final static Logger log = Logger.getLogger(NodePrioritizer.class.getName()); private final Map<Node, NodeCandidate> nodes = new HashMap<>(); private final LockedNodeList allNodes; private final HostCapacity capacity; private final NodeSpec requestedNodes; private final ApplicationId application; private final ClusterSpec clusterSpec; private final NodeRepository nodeRepository; private final boolean isDocker; private final boolean isAllocatingForReplacement; private final boolean isTopologyChange; /** If set, a host can only have nodes by single tenant and does not allow in-place resizing. */ private final boolean allocateFully; private final int currentClusterSize; private final Set<Node> spareHosts; NodePrioritizer(LockedNodeList allNodes, ApplicationId application, ClusterSpec clusterSpec, NodeSpec nodeSpec, int wantedGroups, boolean allocateFully, NodeRepository nodeRepository) { this.allNodes = allNodes; this.capacity = new HostCapacity(allNodes, nodeRepository.resourcesCalculator()); this.requestedNodes = nodeSpec; this.clusterSpec = clusterSpec; this.application = application; this.spareHosts = capacity.findSpareHosts(allNodes.asList(), nodeRepository.spareCount()); this.allocateFully = allocateFully; this.nodeRepository = nodeRepository; NodeList nodesInCluster = allNodes.owner(application).type(clusterSpec.type()).cluster(clusterSpec.id()); NodeList nonRetiredNodesInCluster = nodesInCluster.not().retired(); long currentGroups = nonRetiredNodesInCluster.state(Node.State.active).stream() .flatMap(node -> node.allocation() .flatMap(alloc -> alloc.membership().cluster().group().map(ClusterSpec.Group::index)) .stream()) .distinct() .count(); this.isTopologyChange = currentGroups != wantedGroups; this.currentClusterSize = (int) nonRetiredNodesInCluster.state(Node.State.active).stream() .map(node -> node.allocation().flatMap(alloc -> alloc.membership().cluster().group())) .filter(clusterSpec.group()::equals) .count(); 
this.isAllocatingForReplacement = isReplacement(nodesInCluster.size(), nodesInCluster.state(Node.State.failed).size()); this.isDocker = resources(requestedNodes) != null; } /** Returns the list of nodes sorted by {@link NodeCandidate List<NodeCandidate> prioritize() { return nodes.values().stream().sorted().collect(Collectors.toList()); } /** * Add nodes that have been previously reserved to the same application from * an earlier downsizing of a cluster */ void addSurplusNodes(List<Node> surplusNodes) { for (Node node : surplusNodes) { NodeCandidate nodePri = candidateFrom(node, true, false); if (!nodePri.violatesSpares || isAllocatingForReplacement) { nodes.put(node, nodePri); } } } /** Add a node on each docker host with enough capacity for the requested flavor */ void addNewDockerNodes() { if ( ! isDocker) return; LockedNodeList candidates = allNodes .filter(node -> node.type() != NodeType.host || nodeRepository.canAllocateTenantNodeTo(node)) .filter(node -> node.reservedTo().isEmpty() || node.reservedTo().get().equals(application.tenant())); if (allocateFully) { Set<String> candidateHostnames = candidates.asList().stream() .filter(node -> node.type() == NodeType.tenant) .filter(node -> node.allocation() .map(a -> a.owner().tenant().equals(this.application.tenant())) .orElse(false)) .flatMap(node -> node.parentHostname().stream()) .collect(Collectors.toSet()); candidates = candidates.filter(node -> candidateHostnames.contains(node.hostname())); } addNewDockerNodesOn(candidates); } /** Add existing nodes allocated to the application */ void addApplicationNodes() { EnumSet<Node.State> legalStates = EnumSet.of(Node.State.active, Node.State.inactive, Node.State.reserved); allNodes.asList().stream() .filter(node -> node.type() == requestedNodes.type()) .filter(node -> legalStates.contains(node.state())) .filter(node -> node.allocation().isPresent()) .filter(node -> node.allocation().get().owner().equals(application)) .map(node -> candidateFrom(node, false, false)) 
.forEach(prioritizableNode -> nodes.put(prioritizableNode.node, prioritizableNode)); } /** Add nodes already provisioned, but not allocated to any application */ void addReadyNodes() { allNodes.asList().stream() .filter(node -> node.type() == requestedNodes.type()) .filter(node -> node.state() == Node.State.ready) .map(node -> candidateFrom(node, false, false)) .filter(n -> !n.violatesSpares || isAllocatingForReplacement) .forEach(candidate -> nodes.put(candidate.node, candidate)); } public List<NodeCandidate> nodes() { return new ArrayList<>(nodes.values()); } /** Create a candidate from given node */ private NodeCandidate candidateFrom(Node node, boolean isSurplusNode, boolean isNewNode) { NodeCandidate.Builder builder = new NodeCandidate.Builder(node).surplusNode(isSurplusNode) .newNode(isNewNode); allNodes.parentOf(node).ifPresent(parent -> { NodeResources parentCapacity = capacity.freeCapacityOf(parent, false); builder.parent(parent).freeParentCapacity(parentCapacity); if (!isNewNode) builder.resizable(! allocateFully && requestedNodes.canResize(node.resources(), parentCapacity, isTopologyChange, currentClusterSize)); if (spareHosts.contains(parent)) builder.violatesSpares(true); }); return builder.build(); } private boolean isReplacement(int nofNodesInCluster, int nodeFailedNodes) { if (nodeFailedNodes == 0) return false; return requestedNodes.fulfilledBy(nofNodesInCluster - nodeFailedNodes); } private static NodeResources resources(NodeSpec requestedNodes) { if ( ! (requestedNodes instanceof NodeSpec.CountNodeSpec)) return null; return requestedNodes.resources().get(); } }
class NodePrioritizer { private final static Logger log = Logger.getLogger(NodePrioritizer.class.getName()); private final Map<Node, NodeCandidate> nodes = new HashMap<>(); private final LockedNodeList allNodes; private final HostCapacity capacity; private final NodeSpec requestedNodes; private final ApplicationId application; private final ClusterSpec clusterSpec; private final NodeRepository nodeRepository; private final boolean isDocker; private final boolean isAllocatingForReplacement; private final boolean isTopologyChange; /** If set, a host can only have nodes by single tenant and does not allow in-place resizing. */ private final boolean allocateFully; private final int currentClusterSize; private final Set<Node> spareHosts; NodePrioritizer(LockedNodeList allNodes, ApplicationId application, ClusterSpec clusterSpec, NodeSpec nodeSpec, int wantedGroups, boolean allocateFully, NodeRepository nodeRepository) { this.allNodes = allNodes; this.capacity = new HostCapacity(allNodes, nodeRepository.resourcesCalculator()); this.requestedNodes = nodeSpec; this.clusterSpec = clusterSpec; this.application = application; this.spareHosts = capacity.findSpareHosts(allNodes.asList(), nodeRepository.spareCount()); this.allocateFully = allocateFully; this.nodeRepository = nodeRepository; NodeList nodesInCluster = allNodes.owner(application).type(clusterSpec.type()).cluster(clusterSpec.id()); NodeList nonRetiredNodesInCluster = nodesInCluster.not().retired(); long currentGroups = nonRetiredNodesInCluster.state(Node.State.active).stream() .flatMap(node -> node.allocation() .flatMap(alloc -> alloc.membership().cluster().group().map(ClusterSpec.Group::index)) .stream()) .distinct() .count(); this.isTopologyChange = currentGroups != wantedGroups; this.currentClusterSize = (int) nonRetiredNodesInCluster.state(Node.State.active).stream() .map(node -> node.allocation().flatMap(alloc -> alloc.membership().cluster().group())) .filter(clusterSpec.group()::equals) .count(); 
this.isAllocatingForReplacement = isReplacement(nodesInCluster.size(), nodesInCluster.state(Node.State.failed).size()); this.isDocker = resources(requestedNodes) != null; } /** Returns the list of nodes sorted by {@link NodeCandidate List<NodeCandidate> prioritize() { return nodes.values().stream().sorted().collect(Collectors.toList()); } /** * Add nodes that have been previously reserved to the same application from * an earlier downsizing of a cluster */ void addSurplusNodes(List<Node> surplusNodes) { for (Node node : surplusNodes) { NodeCandidate nodePri = candidateFrom(node, true, false); if (!nodePri.violatesSpares || isAllocatingForReplacement) { nodes.put(node, nodePri); } } } /** Add a node on each docker host with enough capacity for the requested flavor */ void addNewDockerNodes() { if ( ! isDocker) return; LockedNodeList candidates = allNodes .filter(node -> node.type() != NodeType.host || nodeRepository.canAllocateTenantNodeTo(node)) .filter(node -> node.reservedTo().isEmpty() || node.reservedTo().get().equals(application.tenant())); if (allocateFully) { Set<String> candidateHostnames = candidates.asList().stream() .filter(node -> node.type() == NodeType.tenant) .filter(node -> node.allocation() .map(a -> a.owner().tenant().equals(this.application.tenant())) .orElse(false)) .flatMap(node -> node.parentHostname().stream()) .collect(Collectors.toSet()); candidates = candidates.filter(node -> candidateHostnames.contains(node.hostname())); } addNewDockerNodesOn(candidates); } /** Add existing nodes allocated to the application */ void addApplicationNodes() { EnumSet<Node.State> legalStates = EnumSet.of(Node.State.active, Node.State.inactive, Node.State.reserved); allNodes.asList().stream() .filter(node -> node.type() == requestedNodes.type()) .filter(node -> legalStates.contains(node.state())) .filter(node -> node.allocation().isPresent()) .filter(node -> node.allocation().get().owner().equals(application)) .map(node -> candidateFrom(node, false, false)) 
.forEach(prioritizableNode -> nodes.put(prioritizableNode.node, prioritizableNode)); } /** Add nodes already provisioned, but not allocated to any application */ void addReadyNodes() { allNodes.asList().stream() .filter(node -> node.type() == requestedNodes.type()) .filter(node -> node.state() == Node.State.ready) .map(node -> candidateFrom(node, false, false)) .filter(n -> !n.violatesSpares || isAllocatingForReplacement) .forEach(candidate -> nodes.put(candidate.node, candidate)); } public List<NodeCandidate> nodes() { return new ArrayList<>(nodes.values()); } /** Create a candidate from given node */ private NodeCandidate candidateFrom(Node node, boolean isSurplusNode, boolean isNewNode) { NodeCandidate.Builder builder = new NodeCandidate.Builder(node).surplusNode(isSurplusNode) .newNode(isNewNode); allNodes.parentOf(node).ifPresent(parent -> { NodeResources parentCapacity = capacity.freeCapacityOf(parent, false); builder.parent(parent).freeParentCapacity(parentCapacity); if (!isNewNode) builder.resizable(! allocateFully && requestedNodes.canResize(node.resources(), parentCapacity, isTopologyChange, currentClusterSize)); if (spareHosts.contains(parent)) builder.violatesSpares(true); }); return builder.build(); } private boolean isReplacement(int nofNodesInCluster, int nodeFailedNodes) { if (nodeFailedNodes == 0) return false; return requestedNodes.fulfilledBy(nofNodesInCluster - nodeFailedNodes); } private static NodeResources resources(NodeSpec requestedNodes) { if ( ! (requestedNodes instanceof NodeSpec.CountNodeSpec)) return null; return requestedNodes.resources().get(); } }
Done.
private void addNewDockerNodesOn(LockedNodeList candidates) { for (Node host : candidates) { if ( ! capacity.hasCapacity(host, resources(requestedNodes))) continue; if ( ! allNodes.childrenOf(host).owner(application).cluster(clusterSpec.id()).isEmpty()) continue; Optional<IP.Allocation> allocation; try { allocation = host.ipConfig().pool().findAllocation(allNodes, nodeRepository.nameResolver()); if (allocation.isEmpty()) continue; } catch (Exception e) { log.log(Level.WARNING, "Failed allocating IP address on " + host.hostname() + " to " + application + ", cluster " + clusterSpec.id(), e); continue; } log.log(Level.FINE, "Creating new docker node on " + host); Node newNode = Node.createDockerNode(allocation.get().addresses(), allocation.get().hostname(), host.hostname(), resources(requestedNodes).with(host.flavor().resources().diskSpeed()) .with(host.flavor().resources().storageType()), NodeType.tenant); NodeCandidate nodePri = candidateFrom(newNode, false, true); if ( ! nodePri.violatesSpares || isAllocatingForReplacement) { log.log(Level.FINE, "Adding new Docker node " + newNode); nodes.put(newNode, nodePri); } } }
application + ", cluster " + clusterSpec.id(), e);
private void addNewDockerNodesOn(LockedNodeList candidates) { for (Node host : candidates) { if ( ! capacity.hasCapacity(host, resources(requestedNodes))) continue; if ( ! allNodes.childrenOf(host).owner(application).cluster(clusterSpec.id()).isEmpty()) continue; Optional<IP.Allocation> allocation; try { allocation = host.ipConfig().pool().findAllocation(allNodes, nodeRepository.nameResolver()); if (allocation.isEmpty()) continue; } catch (Exception e) { log.log(Level.WARNING, "Failed allocating IP address on " + host.hostname() + " to " + application + ", cluster " + clusterSpec.id() + ": " + Exceptions.toMessageString(e)); continue; } log.log(Level.FINE, "Creating new docker node on " + host); Node newNode = Node.createDockerNode(allocation.get().addresses(), allocation.get().hostname(), host.hostname(), resources(requestedNodes).with(host.flavor().resources().diskSpeed()) .with(host.flavor().resources().storageType()), NodeType.tenant); NodeCandidate nodePri = candidateFrom(newNode, false, true); if ( ! nodePri.violatesSpares || isAllocatingForReplacement) { log.log(Level.FINE, "Adding new Docker node " + newNode); nodes.put(newNode, nodePri); } } }
class NodePrioritizer { private final static Logger log = Logger.getLogger(NodePrioritizer.class.getName()); private final Map<Node, NodeCandidate> nodes = new HashMap<>(); private final LockedNodeList allNodes; private final HostCapacity capacity; private final NodeSpec requestedNodes; private final ApplicationId application; private final ClusterSpec clusterSpec; private final NodeRepository nodeRepository; private final boolean isDocker; private final boolean isAllocatingForReplacement; private final boolean isTopologyChange; /** If set, a host can only have nodes by single tenant and does not allow in-place resizing. */ private final boolean allocateFully; private final int currentClusterSize; private final Set<Node> spareHosts; NodePrioritizer(LockedNodeList allNodes, ApplicationId application, ClusterSpec clusterSpec, NodeSpec nodeSpec, int wantedGroups, boolean allocateFully, NodeRepository nodeRepository) { this.allNodes = allNodes; this.capacity = new HostCapacity(allNodes, nodeRepository.resourcesCalculator()); this.requestedNodes = nodeSpec; this.clusterSpec = clusterSpec; this.application = application; this.spareHosts = capacity.findSpareHosts(allNodes.asList(), nodeRepository.spareCount()); this.allocateFully = allocateFully; this.nodeRepository = nodeRepository; NodeList nodesInCluster = allNodes.owner(application).type(clusterSpec.type()).cluster(clusterSpec.id()); NodeList nonRetiredNodesInCluster = nodesInCluster.not().retired(); long currentGroups = nonRetiredNodesInCluster.state(Node.State.active).stream() .flatMap(node -> node.allocation() .flatMap(alloc -> alloc.membership().cluster().group().map(ClusterSpec.Group::index)) .stream()) .distinct() .count(); this.isTopologyChange = currentGroups != wantedGroups; this.currentClusterSize = (int) nonRetiredNodesInCluster.state(Node.State.active).stream() .map(node -> node.allocation().flatMap(alloc -> alloc.membership().cluster().group())) .filter(clusterSpec.group()::equals) .count(); 
this.isAllocatingForReplacement = isReplacement(nodesInCluster.size(), nodesInCluster.state(Node.State.failed).size()); this.isDocker = resources(requestedNodes) != null; } /** Returns the list of nodes sorted by {@link NodeCandidate List<NodeCandidate> prioritize() { return nodes.values().stream().sorted().collect(Collectors.toList()); } /** * Add nodes that have been previously reserved to the same application from * an earlier downsizing of a cluster */ void addSurplusNodes(List<Node> surplusNodes) { for (Node node : surplusNodes) { NodeCandidate nodePri = candidateFrom(node, true, false); if (!nodePri.violatesSpares || isAllocatingForReplacement) { nodes.put(node, nodePri); } } } /** Add a node on each docker host with enough capacity for the requested flavor */ void addNewDockerNodes() { if ( ! isDocker) return; LockedNodeList candidates = allNodes .filter(node -> node.type() != NodeType.host || nodeRepository.canAllocateTenantNodeTo(node)) .filter(node -> node.reservedTo().isEmpty() || node.reservedTo().get().equals(application.tenant())); if (allocateFully) { Set<String> candidateHostnames = candidates.asList().stream() .filter(node -> node.type() == NodeType.tenant) .filter(node -> node.allocation() .map(a -> a.owner().tenant().equals(this.application.tenant())) .orElse(false)) .flatMap(node -> node.parentHostname().stream()) .collect(Collectors.toSet()); candidates = candidates.filter(node -> candidateHostnames.contains(node.hostname())); } addNewDockerNodesOn(candidates); } /** Add existing nodes allocated to the application */ void addApplicationNodes() { EnumSet<Node.State> legalStates = EnumSet.of(Node.State.active, Node.State.inactive, Node.State.reserved); allNodes.asList().stream() .filter(node -> node.type() == requestedNodes.type()) .filter(node -> legalStates.contains(node.state())) .filter(node -> node.allocation().isPresent()) .filter(node -> node.allocation().get().owner().equals(application)) .map(node -> candidateFrom(node, false, false)) 
.forEach(prioritizableNode -> nodes.put(prioritizableNode.node, prioritizableNode)); } /** Add nodes already provisioned, but not allocated to any application */ void addReadyNodes() { allNodes.asList().stream() .filter(node -> node.type() == requestedNodes.type()) .filter(node -> node.state() == Node.State.ready) .map(node -> candidateFrom(node, false, false)) .filter(n -> !n.violatesSpares || isAllocatingForReplacement) .forEach(candidate -> nodes.put(candidate.node, candidate)); } public List<NodeCandidate> nodes() { return new ArrayList<>(nodes.values()); } /** Create a candidate from given node */ private NodeCandidate candidateFrom(Node node, boolean isSurplusNode, boolean isNewNode) { NodeCandidate.Builder builder = new NodeCandidate.Builder(node).surplusNode(isSurplusNode) .newNode(isNewNode); allNodes.parentOf(node).ifPresent(parent -> { NodeResources parentCapacity = capacity.freeCapacityOf(parent, false); builder.parent(parent).freeParentCapacity(parentCapacity); if (!isNewNode) builder.resizable(! allocateFully && requestedNodes.canResize(node.resources(), parentCapacity, isTopologyChange, currentClusterSize)); if (spareHosts.contains(parent)) builder.violatesSpares(true); }); return builder.build(); } private boolean isReplacement(int nofNodesInCluster, int nodeFailedNodes) { if (nodeFailedNodes == 0) return false; return requestedNodes.fulfilledBy(nofNodesInCluster - nodeFailedNodes); } private static NodeResources resources(NodeSpec requestedNodes) { if ( ! (requestedNodes instanceof NodeSpec.CountNodeSpec)) return null; return requestedNodes.resources().get(); } }
/**
 * Builds and prioritizes the set of candidate nodes that can satisfy a node allocation
 * request for one application cluster. Candidates are collected from surplus nodes,
 * new docker nodes, existing application nodes and ready nodes, then sorted by
 * {@link NodeCandidate}'s natural ordering.
 */
class NodePrioritizer {

    private final static Logger log = Logger.getLogger(NodePrioritizer.class.getName());

    /** Candidates collected so far, keyed by node to avoid duplicates across the add* methods */
    private final Map<Node, NodeCandidate> nodes = new HashMap<>();
    private final LockedNodeList allNodes;
    private final HostCapacity capacity;
    private final NodeSpec requestedNodes;
    private final ApplicationId application;
    private final ClusterSpec clusterSpec;
    private final NodeRepository nodeRepository;
    private final boolean isDocker;
    private final boolean isAllocatingForReplacement;
    private final boolean isTopologyChange;
    /** If set, a host can only have nodes by single tenant and does not allow in-place resizing. */
    private final boolean allocateFully;
    private final int currentClusterSize;
    private final Set<Node> spareHosts;

    NodePrioritizer(LockedNodeList allNodes, ApplicationId application, ClusterSpec clusterSpec, NodeSpec nodeSpec,
                    int wantedGroups, boolean allocateFully, NodeRepository nodeRepository) {
        this.allNodes = allNodes;
        this.capacity = new HostCapacity(allNodes, nodeRepository.resourcesCalculator());
        this.requestedNodes = nodeSpec;
        this.clusterSpec = clusterSpec;
        this.application = application;
        this.spareHosts = capacity.findSpareHosts(allNodes.asList(), nodeRepository.spareCount());
        this.allocateFully = allocateFully;
        this.nodeRepository = nodeRepository;

        NodeList nodesInCluster = allNodes.owner(application).type(clusterSpec.type()).cluster(clusterSpec.id());
        NodeList nonRetiredNodesInCluster = nodesInCluster.not().retired();

        // Count the distinct group indices currently active; a mismatch with the wanted
        // number of groups means this allocation changes the cluster topology
        long currentGroups = nonRetiredNodesInCluster.state(Node.State.active).stream()
                .flatMap(node -> node.allocation()
                        .flatMap(alloc -> alloc.membership().cluster().group().map(ClusterSpec.Group::index))
                        .stream())
                .distinct()
                .count();
        this.isTopologyChange = currentGroups != wantedGroups;

        // Size of the group this request targets, among the active non-retired nodes
        this.currentClusterSize = (int) nonRetiredNodesInCluster.state(Node.State.active).stream()
                .map(node -> node.allocation().flatMap(alloc -> alloc.membership().cluster().group()))
                .filter(clusterSpec.group()::equals)
                .count();

        this.isAllocatingForReplacement = isReplacement(nodesInCluster.size(),
                                                        nodesInCluster.state(Node.State.failed).size());
        this.isDocker = resources(requestedNodes) != null;
    }

    /** Returns the list of nodes sorted by {@link NodeCandidate}'s natural ordering */
    List<NodeCandidate> prioritize() {
        return nodes.values().stream().sorted().collect(Collectors.toList());
    }

    /**
     * Add nodes that have been previously reserved to the same application from
     * an earlier downsizing of a cluster
     */
    void addSurplusNodes(List<Node> surplusNodes) {
        for (Node node : surplusNodes) {
            NodeCandidate nodePri = candidateFrom(node, true, false);
            // Candidates on spare hosts are only admitted when replacing failed nodes
            if (!nodePri.violatesSpares || isAllocatingForReplacement) {
                nodes.put(node, nodePri);
            }
        }
    }

    /** Add a node on each docker host with enough capacity for the requested flavor */
    void addNewDockerNodes() {
        if ( ! isDocker) return;

        // Hosts must be allocatable and either unreserved or reserved to this tenant
        LockedNodeList candidates = allNodes
                .filter(node -> node.type() != NodeType.host || nodeRepository.canAllocateTenantNodeTo(node))
                .filter(node -> node.reservedTo().isEmpty() || node.reservedTo().get().equals(application.tenant()));

        if (allocateFully) {
            // Single-tenant mode: restrict to hosts already running this tenant's nodes
            Set<String> candidateHostnames = candidates.asList().stream()
                    .filter(node -> node.type() == NodeType.tenant)
                    .filter(node -> node.allocation()
                            .map(a -> a.owner().tenant().equals(this.application.tenant()))
                            .orElse(false))
                    .flatMap(node -> node.parentHostname().stream())
                    .collect(Collectors.toSet());

            candidates = candidates.filter(node -> candidateHostnames.contains(node.hostname()));
        }

        addNewDockerNodesOn(candidates);
    }

    /** Add existing nodes allocated to the application */
    void addApplicationNodes() {
        EnumSet<Node.State> legalStates = EnumSet.of(Node.State.active, Node.State.inactive, Node.State.reserved);
        allNodes.asList().stream()
                .filter(node -> node.type() == requestedNodes.type())
                .filter(node -> legalStates.contains(node.state()))
                .filter(node -> node.allocation().isPresent())
                .filter(node -> node.allocation().get().owner().equals(application))
                .map(node -> candidateFrom(node, false, false))
                .forEach(prioritizableNode -> nodes.put(prioritizableNode.node, prioritizableNode));
    }

    /** Add nodes already provisioned, but not allocated to any application */
    void addReadyNodes() {
        allNodes.asList().stream()
                .filter(node -> node.type() == requestedNodes.type())
                .filter(node -> node.state() == Node.State.ready)
                .map(node -> candidateFrom(node, false, false))
                .filter(n -> !n.violatesSpares || isAllocatingForReplacement)
                .forEach(candidate -> nodes.put(candidate.node, candidate));
    }

    /** Returns a copy of the candidates collected so far, in no particular order */
    public List<NodeCandidate> nodes() { return new ArrayList<>(nodes.values()); }

    /** Create a candidate from given node */
    private NodeCandidate candidateFrom(Node node, boolean isSurplusNode, boolean isNewNode) {
        NodeCandidate.Builder builder = new NodeCandidate.Builder(node).surplusNode(isSurplusNode)
                                                                       .newNode(isNewNode);
        allNodes.parentOf(node).ifPresent(parent -> {
            NodeResources parentCapacity = capacity.freeCapacityOf(parent, false);
            builder.parent(parent).freeParentCapacity(parentCapacity);
            // Existing nodes may be resized in place, unless hosts are allocated fully
            if (!isNewNode)
                builder.resizable(! allocateFully
                                  && requestedNodes.canResize(node.resources(), parentCapacity,
                                                              isTopologyChange, currentClusterSize));
            if (spareHosts.contains(parent))
                builder.violatesSpares(true);
        });
        return builder.build();
    }

    /** Returns whether this allocation replaces failed nodes: the non-failed remainder still fulfills the request */
    private boolean isReplacement(int nofNodesInCluster, int nodeFailedNodes) {
        if (nodeFailedNodes == 0) return false;
        return requestedNodes.fulfilledBy(nofNodesInCluster - nodeFailedNodes);
    }

    /** Returns the requested resources, or null if the spec is not a count spec */
    private static NodeResources resources(NodeSpec requestedNodes) {
        if ( ! (requestedNodes instanceof NodeSpec.CountNodeSpec)) return null;
        return requestedNodes.resources().get();
    }

}
```suggestion // Record is already correct; nothing to do ```
/**
 * Reconciles each active node's "down" history record with its currently observed
 * service status: records the node as down when all its services are bad, and clears
 * the record when they are not.
 */
private void updateNodeDownState() {
    NodeList active = NodeList.copyOf(nodeRepository().getNodes(Node.State.active));
    serviceMonitor.getServiceModelSnapshot().getServiceInstancesByHostName().forEach((hostname, services) -> {
        Optional<Node> candidate = active.matching(n -> n.hostname().equals(hostname.toString())).first();
        if (candidate.isEmpty()) return;

        boolean isBad = badNode(services);
        boolean recordedDown = candidate.get().history().event(History.Event.Type.down).isPresent();
        if (isBad == recordedDown) return; // record already matches observed state

        ApplicationId owner = candidate.get().allocation().get().owner();
        try (var lock = nodeRepository().lock(owner, Duration.ofSeconds(1))) {
            // Re-read the node under the application lock before mutating it
            Optional<Node> locked = getNode(hostname.toString(), owner, lock);
            if (locked.isEmpty()) return;
            if (isBad)
                recordAsDown(locked.get(), lock);
            else
                clearDownRecord(locked.get(), lock);
        } catch (UncheckedTimeoutException ignored) {
            // Lock was contended within the timeout; the node is revisited on the next run
        }
    });
}
/**
 * Reconciles each active node's "down" history record with its currently observed
 * service status: records the node as down when all its services are bad, and clears
 * the record when they are not.
 */
private void updateNodeDownState() {
    NodeList active = NodeList.copyOf(nodeRepository().getNodes(Node.State.active));
    serviceMonitor.getServiceModelSnapshot().getServiceInstancesByHostName().forEach((hostname, services) -> {
        Optional<Node> candidate = active.matching(n -> n.hostname().equals(hostname.toString())).first();
        if (candidate.isEmpty()) return;

        boolean isBad = badNode(services);
        boolean recordedDown = candidate.get().history().event(History.Event.Type.down).isPresent();
        if (isBad == recordedDown) return; // record already matches observed state

        ApplicationId owner = candidate.get().allocation().get().owner();
        try (var lock = nodeRepository().lock(owner, Duration.ofSeconds(1))) {
            // Re-read the node under the application lock before mutating it
            Optional<Node> locked = getNode(hostname.toString(), owner, lock);
            if (locked.isEmpty()) return;
            if (isBad)
                recordAsDown(locked.get(), lock);
            else
                clearDownRecord(locked.get(), lock);
        } catch (UncheckedTimeoutException ignored) {
            // Lock was contended within the timeout; the node is revisited on the next run
        }
    });
}
/**
 * Maintainer which fails nodes that are down or have failure reports, subject to a
 * throttling policy. Ready nodes are failed directly; active nodes are failed via a
 * redeployment of their owning application.
 */
class NodeFailer extends NodeRepositoryMaintainer {

    private static final Logger log = Logger.getLogger(NodeFailer.class.getName());
    private static final Duration nodeRequestInterval = Duration.ofMinutes(10);

    /** Metric for number of hosts that we want to fail, but cannot due to throttling */
    static final String throttledHostFailuresMetric = "throttledHostFailures";

    /** Metric for number of nodes (docker containers) that we want to fail, but cannot due to throttling */
    static final String throttledNodeFailuresMetric = "throttledNodeFailures";

    /** Metric that indicates whether throttling is active where 1 means active and 0 means inactive */
    static final String throttlingActiveMetric = "nodeFailThrottling";

    /** Provides information about the status of ready hosts */
    private final HostLivenessTracker hostLivenessTracker;

    /** Provides (more accurate) information about the status of active hosts */
    private final ServiceMonitor serviceMonitor;

    private final Deployer deployer;
    private final Duration downTimeLimit;
    private final Clock clock;
    private final Orchestrator orchestrator;
    private final Instant constructionTime;
    private final ThrottlePolicy throttlePolicy;
    private final Metric metric;

    public NodeFailer(Deployer deployer, HostLivenessTracker hostLivenessTracker,
                      ServiceMonitor serviceMonitor, NodeRepository nodeRepository,
                      Duration downTimeLimit, Duration interval, Clock clock, Orchestrator orchestrator,
                      ThrottlePolicy throttlePolicy, Metric metric) {
        // Run at least twice per down time limit so a down node is noticed in time
        // (min(...) is a statically imported helper — declared outside this view)
        super(nodeRepository, min(downTimeLimit.dividedBy(2), interval), metric);
        this.deployer = deployer;
        this.hostLivenessTracker = hostLivenessTracker;
        this.serviceMonitor = serviceMonitor;
        this.downTimeLimit = downTimeLimit;
        this.clock = clock;
        this.orchestrator = orchestrator;
        this.constructionTime = clock.instant();
        this.throttlePolicy = throttlePolicy;
        this.metric = metric;
    }

    /**
     * Fails ready nodes (under the unallocated lock) and active nodes (via redeployment),
     * reporting throttling through metrics.
     *
     * @return true when no failure was held back by throttling
     */
    @Override
    protected boolean maintain() {
        int throttledHostFailures = 0;
        int throttledNodeFailures = 0;

        try (Mutex lock = nodeRepository().lockUnallocated()) {
            updateNodeLivenessEventsForReadyNodes(lock);

            for (Map.Entry<Node, String> entry : getReadyNodesByFailureReason().entrySet()) {
                Node node = entry.getKey();
                if (throttle(node)) {
                    if (node.type().isHost())
                        throttledHostFailures++;
                    else
                        throttledNodeFailures++;
                    continue;
                }
                String reason = entry.getValue();
                nodeRepository().fail(node.hostname(), Agent.NodeFailer, reason);
            }
        }

        updateNodeDownState();
        List<Node> activeNodes = nodeRepository().getNodes(Node.State.active);

        for (Map.Entry<Node, String> entry : getActiveNodesByFailureReason(activeNodes).entrySet()) {
            Node node = entry.getKey();
            if (!failAllowedFor(node.type())) continue;
            if (throttle(node)) {
                if (node.type().isHost())
                    throttledHostFailures++;
                else
                    throttledNodeFailures++;
                continue;
            }
            String reason = entry.getValue();
            failActive(node, reason);
        }

        // Report throttling as a 0/1 gauge plus per-kind counts
        int throttlingActive = Math.min(1, throttledHostFailures + throttledNodeFailures);
        metric.set(throttlingActiveMetric, throttlingActive, null);
        metric.set(throttledHostFailuresMetric, throttledHostFailures, null);
        metric.set(throttledNodeFailuresMetric, throttledNodeFailures, null);
        return throttlingActive == 0;
    }

    /** Records a "requested" history event for each ready node with a config request newer than its last recorded one */
    private void updateNodeLivenessEventsForReadyNodes(Mutex lock) {
        for (Node node : nodeRepository().getNodes(Node.State.ready)) {
            Optional<Instant> lastLocalRequest = hostLivenessTracker.lastRequestFrom(node.hostname());
            if (lastLocalRequest.isEmpty()) continue;

            if (! node.history().hasEventAfter(History.Event.Type.requested, lastLocalRequest.get())) {
                History updatedHistory = node.history()
                        .with(new History.Event(History.Event.Type.requested, Agent.NodeFailer, lastLocalRequest.get()));
                nodeRepository().write(node.with(updatedHistory), lock);
            }
        }
    }

    /** Returns ready nodes that should be failed, mapped to the reason for failing them */
    private Map<Node, String> getReadyNodesByFailureReason() {
        // Shortly after construction we have no liveness history yet, so accept any request time
        Instant oldestAcceptableRequestTime =
                constructionTime.isAfter(clock.instant().minus(nodeRequestInterval.multipliedBy(2))) ?
                        Instant.EPOCH :
                        clock.instant().minus(downTimeLimit).minus(nodeRequestInterval);

        Map<Node, String> nodesByFailureReason = new HashMap<>();
        for (Node node : nodeRepository().getNodes(Node.State.ready)) {
            if (expectConfigRequests(node) && ! hasNodeRequestedConfigAfter(node, oldestAcceptableRequestTime)) {
                nodesByFailureReason.put(node, "Not receiving config requests from node");
            } else {
                Node hostNode = node.parentHostname().flatMap(parent -> nodeRepository().getNode(parent)).orElse(node);
                List<String> failureReports = reasonsToFailParentHost(hostNode);
                if (failureReports.size() > 0) {
                    if (hostNode.equals(node)) {
                        nodesByFailureReason.put(node, "Host has failure reports: " + failureReports);
                    } else {
                        nodesByFailureReason.put(node, "Parent (" + hostNode + ") has failure reports: " + failureReports);
                    }
                }
            }
        }
        return nodesByFailureReason;
    }

    /**
     * Returns active nodes that should be failed, mapped to the reason: nodes down
     * longer than the grace period (unless the application is suspended), and nodes
     * on suspended hosts with failure reports.
     */
    private Map<Node, String> getActiveNodesByFailureReason(List<Node> activeNodes) {
        Instant graceTimeEnd = clock.instant().minus(downTimeLimit);
        Map<Node, String> nodesByFailureReason = new HashMap<>();
        for (Node node : activeNodes) {
            if (node.history().hasEventBefore(History.Event.Type.down, graceTimeEnd) && ! applicationSuspended(node)) {
                nodesByFailureReason.put(node, "Node has been down longer than " + downTimeLimit);
            } else if (hostSuspended(node, activeNodes)) {
                Node hostNode = node.parentHostname().flatMap(parent -> nodeRepository().getNode(parent)).orElse(node);
                if (hostNode.type().isHost()) {
                    List<String> failureReports = reasonsToFailParentHost(hostNode);
                    if (failureReports.size() > 0) {
                        if (hostNode.equals(node)) {
                            nodesByFailureReason.put(node, "Host has failure reports: " + failureReports);
                        } else {
                            nodesByFailureReason.put(node, "Parent (" + hostNode + ") has failure reports: " + failureReports);
                        }
                    }
                }
            }
        }
        return nodesByFailureReason;
    }

    /** Returns human-readable descriptions of the host's reports whose type says the host should be failed */
    public static List<String> reasonsToFailParentHost(Node hostNode) {
        return hostNode.reports().getReports().stream()
                .filter(report -> report.getType().hostShouldBeFailed())
                .map(report -> report.getReportId() + " reported " + report.getCreatedTime() + ": " + report.getDescription())
                .collect(Collectors.toList());
    }

    /** Returns whether node has any kind of hardware issue */
    static boolean hasHardwareIssue(Node node, NodeRepository nodeRepository) {
        Node hostNode = node.parentHostname().flatMap(parent -> nodeRepository.getNode(parent)).orElse(node);
        return reasonsToFailParentHost(hostNode).size() > 0;
    }

    /** Get node by given hostname and application. The applicationLock must be held when calling this */
    private Optional<Node> getNode(String hostname, ApplicationId application,
                                   @SuppressWarnings("unused") Mutex applicationLock) {
        return nodeRepository().getNode(hostname, Node.State.active)
                .filter(node -> node.allocation().isPresent())
                .filter(node -> node.allocation().get().owner().equals(application));
    }

    /** Only non-host nodes are expected to send config requests */
    private boolean expectConfigRequests(Node node) {
        return !node.type().isHost();
    }

    private boolean hasNodeRequestedConfigAfter(Node node, Instant instant) {
        return !wasMadeReadyBefore(node, instant) || hasRecordedRequestAfter(node, instant);
    }

    private boolean wasMadeReadyBefore(Node node, Instant instant) {
        return node.history().hasEventBefore(History.Event.Type.readied, instant);
    }

    private boolean hasRecordedRequestAfter(Node node, Instant instant) {
        return node.history().hasEventAfter(History.Event.Type.requested, instant);
    }

    /** Returns whether the node's owning application is allowed to be down; unknown applications count as not suspended */
    private boolean applicationSuspended(Node node) {
        try {
            return orchestrator.getApplicationInstanceStatus(node.allocation().get().owner())
                   == ApplicationInstanceStatus.ALLOWED_TO_BE_DOWN;
        } catch (ApplicationIdNotFoundException e) {
            // Treat an unknown application as not suspended
            return false;
        }
    }

    private boolean nodeSuspended(Node node) {
        try {
            return orchestrator.getNodeStatus(new HostName(node.hostname())).isSuspended();
        } catch (HostNameNotFoundException e) {
            // Treat an unknown hostname as not suspended
            return false;
        }
    }

    /** Is the node and all active children suspended? */
    private boolean hostSuspended(Node node, List<Node> activeNodes) {
        if (!nodeSuspended(node)) return false;
        if (node.parentHostname().isPresent()) return true; // optimization
        return activeNodes.stream()
                .filter(childNode -> childNode.parentHostname().isPresent() &&
                                     childNode.parentHostname().get().equals(node.hostname()))
                .allMatch(this::nodeSuspended);
    }

    /**
     * We can attempt to fail any number of *tenant* and *host* nodes because the operation will not be effected
     * unless the node is replaced.
     * We can also attempt to fail a single proxy(host) as there should be enough redundancy to handle that.
     * But we refuse to fail out config(host)/controller(host)
     */
    private boolean failAllowedFor(NodeType nodeType) {
        switch (nodeType) {
            case tenant:
            case host:
                return true;
            case proxy:
            case proxyhost:
                // Only one proxy(host) may be in failed state at a time
                return nodeRepository().getNodes(nodeType, Node.State.failed).size() == 0;
            default:
                return false;
        }
    }

    /**
     * Returns true if the node is considered bad: All monitored services are down.
     * If a node remains bad for a long time, the NodeFailer will try to fail the node.
     */
    static boolean badNode(List<ServiceInstance> services) {
        Map<ServiceStatus, Long> countsByStatus = services.stream()
                .collect(Collectors.groupingBy(ServiceInstance::serviceStatus, counting()));
        return countsByStatus.getOrDefault(ServiceStatus.UP, 0L) <= 0L &&
               countsByStatus.getOrDefault(ServiceStatus.DOWN, 0L) > 0L;
    }

    /** Record a node as down if not already recorded */
    private void recordAsDown(Node node, Mutex lock) {
        if (node.history().event(History.Event.Type.down).isPresent()) return; // already down
        nodeRepository().write(node.downAt(clock.instant(), Agent.NodeFailer), lock);
    }

    /** Clear down record for node, if any */
    private void clearDownRecord(Node node, Mutex lock) {
        if (node.history().event(History.Event.Type.down).isEmpty()) return;
        nodeRepository().write(node.up(), lock);
    }

    /**
     * Called when a node should be moved to the failed state: Do that if it seems safe,
     * which is when the node repo has available capacity to replace the node (and all its tenant nodes if host).
     * Otherwise not replacing the node ensures (by Orchestrator check) that no further action will be taken.
     *
     * @return whether node was successfully failed
     */
    private boolean failActive(Node node, String reason) {
        Optional<Deployment> deployment =
                deployer.deployFromLocalActive(node.allocation().get().owner(), Duration.ofMinutes(30));
        if (deployment.isEmpty()) return false; // this will be done at another config server

        try (Mutex lock = nodeRepository().lock(node.allocation().get().owner())) {
            // Fail all active children (recursively) first; non-active children are failed directly
            boolean allTenantNodesFailedOutSuccessfully = true;
            String reasonForChildFailure = "Failing due to parent host " + node.hostname() + " failure: " + reason;
            for (Node failingTenantNode : nodeRepository().list().childrenOf(node)) {
                if (failingTenantNode.state() == Node.State.active) {
                    allTenantNodesFailedOutSuccessfully &= failActive(failingTenantNode, reasonForChildFailure);
                } else {
                    nodeRepository().fail(failingTenantNode.hostname(), Agent.NodeFailer, reasonForChildFailure);
                }
            }

            if (! allTenantNodesFailedOutSuccessfully) return false;

            node = nodeRepository().fail(node.hostname(), Agent.NodeFailer, reason);
            try {
                deployment.get().activate();
                return true;
            } catch (TransientException e) {
                log.log(Level.INFO, "Failed to redeploy " + node.allocation().get().owner() +
                                    " with a transient error, will be retried by application maintainer: " +
                                    Exceptions.toMessageString(e));
                return true;
            } catch (RuntimeException e) {
                // Reset the state of the node in the node repository, such that for example
                // dirty nodes become flagged for failing again on the next run
                nodeRepository().reactivate(node.hostname(), Agent.NodeFailer,
                                            "Failed to redeploy after being failed by NodeFailer");
                log.log(Level.WARNING, "Attempted to fail " + node + " for " + node.allocation().get().owner() +
                                       ", but redeploying without the node failed", e);
                return false;
            }
        }
    }

    /** Returns true if node failing should be throttled */
    private boolean throttle(Node node) {
        if (throttlePolicy == ThrottlePolicy.disabled) return false;
        Instant startOfThrottleWindow = clock.instant().minus(throttlePolicy.throttleWindow);
        List<Node> nodes = nodeRepository().getNodes();
        NodeList recentlyFailedNodes = nodes.stream()
                .filter(n -> n.state() == Node.State.failed)
                .filter(n -> n.history().hasEventAfter(History.Event.Type.failed, startOfThrottleWindow))
                .collect(collectingAndThen(Collectors.toList(), NodeList::copyOf));

        // Allow failing when below the fraction-based limit
        if (recentlyFailedNodes.size() < throttlePolicy.allowedToFailOf(nodes.size())) return false;
        // Always allow failing physical nodes up to minimum limit
        if (node.parentHostname().isEmpty() &&
            recentlyFailedNodes.parents().size() < throttlePolicy.minimumAllowedToFail) return false;

        log.info(String.format("Want to fail node %s, but throttling is in effect: %s", node.hostname(),
                               throttlePolicy.toHumanReadableString(nodes.size())));
        return true;
    }

    /** Policy limiting how many nodes may be failed within a rolling time window */
    public enum ThrottlePolicy {

        hosted(Duration.ofDays(1), 0.02, 2),
        disabled(Duration.ZERO, 0, 0);

        private final Duration throttleWindow;
        private final double fractionAllowedToFail;
        private final int minimumAllowedToFail;

        ThrottlePolicy(Duration throttleWindow, double fractionAllowedToFail, int minimumAllowedToFail) {
            this.throttleWindow = throttleWindow;
            this.fractionAllowedToFail = fractionAllowedToFail;
            this.minimumAllowedToFail = minimumAllowedToFail;
        }

        /** Returns the larger of the fraction-based and the minimum limit */
        public int allowedToFailOf(int totalNodes) {
            return (int) Math.max(totalNodes * fractionAllowedToFail, minimumAllowedToFail);
        }

        public String toHumanReadableString(int totalNodes) {
            return String.format("Max %.0f%% (%d) or %d nodes can fail over a period of %s",
                                 fractionAllowedToFail*100, allowedToFailOf(totalNodes),
                                 minimumAllowedToFail, throttleWindow);
        }

    }

}
/**
 * Maintainer which fails nodes that are down or have failure reports, subject to a
 * throttling policy. Ready nodes are failed directly; active nodes are failed via a
 * redeployment of their owning application.
 */
class NodeFailer extends NodeRepositoryMaintainer {

    private static final Logger log = Logger.getLogger(NodeFailer.class.getName());
    private static final Duration nodeRequestInterval = Duration.ofMinutes(10);

    /** Metric for number of hosts that we want to fail, but cannot due to throttling */
    static final String throttledHostFailuresMetric = "throttledHostFailures";

    /** Metric for number of nodes (docker containers) that we want to fail, but cannot due to throttling */
    static final String throttledNodeFailuresMetric = "throttledNodeFailures";

    /** Metric that indicates whether throttling is active where 1 means active and 0 means inactive */
    static final String throttlingActiveMetric = "nodeFailThrottling";

    /** Provides information about the status of ready hosts */
    private final HostLivenessTracker hostLivenessTracker;

    /** Provides (more accurate) information about the status of active hosts */
    private final ServiceMonitor serviceMonitor;

    private final Deployer deployer;
    private final Duration downTimeLimit;
    private final Clock clock;
    private final Orchestrator orchestrator;
    private final Instant constructionTime;
    private final ThrottlePolicy throttlePolicy;
    private final Metric metric;

    public NodeFailer(Deployer deployer, HostLivenessTracker hostLivenessTracker,
                      ServiceMonitor serviceMonitor, NodeRepository nodeRepository,
                      Duration downTimeLimit, Duration interval, Clock clock, Orchestrator orchestrator,
                      ThrottlePolicy throttlePolicy, Metric metric) {
        // Run at least twice per down time limit so a down node is noticed in time
        // (min(...) is a statically imported helper — declared outside this view)
        super(nodeRepository, min(downTimeLimit.dividedBy(2), interval), metric);
        this.deployer = deployer;
        this.hostLivenessTracker = hostLivenessTracker;
        this.serviceMonitor = serviceMonitor;
        this.downTimeLimit = downTimeLimit;
        this.clock = clock;
        this.orchestrator = orchestrator;
        this.constructionTime = clock.instant();
        this.throttlePolicy = throttlePolicy;
        this.metric = metric;
    }

    /**
     * Fails ready nodes (under the unallocated lock) and active nodes (via redeployment),
     * reporting throttling through metrics.
     *
     * @return true when no failure was held back by throttling
     */
    @Override
    protected boolean maintain() {
        int throttledHostFailures = 0;
        int throttledNodeFailures = 0;

        try (Mutex lock = nodeRepository().lockUnallocated()) {
            updateNodeLivenessEventsForReadyNodes(lock);

            for (Map.Entry<Node, String> entry : getReadyNodesByFailureReason().entrySet()) {
                Node node = entry.getKey();
                if (throttle(node)) {
                    if (node.type().isHost())
                        throttledHostFailures++;
                    else
                        throttledNodeFailures++;
                    continue;
                }
                String reason = entry.getValue();
                nodeRepository().fail(node.hostname(), Agent.NodeFailer, reason);
            }
        }

        updateNodeDownState();
        List<Node> activeNodes = nodeRepository().getNodes(Node.State.active);

        for (Map.Entry<Node, String> entry : getActiveNodesByFailureReason(activeNodes).entrySet()) {
            Node node = entry.getKey();
            if (!failAllowedFor(node.type())) continue;
            if (throttle(node)) {
                if (node.type().isHost())
                    throttledHostFailures++;
                else
                    throttledNodeFailures++;
                continue;
            }
            String reason = entry.getValue();
            failActive(node, reason);
        }

        // Report throttling as a 0/1 gauge plus per-kind counts
        int throttlingActive = Math.min(1, throttledHostFailures + throttledNodeFailures);
        metric.set(throttlingActiveMetric, throttlingActive, null);
        metric.set(throttledHostFailuresMetric, throttledHostFailures, null);
        metric.set(throttledNodeFailuresMetric, throttledNodeFailures, null);
        return throttlingActive == 0;
    }

    /** Records a "requested" history event for each ready node with a config request newer than its last recorded one */
    private void updateNodeLivenessEventsForReadyNodes(Mutex lock) {
        for (Node node : nodeRepository().getNodes(Node.State.ready)) {
            Optional<Instant> lastLocalRequest = hostLivenessTracker.lastRequestFrom(node.hostname());
            if (lastLocalRequest.isEmpty()) continue;

            if (! node.history().hasEventAfter(History.Event.Type.requested, lastLocalRequest.get())) {
                History updatedHistory = node.history()
                        .with(new History.Event(History.Event.Type.requested, Agent.NodeFailer, lastLocalRequest.get()));
                nodeRepository().write(node.with(updatedHistory), lock);
            }
        }
    }

    /** Returns ready nodes that should be failed, mapped to the reason for failing them */
    private Map<Node, String> getReadyNodesByFailureReason() {
        // Shortly after construction we have no liveness history yet, so accept any request time
        Instant oldestAcceptableRequestTime =
                constructionTime.isAfter(clock.instant().minus(nodeRequestInterval.multipliedBy(2))) ?
                        Instant.EPOCH :
                        clock.instant().minus(downTimeLimit).minus(nodeRequestInterval);

        Map<Node, String> nodesByFailureReason = new HashMap<>();
        for (Node node : nodeRepository().getNodes(Node.State.ready)) {
            if (expectConfigRequests(node) && ! hasNodeRequestedConfigAfter(node, oldestAcceptableRequestTime)) {
                nodesByFailureReason.put(node, "Not receiving config requests from node");
            } else {
                Node hostNode = node.parentHostname().flatMap(parent -> nodeRepository().getNode(parent)).orElse(node);
                List<String> failureReports = reasonsToFailParentHost(hostNode);
                if (failureReports.size() > 0) {
                    if (hostNode.equals(node)) {
                        nodesByFailureReason.put(node, "Host has failure reports: " + failureReports);
                    } else {
                        nodesByFailureReason.put(node, "Parent (" + hostNode + ") has failure reports: " + failureReports);
                    }
                }
            }
        }
        return nodesByFailureReason;
    }

    /**
     * Returns active nodes that should be failed, mapped to the reason: nodes down
     * longer than the grace period (unless the application is suspended), and nodes
     * on suspended hosts with failure reports.
     */
    private Map<Node, String> getActiveNodesByFailureReason(List<Node> activeNodes) {
        Instant graceTimeEnd = clock.instant().minus(downTimeLimit);
        Map<Node, String> nodesByFailureReason = new HashMap<>();
        for (Node node : activeNodes) {
            if (node.history().hasEventBefore(History.Event.Type.down, graceTimeEnd) && ! applicationSuspended(node)) {
                nodesByFailureReason.put(node, "Node has been down longer than " + downTimeLimit);
            } else if (hostSuspended(node, activeNodes)) {
                Node hostNode = node.parentHostname().flatMap(parent -> nodeRepository().getNode(parent)).orElse(node);
                if (hostNode.type().isHost()) {
                    List<String> failureReports = reasonsToFailParentHost(hostNode);
                    if (failureReports.size() > 0) {
                        if (hostNode.equals(node)) {
                            nodesByFailureReason.put(node, "Host has failure reports: " + failureReports);
                        } else {
                            nodesByFailureReason.put(node, "Parent (" + hostNode + ") has failure reports: " + failureReports);
                        }
                    }
                }
            }
        }
        return nodesByFailureReason;
    }

    /** Returns human-readable descriptions of the host's reports whose type says the host should be failed */
    public static List<String> reasonsToFailParentHost(Node hostNode) {
        return hostNode.reports().getReports().stream()
                .filter(report -> report.getType().hostShouldBeFailed())
                .map(report -> report.getReportId() + " reported " + report.getCreatedTime() + ": " + report.getDescription())
                .collect(Collectors.toList());
    }

    /** Returns whether node has any kind of hardware issue */
    static boolean hasHardwareIssue(Node node, NodeRepository nodeRepository) {
        Node hostNode = node.parentHostname().flatMap(parent -> nodeRepository.getNode(parent)).orElse(node);
        return reasonsToFailParentHost(hostNode).size() > 0;
    }

    /** Get node by given hostname and application. The applicationLock must be held when calling this */
    private Optional<Node> getNode(String hostname, ApplicationId application,
                                   @SuppressWarnings("unused") Mutex applicationLock) {
        return nodeRepository().getNode(hostname, Node.State.active)
                .filter(node -> node.allocation().isPresent())
                .filter(node -> node.allocation().get().owner().equals(application));
    }

    /** Only non-host nodes are expected to send config requests */
    private boolean expectConfigRequests(Node node) {
        return !node.type().isHost();
    }

    private boolean hasNodeRequestedConfigAfter(Node node, Instant instant) {
        return !wasMadeReadyBefore(node, instant) || hasRecordedRequestAfter(node, instant);
    }

    private boolean wasMadeReadyBefore(Node node, Instant instant) {
        return node.history().hasEventBefore(History.Event.Type.readied, instant);
    }

    private boolean hasRecordedRequestAfter(Node node, Instant instant) {
        return node.history().hasEventAfter(History.Event.Type.requested, instant);
    }

    /** Returns whether the node's owning application is allowed to be down; unknown applications count as not suspended */
    private boolean applicationSuspended(Node node) {
        try {
            return orchestrator.getApplicationInstanceStatus(node.allocation().get().owner())
                   == ApplicationInstanceStatus.ALLOWED_TO_BE_DOWN;
        } catch (ApplicationIdNotFoundException e) {
            // Treat an unknown application as not suspended
            return false;
        }
    }

    private boolean nodeSuspended(Node node) {
        try {
            return orchestrator.getNodeStatus(new HostName(node.hostname())).isSuspended();
        } catch (HostNameNotFoundException e) {
            // Treat an unknown hostname as not suspended
            return false;
        }
    }

    /** Is the node and all active children suspended? */
    private boolean hostSuspended(Node node, List<Node> activeNodes) {
        if (!nodeSuspended(node)) return false;
        if (node.parentHostname().isPresent()) return true; // optimization
        return activeNodes.stream()
                .filter(childNode -> childNode.parentHostname().isPresent() &&
                                     childNode.parentHostname().get().equals(node.hostname()))
                .allMatch(this::nodeSuspended);
    }

    /**
     * We can attempt to fail any number of *tenant* and *host* nodes because the operation will not be effected
     * unless the node is replaced.
     * We can also attempt to fail a single proxy(host) as there should be enough redundancy to handle that.
     * But we refuse to fail out config(host)/controller(host)
     */
    private boolean failAllowedFor(NodeType nodeType) {
        switch (nodeType) {
            case tenant:
            case host:
                return true;
            case proxy:
            case proxyhost:
                // Only one proxy(host) may be in failed state at a time
                return nodeRepository().getNodes(nodeType, Node.State.failed).size() == 0;
            default:
                return false;
        }
    }

    /**
     * Returns true if the node is considered bad: All monitored services are down.
     * If a node remains bad for a long time, the NodeFailer will try to fail the node.
     */
    static boolean badNode(List<ServiceInstance> services) {
        Map<ServiceStatus, Long> countsByStatus = services.stream()
                .collect(Collectors.groupingBy(ServiceInstance::serviceStatus, counting()));
        return countsByStatus.getOrDefault(ServiceStatus.UP, 0L) <= 0L &&
               countsByStatus.getOrDefault(ServiceStatus.DOWN, 0L) > 0L;
    }

    /** Record a node as down if not already recorded */
    private void recordAsDown(Node node, Mutex lock) {
        if (node.history().event(History.Event.Type.down).isPresent()) return; // already down
        nodeRepository().write(node.downAt(clock.instant(), Agent.NodeFailer), lock);
    }

    /** Clear down record for node, if any */
    private void clearDownRecord(Node node, Mutex lock) {
        if (node.history().event(History.Event.Type.down).isEmpty()) return;
        nodeRepository().write(node.up(), lock);
    }

    /**
     * Called when a node should be moved to the failed state: Do that if it seems safe,
     * which is when the node repo has available capacity to replace the node (and all its tenant nodes if host).
     * Otherwise not replacing the node ensures (by Orchestrator check) that no further action will be taken.
     *
     * @return whether node was successfully failed
     */
    private boolean failActive(Node node, String reason) {
        Optional<Deployment> deployment =
                deployer.deployFromLocalActive(node.allocation().get().owner(), Duration.ofMinutes(30));
        if (deployment.isEmpty()) return false; // this will be done at another config server

        try (Mutex lock = nodeRepository().lock(node.allocation().get().owner())) {
            // Fail all active children (recursively) first; non-active children are failed directly
            boolean allTenantNodesFailedOutSuccessfully = true;
            String reasonForChildFailure = "Failing due to parent host " + node.hostname() + " failure: " + reason;
            for (Node failingTenantNode : nodeRepository().list().childrenOf(node)) {
                if (failingTenantNode.state() == Node.State.active) {
                    allTenantNodesFailedOutSuccessfully &= failActive(failingTenantNode, reasonForChildFailure);
                } else {
                    nodeRepository().fail(failingTenantNode.hostname(), Agent.NodeFailer, reasonForChildFailure);
                }
            }

            if (! allTenantNodesFailedOutSuccessfully) return false;

            node = nodeRepository().fail(node.hostname(), Agent.NodeFailer, reason);
            try {
                deployment.get().activate();
                return true;
            } catch (TransientException e) {
                log.log(Level.INFO, "Failed to redeploy " + node.allocation().get().owner() +
                                    " with a transient error, will be retried by application maintainer: " +
                                    Exceptions.toMessageString(e));
                return true;
            } catch (RuntimeException e) {
                // Reset the state of the node in the node repository, such that for example
                // dirty nodes become flagged for failing again on the next run
                nodeRepository().reactivate(node.hostname(), Agent.NodeFailer,
                                            "Failed to redeploy after being failed by NodeFailer");
                log.log(Level.WARNING, "Attempted to fail " + node + " for " + node.allocation().get().owner() +
                                       ", but redeploying without the node failed", e);
                return false;
            }
        }
    }

    /** Returns true if node failing should be throttled */
    private boolean throttle(Node node) {
        if (throttlePolicy == ThrottlePolicy.disabled) return false;
        Instant startOfThrottleWindow = clock.instant().minus(throttlePolicy.throttleWindow);
        List<Node> nodes = nodeRepository().getNodes();
        NodeList recentlyFailedNodes = nodes.stream()
                .filter(n -> n.state() == Node.State.failed)
                .filter(n -> n.history().hasEventAfter(History.Event.Type.failed, startOfThrottleWindow))
                .collect(collectingAndThen(Collectors.toList(), NodeList::copyOf));

        // Allow failing when below the fraction-based limit
        if (recentlyFailedNodes.size() < throttlePolicy.allowedToFailOf(nodes.size())) return false;
        // Always allow failing physical nodes up to minimum limit
        if (node.parentHostname().isEmpty() &&
            recentlyFailedNodes.parents().size() < throttlePolicy.minimumAllowedToFail) return false;

        log.info(String.format("Want to fail node %s, but throttling is in effect: %s", node.hostname(),
                               throttlePolicy.toHumanReadableString(nodes.size())));
        return true;
    }

    /** Policy limiting how many nodes may be failed within a rolling time window */
    public enum ThrottlePolicy {

        hosted(Duration.ofDays(1), 0.02, 2),
        disabled(Duration.ZERO, 0, 0);

        private final Duration throttleWindow;
        private final double fractionAllowedToFail;
        private final int minimumAllowedToFail;

        ThrottlePolicy(Duration throttleWindow, double fractionAllowedToFail, int minimumAllowedToFail) {
            this.throttleWindow = throttleWindow;
            this.fractionAllowedToFail = fractionAllowedToFail;
            this.minimumAllowedToFail = minimumAllowedToFail;
        }

        /** Returns the larger of the fraction-based and the minimum limit */
        public int allowedToFailOf(int totalNodes) {
            return (int) Math.max(totalNodes * fractionAllowedToFail, minimumAllowedToFail);
        }

        public String toHumanReadableString(int totalNodes) {
            return String.format("Max %.0f%% (%d) or %d nodes can fail over a period of %s",
                                 fractionAllowedToFail*100, allowedToFailOf(totalNodes),
                                 minimumAllowedToFail, throttleWindow);
        }

    }

}
```suggestion // Deleting an application is done by deleting the remote session, all config ``` Will this config server also pick up the deletion through the watcher?
public boolean delete(ApplicationId applicationId) { Tenant tenant = getTenant(applicationId); if (tenant == null) return false; TenantApplications tenantApplications = tenant.getApplicationRepo(); try (Lock lock = tenantApplications.lock(applicationId)) { Optional<Long> activeSession = tenantApplications.activeSessionOf(applicationId); if (activeSession.isEmpty()) return false; try { RemoteSession remoteSession = getRemoteSession(tenant, activeSession.get()); tenant.getSessionRepository().delete(remoteSession); } catch (NotFoundException e) { log.log(Level.INFO, TenantRepository.logPre(applicationId) + "Active session exists, but has not been deleted properly. Trying to cleanup"); } NestedTransaction transaction = new NestedTransaction(); Curator curator = tenantRepository.getCurator(); transaction.add(new ContainerEndpointsCache(tenant.getPath(), curator).delete(applicationId)); transaction.add(new ApplicationRolesStore(curator, tenant.getPath()).delete(applicationId)); transaction.add(new EndpointCertificateMetadataStore(curator, tenant.getPath()).delete(applicationId)); transaction.add(tenantApplications.createDeleteTransaction(applicationId)); hostProvisioner.ifPresent(provisioner -> provisioner.remove(transaction, applicationId)); transaction.onCommitted(() -> log.log(Level.INFO, "Deleted " + applicationId)); transaction.commit(); return true; } }
public boolean delete(ApplicationId applicationId) { Tenant tenant = getTenant(applicationId); if (tenant == null) return false; TenantApplications tenantApplications = tenant.getApplicationRepo(); try (Lock lock = tenantApplications.lock(applicationId)) { Optional<Long> activeSession = tenantApplications.activeSessionOf(applicationId); if (activeSession.isEmpty()) return false; try { RemoteSession remoteSession = getRemoteSession(tenant, activeSession.get()); tenant.getSessionRepository().delete(remoteSession); } catch (NotFoundException e) { log.log(Level.INFO, TenantRepository.logPre(applicationId) + "Active session exists, but has not been deleted properly. Trying to cleanup"); } NestedTransaction transaction = new NestedTransaction(); Curator curator = tenantRepository.getCurator(); transaction.add(new ContainerEndpointsCache(tenant.getPath(), curator).delete(applicationId)); transaction.add(new ApplicationRolesStore(curator, tenant.getPath()).delete(applicationId)); transaction.add(new EndpointCertificateMetadataStore(curator, tenant.getPath()).delete(applicationId)); transaction.add(tenantApplications.createDeleteTransaction(applicationId)); hostProvisioner.ifPresent(provisioner -> provisioner.remove(transaction, applicationId)); transaction.onCommitted(() -> log.log(Level.INFO, "Deleted " + applicationId)); transaction.commit(); return true; } }
class Builder { private TenantRepository tenantRepository; private Optional<Provisioner> hostProvisioner; private HttpProxy httpProxy = new HttpProxy(new SimpleHttpFetcher()); private Clock clock = Clock.systemUTC(); private ConfigserverConfig configserverConfig = new ConfigserverConfig.Builder().build(); private Orchestrator orchestrator; private LogRetriever logRetriever = new LogRetriever(); private TesterClient testerClient = new TesterClient(); private Metric metric = new NullMetric(); private FlagSource flagSource = new InMemoryFlagSource(); public Builder withTenantRepository(TenantRepository tenantRepository) { this.tenantRepository = tenantRepository; return this; } public Builder withClock(Clock clock) { this.clock = clock; return this; } public Builder withProvisioner(Provisioner provisioner) { if (this.hostProvisioner != null) throw new IllegalArgumentException("provisioner already set in builder"); this.hostProvisioner = Optional.ofNullable(provisioner); return this; } public Builder withHostProvisionerProvider(HostProvisionerProvider hostProvisionerProvider) { if (this.hostProvisioner != null) throw new IllegalArgumentException("provisioner already set in builder"); this.hostProvisioner = hostProvisionerProvider.getHostProvisioner(); return this; } public Builder withHttpProxy(HttpProxy httpProxy) { this.httpProxy = httpProxy; return this; } public Builder withConfigserverConfig(ConfigserverConfig configserverConfig) { this.configserverConfig = configserverConfig; return this; } public Builder withOrchestrator(Orchestrator orchestrator) { this.orchestrator = orchestrator; return this; } public Builder withLogRetriever(LogRetriever logRetriever) { this.logRetriever = logRetriever; return this; } public Builder withTesterClient(TesterClient testerClient) { this.testerClient = testerClient; return this; } public Builder withFlagSource(FlagSource flagSource) { this.flagSource = flagSource; return this; } public Builder withMetric(Metric metric) { 
this.metric = metric; return this; } public ApplicationRepository build() { return new ApplicationRepository(tenantRepository, hostProvisioner, InfraDeployerProvider.empty().getInfraDeployer(), new ConfigConvergenceChecker(), httpProxy, configserverConfig, orchestrator, logRetriever, clock, testerClient, metric, flagSource); } }
class Builder { private TenantRepository tenantRepository; private Optional<Provisioner> hostProvisioner; private HttpProxy httpProxy = new HttpProxy(new SimpleHttpFetcher()); private Clock clock = Clock.systemUTC(); private ConfigserverConfig configserverConfig = new ConfigserverConfig.Builder().build(); private Orchestrator orchestrator; private LogRetriever logRetriever = new LogRetriever(); private TesterClient testerClient = new TesterClient(); private Metric metric = new NullMetric(); private FlagSource flagSource = new InMemoryFlagSource(); public Builder withTenantRepository(TenantRepository tenantRepository) { this.tenantRepository = tenantRepository; return this; } public Builder withClock(Clock clock) { this.clock = clock; return this; } public Builder withProvisioner(Provisioner provisioner) { if (this.hostProvisioner != null) throw new IllegalArgumentException("provisioner already set in builder"); this.hostProvisioner = Optional.ofNullable(provisioner); return this; } public Builder withHostProvisionerProvider(HostProvisionerProvider hostProvisionerProvider) { if (this.hostProvisioner != null) throw new IllegalArgumentException("provisioner already set in builder"); this.hostProvisioner = hostProvisionerProvider.getHostProvisioner(); return this; } public Builder withHttpProxy(HttpProxy httpProxy) { this.httpProxy = httpProxy; return this; } public Builder withConfigserverConfig(ConfigserverConfig configserverConfig) { this.configserverConfig = configserverConfig; return this; } public Builder withOrchestrator(Orchestrator orchestrator) { this.orchestrator = orchestrator; return this; } public Builder withLogRetriever(LogRetriever logRetriever) { this.logRetriever = logRetriever; return this; } public Builder withTesterClient(TesterClient testerClient) { this.testerClient = testerClient; return this; } public Builder withFlagSource(FlagSource flagSource) { this.flagSource = flagSource; return this; } public Builder withMetric(Metric metric) { 
this.metric = metric; return this; } public ApplicationRepository build() { return new ApplicationRepository(tenantRepository, hostProvisioner, InfraDeployerProvider.empty().getInfraDeployer(), new ConfigConvergenceChecker(), httpProxy, configserverConfig, orchestrator, logRetriever, clock, testerClient, metric, flagSource); } }
What's the reason this isn't done through the `ApplicationController`?
private boolean hasNoDeployments(ApplicationId applicationId) { var deployments = curator.readApplication(TenantAndApplicationId.from(applicationId)) .flatMap(app -> app.get(applicationId.instance())) .map(Instance::deployments); return deployments.isEmpty() || deployments.get().size() == 0; }
var deployments = curator.readApplication(TenantAndApplicationId.from(applicationId))
private boolean hasNoDeployments(ApplicationId applicationId) { var deployments = curator.readApplication(TenantAndApplicationId.from(applicationId)) .flatMap(app -> app.get(applicationId.instance())) .map(Instance::deployments); return deployments.isEmpty() || deployments.get().size() == 0; }
class EndpointCertificateManager { private static final Logger log = Logger.getLogger(EndpointCertificateManager.class.getName()); private final ZoneRegistry zoneRegistry; private final CuratorDb curator; private final SecretStore secretStore; private final EndpointCertificateProvider endpointCertificateProvider; private final Clock clock; private final BooleanFlag validateEndpointCertificates; private final StringFlag deleteUnusedEndpointCertificates; private final BooleanFlag endpointCertInSharedRouting; public EndpointCertificateManager(ZoneRegistry zoneRegistry, CuratorDb curator, SecretStore secretStore, EndpointCertificateProvider endpointCertificateProvider, Clock clock, FlagSource flagSource) { this.zoneRegistry = zoneRegistry; this.curator = curator; this.secretStore = secretStore; this.endpointCertificateProvider = endpointCertificateProvider; this.clock = clock; this.validateEndpointCertificates = Flags.VALIDATE_ENDPOINT_CERTIFICATES.bindTo(flagSource); this.deleteUnusedEndpointCertificates = Flags.DELETE_UNUSED_ENDPOINT_CERTIFICATES.bindTo(flagSource); this.endpointCertInSharedRouting = Flags.ENDPOINT_CERT_IN_SHARED_ROUTING.bindTo(flagSource); Executors.newSingleThreadScheduledExecutor().scheduleAtFixedRate(() -> { try { this.deleteUnusedCertificates(); } catch (Throwable t) { log.log(Level.INFO, "Unexpected Throwable caught while deleting unused endpoint certificates", t); } }, 1, 10, TimeUnit.MINUTES); } public Optional<EndpointCertificateMetadata> getEndpointCertificateMetadata(Instance instance, ZoneId zone, Optional<DeploymentInstanceSpec> instanceSpec) { var t0 = Instant.now(); Optional<EndpointCertificateMetadata> metadata = getOrProvision(instance, zone, instanceSpec); metadata.ifPresent(m -> curator.writeEndpointCertificateMetadata(instance.id(), m.withLastRequested(clock.instant().getEpochSecond()))); Duration duration = Duration.between(t0, Instant.now()); if (duration.toSeconds() > 30) log.log(Level.INFO, String.format("Getting endpoint 
certificate metadata for %s took %d seconds!", instance.id().serializedForm(), duration.toSeconds())); return metadata; } @NotNull private Optional<EndpointCertificateMetadata> getOrProvision(Instance instance, ZoneId zone, Optional<DeploymentInstanceSpec> instanceSpec) { boolean endpointCertInSharedRouting = this.endpointCertInSharedRouting.with(FetchVector.Dimension.APPLICATION_ID, instance.id().serializedForm()).value(); if (!zoneRegistry.zones().directlyRouted().ids().contains(zone) && !endpointCertInSharedRouting) return Optional.empty(); final var currentCertificateMetadata = curator.readEndpointCertificateMetadata(instance.id()); if (currentCertificateMetadata.isEmpty()) { var provisionedCertificateMetadata = provisionEndpointCertificate(instance, Optional.empty(), zone, instanceSpec); curator.writeEndpointCertificateMetadata(instance.id(), provisionedCertificateMetadata); return Optional.of(provisionedCertificateMetadata); } var sansInCertificate = currentCertificateMetadata.get().requestedDnsSans(); var requiredSansForZone = dnsNamesOf(instance.id(), zone); if (sansInCertificate.isPresent() && !sansInCertificate.get().containsAll(requiredSansForZone)) { var reprovisionedCertificateMetadata = provisionEndpointCertificate(instance, currentCertificateMetadata, zone, instanceSpec); curator.writeEndpointCertificateMetadata(instance.id(), reprovisionedCertificateMetadata); validateEndpointCertificate(reprovisionedCertificateMetadata, instance, zone); return Optional.of(reprovisionedCertificateMetadata); } var latestAvailableVersion = latestVersionInSecretStore(currentCertificateMetadata.get()); if (latestAvailableVersion.isPresent() && latestAvailableVersion.getAsInt() > currentCertificateMetadata.get().version()) { var refreshedCertificateMetadata = currentCertificateMetadata.get().withVersion(latestAvailableVersion.getAsInt()); validateEndpointCertificate(refreshedCertificateMetadata, instance, zone); curator.writeEndpointCertificateMetadata(instance.id(), 
refreshedCertificateMetadata); return Optional.of(refreshedCertificateMetadata); } validateEndpointCertificate(currentCertificateMetadata.get(), instance, zone); return currentCertificateMetadata; } enum CleanupMode { DISABLE, DRYRUN, ENABLE } private void deleteUnusedCertificates() { CleanupMode mode = CleanupMode.valueOf(deleteUnusedEndpointCertificates.value()); if (mode == CleanupMode.DISABLE) return; var oneMonthAgo = clock.instant().minus(1, ChronoUnit.MONTHS); curator.readAllEndpointCertificateMetadata().forEach((applicationId, storedMetaData) -> { var lastRequested = Instant.ofEpochSecond(storedMetaData.lastRequested()); if (lastRequested.isBefore(oneMonthAgo) && hasNoDeployments(applicationId)) { log.log(LogLevel.INFO, "Cert for app " + applicationId.serializedForm() + " has not been requested in a month and app has no deployments" + (mode == CleanupMode.ENABLE ? ", deleting from provider and ZK" : "")); if (mode == CleanupMode.ENABLE) { endpointCertificateProvider.deleteCertificate(applicationId, storedMetaData); curator.deleteEndpointCertificateMetadata(applicationId); } } }); } private OptionalInt latestVersionInSecretStore(EndpointCertificateMetadata originalCertificateMetadata) { try { var certVersions = new HashSet<>(secretStore.listSecretVersions(originalCertificateMetadata.certName())); var keyVersions = new HashSet<>(secretStore.listSecretVersions(originalCertificateMetadata.keyName())); return Sets.intersection(certVersions, keyVersions).stream().mapToInt(Integer::intValue).max(); } catch (SecretNotFoundException s) { return OptionalInt.empty(); } } private EndpointCertificateMetadata provisionEndpointCertificate(Instance instance, Optional<EndpointCertificateMetadata> currentMetadata, ZoneId deploymentZone, Optional<DeploymentInstanceSpec> instanceSpec) { List<String> currentlyPresentNames = currentMetadata.isPresent() ? 
currentMetadata.get().requestedDnsSans().orElseThrow(() -> new RuntimeException("Certificate metadata exists but SANs are not present!")) : Collections.emptyList(); var requiredZones = new LinkedHashSet<>(Set.of(deploymentZone)); var zoneCandidateList = zoneRegistry.zones().controllerUpgraded().zones().stream().map(ZoneApi::getId).collect(Collectors.toList()); if (!deploymentZone.environment().isManuallyDeployed()) { zoneCandidateList.stream() .filter(z -> z.environment().isTest() || instanceSpec.isPresent() && instanceSpec.get().deploysTo(z.environment(), z.region())) .forEach(requiredZones::add); } var requiredNames = requiredZones.stream() .flatMap(zone -> dnsNamesOf(instance.id(), zone).stream()) .collect(Collectors.toCollection(LinkedHashSet::new)); zoneCandidateList.stream() .map(zone -> dnsNamesOf(instance.id(), zone)) .filter(zoneNames -> zoneNames.stream().anyMatch(currentlyPresentNames::contains)) .filter(currentlyPresentNames::containsAll) .forEach(requiredNames::addAll); if (!requiredNames.containsAll(currentlyPresentNames)) throw new RuntimeException("SANs to be requested do not cover all existing names! 
Missing names: " + currentlyPresentNames.stream().filter(s -> !requiredNames.contains(s)).collect(Collectors.joining(", "))); return endpointCertificateProvider.requestCaSignedCertificate(instance.id(), List.copyOf(requiredNames), currentMetadata); } private void validateEndpointCertificate(EndpointCertificateMetadata endpointCertificateMetadata, Instance instance, ZoneId zone) { if (validateEndpointCertificates.value()) try { var pemEncodedEndpointCertificate = secretStore.getSecret(endpointCertificateMetadata.certName(), endpointCertificateMetadata.version()); if (pemEncodedEndpointCertificate == null) throw new EndpointCertificateException(EndpointCertificateException.Type.VERIFICATION_FAILURE, "Secret store returned null for certificate"); List<X509Certificate> x509CertificateList = X509CertificateUtils.certificateListFromPem(pemEncodedEndpointCertificate); if (x509CertificateList.isEmpty()) throw new EndpointCertificateException(EndpointCertificateException.Type.VERIFICATION_FAILURE, "Empty certificate list"); if (x509CertificateList.size() < 2) throw new EndpointCertificateException(EndpointCertificateException.Type.VERIFICATION_FAILURE, "Only a single certificate found in chain - intermediate certificates likely missing"); Instant now = clock.instant(); Instant firstExpiry = Instant.MAX; for (X509Certificate x509Certificate : x509CertificateList) { Instant notBefore = x509Certificate.getNotBefore().toInstant(); Instant notAfter = x509Certificate.getNotAfter().toInstant(); if (now.isBefore(notBefore)) throw new EndpointCertificateException(EndpointCertificateException.Type.VERIFICATION_FAILURE, "Certificate is not yet valid"); if (now.isAfter(notAfter)) throw new EndpointCertificateException(EndpointCertificateException.Type.VERIFICATION_FAILURE, "Certificate has expired"); if (notAfter.isBefore(firstExpiry)) firstExpiry = notAfter; } X509Certificate endEntityCertificate = x509CertificateList.get(0); Set<String> subjectAlternativeNames = 
X509CertificateUtils.getSubjectAlternativeNames(endEntityCertificate).stream() .filter(san -> san.getType().equals(SubjectAlternativeName.Type.DNS_NAME)) .map(SubjectAlternativeName::getValue).collect(Collectors.toSet()); var dnsNamesOfZone = dnsNamesOf(instance.id(), zone); if (!subjectAlternativeNames.containsAll(dnsNamesOfZone)) throw new EndpointCertificateException(EndpointCertificateException.Type.VERIFICATION_FAILURE, "Certificate is missing required SANs for zone " + zone.value()); } catch (SecretNotFoundException s) { throw new EndpointCertificateException(EndpointCertificateException.Type.CERT_NOT_AVAILABLE, "Certificate not found in secret store"); } catch (EndpointCertificateException e) { log.log(Level.WARNING, "Certificate validation failure for " + instance.id().serializedForm(), e); throw e; } catch (Exception e) { log.log(Level.WARNING, "Certificate validation failure for " + instance.id().serializedForm(), e); throw new EndpointCertificateException(EndpointCertificateException.Type.VERIFICATION_FAILURE, "Certificate validation failure for app " + instance.id().serializedForm(), e); } } private List<String> dnsNamesOf(ApplicationId applicationId, ZoneId zone) { List<String> endpointDnsNames = new ArrayList<>(); endpointDnsNames.add(commonNameHashOf(applicationId, zoneRegistry.system())); List<Endpoint.EndpointBuilder> endpoints = new ArrayList<>(); if (zone.environment().isProduction()) { endpoints.add(Endpoint.of(applicationId).target(EndpointId.defaultId())); endpoints.add(Endpoint.of(applicationId).wildcard()); } endpoints.add(Endpoint.of(applicationId).target(ClusterSpec.Id.from("default"), zone)); endpoints.add(Endpoint.of(applicationId).wildcard(zone)); endpoints.stream() .map(endpoint -> endpoint.routingMethod(RoutingMethod.exclusive)) .map(endpoint -> endpoint.on(Endpoint.Port.tls())) .map(endpointBuilder -> endpointBuilder.in(zoneRegistry.system())) .map(Endpoint::dnsName).forEach(endpointDnsNames::add); return 
Collections.unmodifiableList(endpointDnsNames); } /** Create a common name based on a hash of the ApplicationId. This should always be less than 64 characters long. */ @SuppressWarnings("UnstableApiUsage") private static String commonNameHashOf(ApplicationId application, SystemName system) { var hashCode = Hashing.sha1().hashString(application.serializedForm(), Charset.defaultCharset()); var base32encoded = BaseEncoding.base32().omitPadding().lowerCase().encode(hashCode.asBytes()); return 'v' + base32encoded + Endpoint.dnsSuffix(system); } }
class EndpointCertificateManager { private static final Logger log = Logger.getLogger(EndpointCertificateManager.class.getName()); private final ZoneRegistry zoneRegistry; private final CuratorDb curator; private final SecretStore secretStore; private final EndpointCertificateProvider endpointCertificateProvider; private final Clock clock; private final BooleanFlag validateEndpointCertificates; private final StringFlag deleteUnusedEndpointCertificates; private final BooleanFlag endpointCertInSharedRouting; public EndpointCertificateManager(ZoneRegistry zoneRegistry, CuratorDb curator, SecretStore secretStore, EndpointCertificateProvider endpointCertificateProvider, Clock clock, FlagSource flagSource) { this.zoneRegistry = zoneRegistry; this.curator = curator; this.secretStore = secretStore; this.endpointCertificateProvider = endpointCertificateProvider; this.clock = clock; this.validateEndpointCertificates = Flags.VALIDATE_ENDPOINT_CERTIFICATES.bindTo(flagSource); this.deleteUnusedEndpointCertificates = Flags.DELETE_UNUSED_ENDPOINT_CERTIFICATES.bindTo(flagSource); this.endpointCertInSharedRouting = Flags.ENDPOINT_CERT_IN_SHARED_ROUTING.bindTo(flagSource); Executors.newSingleThreadScheduledExecutor().scheduleAtFixedRate(() -> { try { this.deleteUnusedCertificates(); } catch (Throwable t) { log.log(Level.INFO, "Unexpected Throwable caught while deleting unused endpoint certificates", t); } }, 1, 10, TimeUnit.MINUTES); } public Optional<EndpointCertificateMetadata> getEndpointCertificateMetadata(Instance instance, ZoneId zone, Optional<DeploymentInstanceSpec> instanceSpec) { var t0 = Instant.now(); Optional<EndpointCertificateMetadata> metadata = getOrProvision(instance, zone, instanceSpec); metadata.ifPresent(m -> curator.writeEndpointCertificateMetadata(instance.id(), m.withLastRequested(clock.instant().getEpochSecond()))); Duration duration = Duration.between(t0, Instant.now()); if (duration.toSeconds() > 30) log.log(Level.INFO, String.format("Getting endpoint 
certificate metadata for %s took %d seconds!", instance.id().serializedForm(), duration.toSeconds())); return metadata; } @NotNull private Optional<EndpointCertificateMetadata> getOrProvision(Instance instance, ZoneId zone, Optional<DeploymentInstanceSpec> instanceSpec) { boolean endpointCertInSharedRouting = this.endpointCertInSharedRouting.with(FetchVector.Dimension.APPLICATION_ID, instance.id().serializedForm()).value(); if (!zoneRegistry.zones().directlyRouted().ids().contains(zone) && !endpointCertInSharedRouting) return Optional.empty(); final var currentCertificateMetadata = curator.readEndpointCertificateMetadata(instance.id()); if (currentCertificateMetadata.isEmpty()) { var provisionedCertificateMetadata = provisionEndpointCertificate(instance, Optional.empty(), zone, instanceSpec); curator.writeEndpointCertificateMetadata(instance.id(), provisionedCertificateMetadata); return Optional.of(provisionedCertificateMetadata); } var sansInCertificate = currentCertificateMetadata.get().requestedDnsSans(); var requiredSansForZone = dnsNamesOf(instance.id(), zone); if (sansInCertificate.isPresent() && !sansInCertificate.get().containsAll(requiredSansForZone)) { var reprovisionedCertificateMetadata = provisionEndpointCertificate(instance, currentCertificateMetadata, zone, instanceSpec); curator.writeEndpointCertificateMetadata(instance.id(), reprovisionedCertificateMetadata); validateEndpointCertificate(reprovisionedCertificateMetadata, instance, zone); return Optional.of(reprovisionedCertificateMetadata); } var latestAvailableVersion = latestVersionInSecretStore(currentCertificateMetadata.get()); if (latestAvailableVersion.isPresent() && latestAvailableVersion.getAsInt() > currentCertificateMetadata.get().version()) { var refreshedCertificateMetadata = currentCertificateMetadata.get().withVersion(latestAvailableVersion.getAsInt()); validateEndpointCertificate(refreshedCertificateMetadata, instance, zone); curator.writeEndpointCertificateMetadata(instance.id(), 
refreshedCertificateMetadata); return Optional.of(refreshedCertificateMetadata); } validateEndpointCertificate(currentCertificateMetadata.get(), instance, zone); return currentCertificateMetadata; } enum CleanupMode { DISABLE, DRYRUN, ENABLE } private void deleteUnusedCertificates() { CleanupMode mode = CleanupMode.valueOf(deleteUnusedEndpointCertificates.value()); if (mode == CleanupMode.DISABLE) return; var oneMonthAgo = clock.instant().minus(1, ChronoUnit.MONTHS); curator.readAllEndpointCertificateMetadata().forEach((applicationId, storedMetaData) -> { var lastRequested = Instant.ofEpochSecond(storedMetaData.lastRequested()); if (lastRequested.isBefore(oneMonthAgo) && hasNoDeployments(applicationId)) { log.log(LogLevel.INFO, "Cert for app " + applicationId.serializedForm() + " has not been requested in a month and app has no deployments" + (mode == CleanupMode.ENABLE ? ", deleting from provider and ZK" : "")); if (mode == CleanupMode.ENABLE) { endpointCertificateProvider.deleteCertificate(applicationId, storedMetaData); curator.deleteEndpointCertificateMetadata(applicationId); } } }); } private OptionalInt latestVersionInSecretStore(EndpointCertificateMetadata originalCertificateMetadata) { try { var certVersions = new HashSet<>(secretStore.listSecretVersions(originalCertificateMetadata.certName())); var keyVersions = new HashSet<>(secretStore.listSecretVersions(originalCertificateMetadata.keyName())); return Sets.intersection(certVersions, keyVersions).stream().mapToInt(Integer::intValue).max(); } catch (SecretNotFoundException s) { return OptionalInt.empty(); } } private EndpointCertificateMetadata provisionEndpointCertificate(Instance instance, Optional<EndpointCertificateMetadata> currentMetadata, ZoneId deploymentZone, Optional<DeploymentInstanceSpec> instanceSpec) { List<String> currentlyPresentNames = currentMetadata.isPresent() ? 
currentMetadata.get().requestedDnsSans().orElseThrow(() -> new RuntimeException("Certificate metadata exists but SANs are not present!")) : Collections.emptyList(); var requiredZones = new LinkedHashSet<>(Set.of(deploymentZone)); var zoneCandidateList = zoneRegistry.zones().controllerUpgraded().zones().stream().map(ZoneApi::getId).collect(Collectors.toList()); if (!deploymentZone.environment().isManuallyDeployed()) { zoneCandidateList.stream() .filter(z -> z.environment().isTest() || instanceSpec.isPresent() && instanceSpec.get().deploysTo(z.environment(), z.region())) .forEach(requiredZones::add); } var requiredNames = requiredZones.stream() .flatMap(zone -> dnsNamesOf(instance.id(), zone).stream()) .collect(Collectors.toCollection(LinkedHashSet::new)); zoneCandidateList.stream() .map(zone -> dnsNamesOf(instance.id(), zone)) .filter(zoneNames -> zoneNames.stream().anyMatch(currentlyPresentNames::contains)) .filter(currentlyPresentNames::containsAll) .forEach(requiredNames::addAll); if (!requiredNames.containsAll(currentlyPresentNames)) throw new RuntimeException("SANs to be requested do not cover all existing names! 
Missing names: " + currentlyPresentNames.stream().filter(s -> !requiredNames.contains(s)).collect(Collectors.joining(", "))); return endpointCertificateProvider.requestCaSignedCertificate(instance.id(), List.copyOf(requiredNames), currentMetadata); } private void validateEndpointCertificate(EndpointCertificateMetadata endpointCertificateMetadata, Instance instance, ZoneId zone) { if (validateEndpointCertificates.value()) try { var pemEncodedEndpointCertificate = secretStore.getSecret(endpointCertificateMetadata.certName(), endpointCertificateMetadata.version()); if (pemEncodedEndpointCertificate == null) throw new EndpointCertificateException(EndpointCertificateException.Type.VERIFICATION_FAILURE, "Secret store returned null for certificate"); List<X509Certificate> x509CertificateList = X509CertificateUtils.certificateListFromPem(pemEncodedEndpointCertificate); if (x509CertificateList.isEmpty()) throw new EndpointCertificateException(EndpointCertificateException.Type.VERIFICATION_FAILURE, "Empty certificate list"); if (x509CertificateList.size() < 2) throw new EndpointCertificateException(EndpointCertificateException.Type.VERIFICATION_FAILURE, "Only a single certificate found in chain - intermediate certificates likely missing"); Instant now = clock.instant(); Instant firstExpiry = Instant.MAX; for (X509Certificate x509Certificate : x509CertificateList) { Instant notBefore = x509Certificate.getNotBefore().toInstant(); Instant notAfter = x509Certificate.getNotAfter().toInstant(); if (now.isBefore(notBefore)) throw new EndpointCertificateException(EndpointCertificateException.Type.VERIFICATION_FAILURE, "Certificate is not yet valid"); if (now.isAfter(notAfter)) throw new EndpointCertificateException(EndpointCertificateException.Type.VERIFICATION_FAILURE, "Certificate has expired"); if (notAfter.isBefore(firstExpiry)) firstExpiry = notAfter; } X509Certificate endEntityCertificate = x509CertificateList.get(0); Set<String> subjectAlternativeNames = 
X509CertificateUtils.getSubjectAlternativeNames(endEntityCertificate).stream() .filter(san -> san.getType().equals(SubjectAlternativeName.Type.DNS_NAME)) .map(SubjectAlternativeName::getValue).collect(Collectors.toSet()); var dnsNamesOfZone = dnsNamesOf(instance.id(), zone); if (!subjectAlternativeNames.containsAll(dnsNamesOfZone)) throw new EndpointCertificateException(EndpointCertificateException.Type.VERIFICATION_FAILURE, "Certificate is missing required SANs for zone " + zone.value()); } catch (SecretNotFoundException s) { throw new EndpointCertificateException(EndpointCertificateException.Type.CERT_NOT_AVAILABLE, "Certificate not found in secret store"); } catch (EndpointCertificateException e) { log.log(Level.WARNING, "Certificate validation failure for " + instance.id().serializedForm(), e); throw e; } catch (Exception e) { log.log(Level.WARNING, "Certificate validation failure for " + instance.id().serializedForm(), e); throw new EndpointCertificateException(EndpointCertificateException.Type.VERIFICATION_FAILURE, "Certificate validation failure for app " + instance.id().serializedForm(), e); } } private List<String> dnsNamesOf(ApplicationId applicationId, ZoneId zone) { List<String> endpointDnsNames = new ArrayList<>(); endpointDnsNames.add(commonNameHashOf(applicationId, zoneRegistry.system())); List<Endpoint.EndpointBuilder> endpoints = new ArrayList<>(); if (zone.environment().isProduction()) { endpoints.add(Endpoint.of(applicationId).target(EndpointId.defaultId())); endpoints.add(Endpoint.of(applicationId).wildcard()); } endpoints.add(Endpoint.of(applicationId).target(ClusterSpec.Id.from("default"), zone)); endpoints.add(Endpoint.of(applicationId).wildcard(zone)); endpoints.stream() .map(endpoint -> endpoint.routingMethod(RoutingMethod.exclusive)) .map(endpoint -> endpoint.on(Endpoint.Port.tls())) .map(endpointBuilder -> endpointBuilder.in(zoneRegistry.system())) .map(Endpoint::dnsName).forEach(endpointDnsNames::add); return 
Collections.unmodifiableList(endpointDnsNames); } /** Create a common name based on a hash of the ApplicationId. This should always be less than 64 characters long. */ @SuppressWarnings("UnstableApiUsage") private static String commonNameHashOf(ApplicationId application, SystemName system) { var hashCode = Hashing.sha1().hashString(application.serializedForm(), Charset.defaultCharset()); var base32encoded = BaseEncoding.base32().omitPadding().lowerCase().encode(hashCode.asBytes()); return 'v' + base32encoded + Endpoint.dnsSuffix(system); } }
Yes, thanks
public boolean delete(ApplicationId applicationId) { Tenant tenant = getTenant(applicationId); if (tenant == null) return false; TenantApplications tenantApplications = tenant.getApplicationRepo(); try (Lock lock = tenantApplications.lock(applicationId)) { Optional<Long> activeSession = tenantApplications.activeSessionOf(applicationId); if (activeSession.isEmpty()) return false; try { RemoteSession remoteSession = getRemoteSession(tenant, activeSession.get()); tenant.getSessionRepository().delete(remoteSession); } catch (NotFoundException e) { log.log(Level.INFO, TenantRepository.logPre(applicationId) + "Active session exists, but has not been deleted properly. Trying to cleanup"); } NestedTransaction transaction = new NestedTransaction(); Curator curator = tenantRepository.getCurator(); transaction.add(new ContainerEndpointsCache(tenant.getPath(), curator).delete(applicationId)); transaction.add(new ApplicationRolesStore(curator, tenant.getPath()).delete(applicationId)); transaction.add(new EndpointCertificateMetadataStore(curator, tenant.getPath()).delete(applicationId)); transaction.add(tenantApplications.createDeleteTransaction(applicationId)); hostProvisioner.ifPresent(provisioner -> provisioner.remove(transaction, applicationId)); transaction.onCommitted(() -> log.log(Level.INFO, "Deleted " + applicationId)); transaction.commit(); return true; } }
/**
 * Removes the given application's active session and every piece of persisted state
 * belonging to it, all within one transaction.
 *
 * @return false when the tenant does not exist or the application has no active session
 */
public boolean delete(ApplicationId applicationId) {
    Tenant owner = getTenant(applicationId);
    if (owner == null) return false;

    TenantApplications applications = owner.getApplicationRepo();
    try (Lock lock = applications.lock(applicationId)) {
        Optional<Long> sessionId = applications.activeSessionOf(applicationId);
        if (sessionId.isEmpty()) return false;

        try {
            RemoteSession session = getRemoteSession(owner, sessionId.get());
            owner.getSessionRepository().delete(session);
        } catch (NotFoundException e) {
            log.log(Level.INFO, TenantRepository.logPre(applicationId) + "Active session exists, but has not been deleted properly. Trying to cleanup");
        }

        Curator curator = tenantRepository.getCurator();
        NestedTransaction removal = new NestedTransaction();
        removal.add(new ContainerEndpointsCache(owner.getPath(), curator).delete(applicationId));
        removal.add(new ApplicationRolesStore(curator, owner.getPath()).delete(applicationId));
        removal.add(new EndpointCertificateMetadataStore(curator, owner.getPath()).delete(applicationId));
        removal.add(applications.createDeleteTransaction(applicationId));
        hostProvisioner.ifPresent(provisioner -> provisioner.remove(removal, applicationId));
        removal.onCommitted(() -> log.log(Level.INFO, "Deleted " + applicationId));
        removal.commit();
        return true;
    }
}
class Builder { private TenantRepository tenantRepository; private Optional<Provisioner> hostProvisioner; private HttpProxy httpProxy = new HttpProxy(new SimpleHttpFetcher()); private Clock clock = Clock.systemUTC(); private ConfigserverConfig configserverConfig = new ConfigserverConfig.Builder().build(); private Orchestrator orchestrator; private LogRetriever logRetriever = new LogRetriever(); private TesterClient testerClient = new TesterClient(); private Metric metric = new NullMetric(); private FlagSource flagSource = new InMemoryFlagSource(); public Builder withTenantRepository(TenantRepository tenantRepository) { this.tenantRepository = tenantRepository; return this; } public Builder withClock(Clock clock) { this.clock = clock; return this; } public Builder withProvisioner(Provisioner provisioner) { if (this.hostProvisioner != null) throw new IllegalArgumentException("provisioner already set in builder"); this.hostProvisioner = Optional.ofNullable(provisioner); return this; } public Builder withHostProvisionerProvider(HostProvisionerProvider hostProvisionerProvider) { if (this.hostProvisioner != null) throw new IllegalArgumentException("provisioner already set in builder"); this.hostProvisioner = hostProvisionerProvider.getHostProvisioner(); return this; } public Builder withHttpProxy(HttpProxy httpProxy) { this.httpProxy = httpProxy; return this; } public Builder withConfigserverConfig(ConfigserverConfig configserverConfig) { this.configserverConfig = configserverConfig; return this; } public Builder withOrchestrator(Orchestrator orchestrator) { this.orchestrator = orchestrator; return this; } public Builder withLogRetriever(LogRetriever logRetriever) { this.logRetriever = logRetriever; return this; } public Builder withTesterClient(TesterClient testerClient) { this.testerClient = testerClient; return this; } public Builder withFlagSource(FlagSource flagSource) { this.flagSource = flagSource; return this; } public Builder withMetric(Metric metric) { 
this.metric = metric; return this; } public ApplicationRepository build() { return new ApplicationRepository(tenantRepository, hostProvisioner, InfraDeployerProvider.empty().getInfraDeployer(), new ConfigConvergenceChecker(), httpProxy, configserverConfig, orchestrator, logRetriever, clock, testerClient, metric, flagSource); } }
// Builder for ApplicationRepository. Every collaborator except tenantRepository and
// orchestrator has a usable default; the provisioner may be set through exactly one
// of withProvisioner/withHostProvisionerProvider.
class Builder {
    private TenantRepository tenantRepository;
    // Left null (not Optional.empty()) so a second attempt to set it can be detected.
    private Optional<Provisioner> hostProvisioner;
    private HttpProxy httpProxy = new HttpProxy(new SimpleHttpFetcher());
    private Clock clock = Clock.systemUTC();
    private ConfigserverConfig configserverConfig = new ConfigserverConfig.Builder().build();
    private Orchestrator orchestrator;
    private LogRetriever logRetriever = new LogRetriever();
    private TesterClient testerClient = new TesterClient();
    private Metric metric = new NullMetric();
    private FlagSource flagSource = new InMemoryFlagSource();

    public Builder withTenantRepository(TenantRepository tenantRepository) { this.tenantRepository = tenantRepository; return this; }

    public Builder withClock(Clock clock) { this.clock = clock; return this; }

    // Throws if a provisioner was already set via either provisioner method.
    public Builder withProvisioner(Provisioner provisioner) {
        if (this.hostProvisioner != null) throw new IllegalArgumentException("provisioner already set in builder");
        this.hostProvisioner = Optional.ofNullable(provisioner);
        return this;
    }

    // Throws if a provisioner was already set via either provisioner method.
    public Builder withHostProvisionerProvider(HostProvisionerProvider hostProvisionerProvider) {
        if (this.hostProvisioner != null) throw new IllegalArgumentException("provisioner already set in builder");
        this.hostProvisioner = hostProvisionerProvider.getHostProvisioner();
        return this;
    }

    public Builder withHttpProxy(HttpProxy httpProxy) { this.httpProxy = httpProxy; return this; }

    public Builder withConfigserverConfig(ConfigserverConfig configserverConfig) { this.configserverConfig = configserverConfig; return this; }

    public Builder withOrchestrator(Orchestrator orchestrator) { this.orchestrator = orchestrator; return this; }

    public Builder withLogRetriever(LogRetriever logRetriever) { this.logRetriever = logRetriever; return this; }

    public Builder withTesterClient(TesterClient testerClient) { this.testerClient = testerClient; return this; }

    public Builder withFlagSource(FlagSource flagSource) { this.flagSource = flagSource; return this; }

    public Builder withMetric(Metric metric) { this.metric = metric; return this; }

    public ApplicationRepository build() {
        return new ApplicationRepository(tenantRepository, hostProvisioner, InfraDeployerProvider.empty().getInfraDeployer(), new ConfigConvergenceChecker(), httpProxy, configserverConfig, orchestrator, logRetriever, clock, testerClient, metric, flagSource);
    }
}
I now think the original comment is correct — delete(RemoteSession), which is only called in this config server, appears to both delete and deactivate the remote session, the same as sessionRemoved(long), which seems to be invoked from the watcher.
// Deletes the given application: removes its active remote session (if any) and all
// ZooKeeper-persisted state for it (container endpoints, application roles, endpoint
// certificate metadata, and the application entry itself) in a single transaction.
// Returns false when the tenant is unknown or the application has no active session.
public boolean delete(ApplicationId applicationId) {
    Tenant tenant = getTenant(applicationId);
    if (tenant == null) return false;

    TenantApplications tenantApplications = tenant.getApplicationRepo();
    // Per-application lock so a concurrent deploy/delete cannot interleave with this cleanup.
    try (Lock lock = tenantApplications.lock(applicationId)) {
        Optional<Long> activeSession = tenantApplications.activeSessionOf(applicationId);
        if (activeSession.isEmpty()) return false; // Nothing active — nothing to delete
        try {
            RemoteSession remoteSession = getRemoteSession(tenant, activeSession.get());
            tenant.getSessionRepository().delete(remoteSession);
        } catch (NotFoundException e) {
            // The session node is already gone; continue cleaning up the remaining state.
            log.log(Level.INFO, TenantRepository.logPre(applicationId) + "Active session exists, but has not been deleted properly. Trying to cleanup");
        }

        NestedTransaction transaction = new NestedTransaction();
        Curator curator = tenantRepository.getCurator();
        transaction.add(new ContainerEndpointsCache(tenant.getPath(), curator).delete(applicationId));
        transaction.add(new ApplicationRolesStore(curator, tenant.getPath()).delete(applicationId));
        transaction.add(new EndpointCertificateMetadataStore(curator, tenant.getPath()).delete(applicationId));
        transaction.add(tenantApplications.createDeleteTransaction(applicationId));
        // Let the host provisioner release this application's nodes in the same transaction.
        hostProvisioner.ifPresent(provisioner -> provisioner.remove(transaction, applicationId));
        transaction.onCommitted(() -> log.log(Level.INFO, "Deleted " + applicationId));
        transaction.commit();
        return true;
    }
}
/**
 * Removes the given application's active session and every piece of persisted state
 * belonging to it, all within one transaction.
 *
 * @return false when the tenant does not exist or the application has no active session
 */
public boolean delete(ApplicationId applicationId) {
    Tenant owner = getTenant(applicationId);
    if (owner == null) return false;

    TenantApplications applications = owner.getApplicationRepo();
    try (Lock lock = applications.lock(applicationId)) {
        Optional<Long> sessionId = applications.activeSessionOf(applicationId);
        if (sessionId.isEmpty()) return false;

        try {
            RemoteSession session = getRemoteSession(owner, sessionId.get());
            owner.getSessionRepository().delete(session);
        } catch (NotFoundException e) {
            log.log(Level.INFO, TenantRepository.logPre(applicationId) + "Active session exists, but has not been deleted properly. Trying to cleanup");
        }

        Curator curator = tenantRepository.getCurator();
        NestedTransaction removal = new NestedTransaction();
        removal.add(new ContainerEndpointsCache(owner.getPath(), curator).delete(applicationId));
        removal.add(new ApplicationRolesStore(curator, owner.getPath()).delete(applicationId));
        removal.add(new EndpointCertificateMetadataStore(curator, owner.getPath()).delete(applicationId));
        removal.add(applications.createDeleteTransaction(applicationId));
        hostProvisioner.ifPresent(provisioner -> provisioner.remove(removal, applicationId));
        removal.onCommitted(() -> log.log(Level.INFO, "Deleted " + applicationId));
        removal.commit();
        return true;
    }
}
class Builder { private TenantRepository tenantRepository; private Optional<Provisioner> hostProvisioner; private HttpProxy httpProxy = new HttpProxy(new SimpleHttpFetcher()); private Clock clock = Clock.systemUTC(); private ConfigserverConfig configserverConfig = new ConfigserverConfig.Builder().build(); private Orchestrator orchestrator; private LogRetriever logRetriever = new LogRetriever(); private TesterClient testerClient = new TesterClient(); private Metric metric = new NullMetric(); private FlagSource flagSource = new InMemoryFlagSource(); public Builder withTenantRepository(TenantRepository tenantRepository) { this.tenantRepository = tenantRepository; return this; } public Builder withClock(Clock clock) { this.clock = clock; return this; } public Builder withProvisioner(Provisioner provisioner) { if (this.hostProvisioner != null) throw new IllegalArgumentException("provisioner already set in builder"); this.hostProvisioner = Optional.ofNullable(provisioner); return this; } public Builder withHostProvisionerProvider(HostProvisionerProvider hostProvisionerProvider) { if (this.hostProvisioner != null) throw new IllegalArgumentException("provisioner already set in builder"); this.hostProvisioner = hostProvisionerProvider.getHostProvisioner(); return this; } public Builder withHttpProxy(HttpProxy httpProxy) { this.httpProxy = httpProxy; return this; } public Builder withConfigserverConfig(ConfigserverConfig configserverConfig) { this.configserverConfig = configserverConfig; return this; } public Builder withOrchestrator(Orchestrator orchestrator) { this.orchestrator = orchestrator; return this; } public Builder withLogRetriever(LogRetriever logRetriever) { this.logRetriever = logRetriever; return this; } public Builder withTesterClient(TesterClient testerClient) { this.testerClient = testerClient; return this; } public Builder withFlagSource(FlagSource flagSource) { this.flagSource = flagSource; return this; } public Builder withMetric(Metric metric) { 
this.metric = metric; return this; } public ApplicationRepository build() { return new ApplicationRepository(tenantRepository, hostProvisioner, InfraDeployerProvider.empty().getInfraDeployer(), new ConfigConvergenceChecker(), httpProxy, configserverConfig, orchestrator, logRetriever, clock, testerClient, metric, flagSource); } }
// Builder for ApplicationRepository. Every collaborator except tenantRepository and
// orchestrator has a usable default; the provisioner may be set through exactly one
// of withProvisioner/withHostProvisionerProvider.
class Builder {
    private TenantRepository tenantRepository;
    // Left null (not Optional.empty()) so a second attempt to set it can be detected.
    private Optional<Provisioner> hostProvisioner;
    private HttpProxy httpProxy = new HttpProxy(new SimpleHttpFetcher());
    private Clock clock = Clock.systemUTC();
    private ConfigserverConfig configserverConfig = new ConfigserverConfig.Builder().build();
    private Orchestrator orchestrator;
    private LogRetriever logRetriever = new LogRetriever();
    private TesterClient testerClient = new TesterClient();
    private Metric metric = new NullMetric();
    private FlagSource flagSource = new InMemoryFlagSource();

    public Builder withTenantRepository(TenantRepository tenantRepository) { this.tenantRepository = tenantRepository; return this; }

    public Builder withClock(Clock clock) { this.clock = clock; return this; }

    // Throws if a provisioner was already set via either provisioner method.
    public Builder withProvisioner(Provisioner provisioner) {
        if (this.hostProvisioner != null) throw new IllegalArgumentException("provisioner already set in builder");
        this.hostProvisioner = Optional.ofNullable(provisioner);
        return this;
    }

    // Throws if a provisioner was already set via either provisioner method.
    public Builder withHostProvisionerProvider(HostProvisionerProvider hostProvisionerProvider) {
        if (this.hostProvisioner != null) throw new IllegalArgumentException("provisioner already set in builder");
        this.hostProvisioner = hostProvisionerProvider.getHostProvisioner();
        return this;
    }

    public Builder withHttpProxy(HttpProxy httpProxy) { this.httpProxy = httpProxy; return this; }

    public Builder withConfigserverConfig(ConfigserverConfig configserverConfig) { this.configserverConfig = configserverConfig; return this; }

    public Builder withOrchestrator(Orchestrator orchestrator) { this.orchestrator = orchestrator; return this; }

    public Builder withLogRetriever(LogRetriever logRetriever) { this.logRetriever = logRetriever; return this; }

    public Builder withTesterClient(TesterClient testerClient) { this.testerClient = testerClient; return this; }

    public Builder withFlagSource(FlagSource flagSource) { this.flagSource = flagSource; return this; }

    public Builder withMetric(Metric metric) { this.metric = metric; return this; }

    public ApplicationRepository build() {
        return new ApplicationRepository(tenantRepository, hostProvisioner, InfraDeployerProvider.empty().getInfraDeployer(), new ConfigConvergenceChecker(), httpProxy, configserverConfig, orchestrator, logRetriever, clock, testerClient, metric, flagSource);
    }
}
update comment?
// Verifies that a held (unresponsive) connection is moved to oldConnections once
// enough time passes (advancing 31 s triggers the switch — presumably the connection
// TTL is 30 s; confirm against IOThread), that the old connection keeps being polled
// for outstanding responses at progressively longer intervals, and that it is
// eventually removed entirely.
public void testPollingOldConnections() {
    OperationProcessorTester tester = new OperationProcessorTester();
    tester.tick(3);
    IOThread ioThread = tester.getSingleIOThread();
    DryRunGatewayConnection firstConnection = (DryRunGatewayConnection)ioThread.currentConnection();
    assertEquals(0, ioThread.oldConnections().size());

    firstConnection.hold(true); // connection stops returning results for sent operations
    tester.send("doc1");
    tester.tick(1);

    tester.clock().advance(Duration.ofSeconds(31));
    tester.tick(3);
    assertEquals(1, ioThread.oldConnections().size());
    assertEquals(firstConnection, ioThread.oldConnections().get(0));
    assertNotSame(firstConnection, ioThread.currentConnection());
    assertEquals(31, firstConnection.lastPollTime().toEpochMilli() / 1000);

    // Advancing 1 s per tick: the old connection is polled at t = 31, 33, 37, 45 —
    // the gap doubles each time (2 s, 4 s, 8 s), i.e. exponential backoff on polling.
    assertLastPollTimeWhenAdvancing(31, 1, firstConnection, tester);
    assertLastPollTimeWhenAdvancing(33, 1, firstConnection, tester);
    assertLastPollTimeWhenAdvancing(33, 1, firstConnection, tester);
    assertLastPollTimeWhenAdvancing(33, 1, firstConnection, tester);
    assertLastPollTimeWhenAdvancing(33, 1, firstConnection, tester);
    assertLastPollTimeWhenAdvancing(37, 1, firstConnection, tester);
    assertLastPollTimeWhenAdvancing(37, 1, firstConnection, tester);
    assertLastPollTimeWhenAdvancing(37, 1, firstConnection, tester);
    assertLastPollTimeWhenAdvancing(37, 1, firstConnection, tester);
    assertLastPollTimeWhenAdvancing(37, 1, firstConnection, tester);
    assertLastPollTimeWhenAdvancing(37, 1, firstConnection, tester);
    assertLastPollTimeWhenAdvancing(37, 1, firstConnection, tester);
    assertLastPollTimeWhenAdvancing(37, 1, firstConnection, tester);
    assertLastPollTimeWhenAdvancing(45, 1, firstConnection, tester);
    assertLastPollTimeWhenAdvancing(45, 1, firstConnection, tester);
    assertLastPollTimeWhenAdvancing(45, 1, firstConnection, tester);
    assertLastPollTimeWhenAdvancing(45, 1, firstConnection, tester);
    assertLastPollTimeWhenAdvancing(45, 1, firstConnection, tester);
    assertLastPollTimeWhenAdvancing(45, 1, firstConnection, tester);
    assertLastPollTimeWhenAdvancing(45, 1, firstConnection, tester);
    assertLastPollTimeWhenAdvancing(45, 1, firstConnection, tester);

    // After a long quiet period the stale connection is dropped from oldConnections.
    tester.clock().advance(Duration.ofSeconds(200));
    tester.tick(1);
    assertEquals("Old connection is eventually removed", 0, ioThread.oldConnections().size());
}
tester.clock().advance(Duration.ofSeconds(31));
/** A held connection becomes an "old" connection, is polled with increasing backoff, and is finally dropped. */
public void testPollingOldConnections() {
    OperationProcessorTester tester = new OperationProcessorTester();
    tester.tick(3);
    IOThread ioThread = tester.getSingleIOThread();
    DryRunGatewayConnection staleConnection = (DryRunGatewayConnection)ioThread.currentConnection();
    assertEquals(0, ioThread.oldConnections().size());

    staleConnection.hold(true);
    tester.send("doc1");
    tester.tick(1);

    tester.clock().advance(Duration.ofSeconds(31));
    tester.tick(3);
    assertEquals(1, ioThread.oldConnections().size());
    assertEquals(staleConnection, ioThread.oldConnections().get(0));
    assertNotSame(staleConnection, ioThread.currentConnection());
    assertEquals(31, staleConnection.lastPollTime().toEpochMilli() / 1000);

    // Same assertion sequence as before, expressed as loops: one poll at t=31,
    // then polls at t=33 (4 one-second steps), t=37 (8 steps) and t=45 (8 steps).
    assertLastPollTimeWhenAdvancing(31, 1, staleConnection, tester);
    for (int step = 0; step < 4; step++)
        assertLastPollTimeWhenAdvancing(33, 1, staleConnection, tester);
    for (int step = 0; step < 8; step++)
        assertLastPollTimeWhenAdvancing(37, 1, staleConnection, tester);
    for (int step = 0; step < 8; step++)
        assertLastPollTimeWhenAdvancing(45, 1, staleConnection, tester);

    tester.clock().advance(Duration.ofSeconds(200));
    tester.tick(1);
    assertEquals("Old connection is eventually removed", 0, ioThread.oldConnections().size());
}
/**
 * Tests of IOThread's result accounting and connection failure handling,
 * driven through OperationProcessorTester with a dry-run gateway connection.
 */
class IOThreadTest {

    @Test
    public void testSuccessfulWriting() {
        OperationProcessorTester tester = new OperationProcessorTester();
        assertEquals(0, tester.incomplete());
        assertEquals(0, tester.success());
        assertEquals(0, tester.failures());
        tester.send("doc1");
        tester.send("doc2");
        tester.send("doc3");
        assertEquals(3, tester.incomplete());
        assertEquals(0, tester.success());
        assertEquals(0, tester.failures());
        tester.tick(1); // connect
        assertEquals(3, tester.incomplete());
        tester.tick(1); // sync
        assertEquals(3, tester.incomplete());
        tester.tick(1); // process results
        assertEquals(0, tester.incomplete());
        assertEquals(3, tester.success());
        assertEquals(0, tester.failures());
    }

    @Test
    public void testFatalExceptionOnHandshake() {
        OperationProcessorTester tester = new OperationProcessorTester();
        IOThread ioThread = tester.getSingleIOThread();
        DryRunGatewayConnection firstConnection = (DryRunGatewayConnection)ioThread.currentConnection();
        // 403 is treated as fatal: all pending operations fail immediately.
        firstConnection.throwOnHandshake(new ServerResponseException(403, "Not authorized"));
        tester.send("doc1");
        tester.send("doc2");
        tester.send("doc3");
        tester.tick(3);
        assertEquals(0, tester.incomplete());
        assertEquals(0, ioThread.resultQueue().getPendingSize());
        assertEquals(0, tester.success());
        assertEquals(3, tester.failures());
    }

    @Test
    public void testExceptionOnHandshake() {
        OperationProcessorTester tester = new OperationProcessorTester();
        IOThread ioThread = tester.getSingleIOThread();
        DryRunGatewayConnection firstConnection = (DryRunGatewayConnection)ioThread.currentConnection();
        // A non-fatal handshake error: the operation stays incomplete awaiting retry.
        firstConnection.throwOnHandshake(new ServerResponseException(418, "I'm a teapot"));
        tester.send("doc1");
        tester.tick(3);
        assertEquals(1, tester.incomplete());
        assertEquals(0, ioThread.resultQueue().getPendingSize());
        assertEquals(0, tester.success());
        assertEquals("Awaiting retry", 0, tester.failures());
    }

    @Test
    public void testExceptionOnWrite() {
        OperationProcessorTester tester = new OperationProcessorTester();
        IOThread ioThread = tester.getSingleIOThread();
        DryRunGatewayConnection firstConnection = (DryRunGatewayConnection)ioThread.currentConnection();
        firstConnection.throwOnWrite(new IOException("Test failure"));
        tester.send("doc1");
        tester.tick(3);
        assertEquals(1, tester.incomplete());
        assertEquals(0, ioThread.resultQueue().getPendingSize());
        assertEquals(0, tester.success());
        assertEquals("Awaiting retry since write exceptions is a transient failure", 0, tester.failures());
    }

    // Helper, not a test. It was erroneously annotated @Test: JUnit 4 requires test
    // methods to be public, void and parameterless, so the stray annotation made the
    // whole class fail at initialization. The annotation is removed here.
    private void assertLastPollTimeWhenAdvancing(int lastPollTimeSeconds,
                                                 int advanceSeconds,
                                                 DryRunGatewayConnection connection,
                                                 OperationProcessorTester tester) {
        tester.clock().advance(Duration.ofSeconds(advanceSeconds));
        tester.tick(1);
        assertEquals(lastPollTimeSeconds, connection.lastPollTime().toEpochMilli() / 1000);
    }

}
/**
 * Tests of IOThread's result accounting and connection failure handling,
 * driven through OperationProcessorTester with a dry-run gateway connection.
 */
class IOThreadTest {

    @Test
    public void testSuccessfulWriting() {
        OperationProcessorTester tester = new OperationProcessorTester();
        assertEquals(0, tester.incomplete());
        assertEquals(0, tester.success());
        assertEquals(0, tester.failures());
        tester.send("doc1");
        tester.send("doc2");
        tester.send("doc3");
        assertEquals(3, tester.incomplete());
        assertEquals(0, tester.success());
        assertEquals(0, tester.failures());
        tester.tick(1); // connect
        assertEquals(3, tester.incomplete());
        tester.tick(1); // sync
        assertEquals(3, tester.incomplete());
        tester.tick(1); // process results
        assertEquals(0, tester.incomplete());
        assertEquals(3, tester.success());
        assertEquals(0, tester.failures());
    }

    @Test
    public void testFatalExceptionOnHandshake() {
        OperationProcessorTester tester = new OperationProcessorTester();
        IOThread ioThread = tester.getSingleIOThread();
        DryRunGatewayConnection firstConnection = (DryRunGatewayConnection)ioThread.currentConnection();
        // 403 is treated as fatal: all pending operations fail immediately.
        firstConnection.throwOnHandshake(new ServerResponseException(403, "Not authorized"));
        tester.send("doc1");
        tester.send("doc2");
        tester.send("doc3");
        tester.tick(3);
        assertEquals(0, tester.incomplete());
        assertEquals(0, ioThread.resultQueue().getPendingSize());
        assertEquals(0, tester.success());
        assertEquals(3, tester.failures());
    }

    @Test
    public void testExceptionOnHandshake() {
        OperationProcessorTester tester = new OperationProcessorTester();
        IOThread ioThread = tester.getSingleIOThread();
        DryRunGatewayConnection firstConnection = (DryRunGatewayConnection)ioThread.currentConnection();
        // A non-fatal handshake error: the operation stays incomplete awaiting retry.
        firstConnection.throwOnHandshake(new ServerResponseException(418, "I'm a teapot"));
        tester.send("doc1");
        tester.tick(3);
        assertEquals(1, tester.incomplete());
        assertEquals(0, ioThread.resultQueue().getPendingSize());
        assertEquals(0, tester.success());
        assertEquals("Awaiting retry", 0, tester.failures());
    }

    @Test
    public void testExceptionOnWrite() {
        OperationProcessorTester tester = new OperationProcessorTester();
        IOThread ioThread = tester.getSingleIOThread();
        DryRunGatewayConnection firstConnection = (DryRunGatewayConnection)ioThread.currentConnection();
        firstConnection.throwOnWrite(new IOException("Test failure"));
        tester.send("doc1");
        tester.tick(3);
        assertEquals(1, tester.incomplete());
        assertEquals(0, ioThread.resultQueue().getPendingSize());
        assertEquals(0, tester.success());
        assertEquals("Awaiting retry since write exceptions is a transient failure", 0, tester.failures());
    }

    // Helper, not a test. It was erroneously annotated @Test: JUnit 4 requires test
    // methods to be public, void and parameterless, so the stray annotation made the
    // whole class fail at initialization. The annotation is removed here.
    private void assertLastPollTimeWhenAdvancing(int lastPollTimeSeconds,
                                                 int advanceSeconds,
                                                 DryRunGatewayConnection connection,
                                                 OperationProcessorTester tester) {
        tester.clock().advance(Duration.ofSeconds(advanceSeconds));
        tester.tick(1);
        assertEquals(lastPollTimeSeconds, connection.lastPollTime().toEpochMilli() / 1000);
    }

}
The old code would just advance to the next node if resolving failed. If we get a node with a broken IP, the allocation will basically not succeed until manual intervention, since it would keep trying the same node? We should catch this and try the next node if it fails. Also, let's drop the stack trace for this, see #14599.
// Materializes this candidate into a concrete node on the parent host by allocating
// an IP address and hostname from the parent's IP pool.
// NOTE(review): if name resolution fails for this parent, this throws and allocation
// may keep retrying the same broken host — consider skipping to the next candidate
// instead; see #14599.
public NodeCandidate withNode() {
    Optional<IP.Allocation> allocation;
    try {
        allocation = parent.get().ipConfig().pool().findAllocation(allNodes, nodeRepository.nameResolver());
        if (allocation.isEmpty())
            // NOTE(review): thrown inside the try, so the broad catch below re-wraps
            // this as "Failed allocating IP address ..." with this as the cause.
            throw new IllegalStateException("No free ip addresses on " + parent.get() + ": Cannot allocate node");
    } catch (Exception e) {
        throw new IllegalStateException("Failed allocating IP address on " + parent.get() +": ", e);
    }

    // The child inherits disk speed and storage type from the parent host's resources.
    Node node = Node.createDockerNode(allocation.get().addresses(),
                                      allocation.get().hostname(),
                                      parentHostname().get(),
                                      resources.with(parent.get().resources().diskSpeed())
                                               .with(parent.get().resources().storageType()),
                                      NodeType.tenant);
    return new ConcreteNodeCandidate(node, freeParentCapacity, parent, violatesSpares, isSurplus, isNew, isResizable);
}
throw new IllegalStateException("Failed allocating IP address on " + parent.get() +": ", e);
// Already backed by a concrete Node — nothing to materialize, so return this as-is.
public NodeCandidate withNode() { return this; }
class ConcreteNodeCandidate extends NodeCandidate { private final Node node; ConcreteNodeCandidate(Node node, NodeResources freeParentCapacity, Optional<Node> parent, boolean violatesSpares, boolean isSurplus, boolean isNew, boolean isResizeable) { super(freeParentCapacity, parent, violatesSpares, isSurplus, isNew, isResizeable); this.node = Objects.requireNonNull(node, "Node cannot be null"); } @Override public NodeResources resources() { return node.resources(); } @Override public Optional<String> parentHostname() { return node.parentHostname(); } @Override public NodeType type() { return node.type(); } public Optional<Allocation> allocation() { return node.allocation(); } public Node.State state() { return node.state(); } public boolean wantToRetire() { return node.status().wantToRetire(); } public Flavor flavor() { return node.flavor(); } public NodeCandidate allocate(ApplicationId owner, ClusterMembership membership, NodeResources requestedResources, Instant at) { return new ConcreteNodeCandidate(node.allocate(owner, membership, requestedResources, at), freeParentCapacity, parent, violatesSpares, isSurplus, isNew, isResizable); } /** Called when the node described by this candidate must be created */ public Node toNode() { return node; } @Override public int compareTo(NodeCandidate other) { int comparison = super.compareTo(other); if (comparison != 0) return comparison; if ( ! (other instanceof ConcreteNodeCandidate)) return -1; return this.node.hostname().compareTo(((ConcreteNodeCandidate)other).node.hostname()); } @Override public String toString() { return node.id(); } }
class ConcreteNodeCandidate extends NodeCandidate { private final Node node; ConcreteNodeCandidate(Node node, NodeResources freeParentCapacity, Optional<Node> parent, boolean violatesSpares, boolean isSurplus, boolean isNew, boolean isResizeable) { super(freeParentCapacity, parent, violatesSpares, isSurplus, isNew, isResizeable); this.node = Objects.requireNonNull(node, "Node cannot be null"); } @Override public NodeResources resources() { return node.resources(); } @Override public Optional<String> parentHostname() { return node.parentHostname(); } @Override public NodeType type() { return node.type(); } @Override public Optional<Allocation> allocation() { return node.allocation(); } @Override public Node.State state() { return node.state(); } @Override public boolean wantToRetire() { return node.status().wantToRetire(); } @Override public Flavor flavor() { return node.flavor(); } @Override public NodeCandidate allocate(ApplicationId owner, ClusterMembership membership, NodeResources requestedResources, Instant at) { return new ConcreteNodeCandidate(node.allocate(owner, membership, requestedResources, at), freeParentCapacity, parent, violatesSpares, isSurplus, isNew, isResizable); } /** Called when the node described by this candidate must be created */ @Override @Override public Node toNode() { return node; } @Override public boolean isValid() { return true; } @Override public int compareTo(NodeCandidate other) { int comparison = super.compareTo(other); if (comparison != 0) return comparison; if ( ! (other instanceof ConcreteNodeCandidate)) return -1; return this.node.hostname().compareTo(((ConcreteNodeCandidate)other).node.hostname()); } @Override public String toString() { return node.id(); } }
Done, please ptal. I think it would be better if we could expect this not to fail though - perhaps we should have a maintainer looking for it.
/** Turns this candidate into a ConcreteNodeCandidate by claiming an IP allocation on its parent host. */
public NodeCandidate withNode() {
    Optional<IP.Allocation> allocation;
    try {
        var pool = parent.get().ipConfig().pool();
        allocation = pool.findAllocation(allNodes, nodeRepository.nameResolver());
        if (allocation.isEmpty())
            throw new IllegalStateException("No free ip addresses on " + parent.get() + ": Cannot allocate node");
    } catch (Exception e) {
        throw new IllegalStateException("Failed allocating IP address on " + parent.get() +": ", e);
    }

    IP.Allocation claimed = allocation.get();
    NodeResources childResources = resources.with(parent.get().resources().diskSpeed())
                                            .with(parent.get().resources().storageType());
    Node child = Node.createDockerNode(claimed.addresses(), claimed.hostname(), parentHostname().get(),
                                       childResources, NodeType.tenant);
    return new ConcreteNodeCandidate(child, freeParentCapacity, parent, violatesSpares, isSurplus, isNew, isResizable);
}
throw new IllegalStateException("Failed allocating IP address on " + parent.get() +": ", e);
// Already backed by a concrete Node — nothing to materialize, so return this as-is.
public NodeCandidate withNode() { return this; }
class ConcreteNodeCandidate extends NodeCandidate { private final Node node; ConcreteNodeCandidate(Node node, NodeResources freeParentCapacity, Optional<Node> parent, boolean violatesSpares, boolean isSurplus, boolean isNew, boolean isResizeable) { super(freeParentCapacity, parent, violatesSpares, isSurplus, isNew, isResizeable); this.node = Objects.requireNonNull(node, "Node cannot be null"); } @Override public NodeResources resources() { return node.resources(); } @Override public Optional<String> parentHostname() { return node.parentHostname(); } @Override public NodeType type() { return node.type(); } public Optional<Allocation> allocation() { return node.allocation(); } public Node.State state() { return node.state(); } public boolean wantToRetire() { return node.status().wantToRetire(); } public Flavor flavor() { return node.flavor(); } public NodeCandidate allocate(ApplicationId owner, ClusterMembership membership, NodeResources requestedResources, Instant at) { return new ConcreteNodeCandidate(node.allocate(owner, membership, requestedResources, at), freeParentCapacity, parent, violatesSpares, isSurplus, isNew, isResizable); } /** Called when the node described by this candidate must be created */ public Node toNode() { return node; } @Override public int compareTo(NodeCandidate other) { int comparison = super.compareTo(other); if (comparison != 0) return comparison; if ( ! (other instanceof ConcreteNodeCandidate)) return -1; return this.node.hostname().compareTo(((ConcreteNodeCandidate)other).node.hostname()); } @Override public String toString() { return node.id(); } }
class ConcreteNodeCandidate extends NodeCandidate { private final Node node; ConcreteNodeCandidate(Node node, NodeResources freeParentCapacity, Optional<Node> parent, boolean violatesSpares, boolean isSurplus, boolean isNew, boolean isResizeable) { super(freeParentCapacity, parent, violatesSpares, isSurplus, isNew, isResizeable); this.node = Objects.requireNonNull(node, "Node cannot be null"); } @Override public NodeResources resources() { return node.resources(); } @Override public Optional<String> parentHostname() { return node.parentHostname(); } @Override public NodeType type() { return node.type(); } @Override public Optional<Allocation> allocation() { return node.allocation(); } @Override public Node.State state() { return node.state(); } @Override public boolean wantToRetire() { return node.status().wantToRetire(); } @Override public Flavor flavor() { return node.flavor(); } @Override public NodeCandidate allocate(ApplicationId owner, ClusterMembership membership, NodeResources requestedResources, Instant at) { return new ConcreteNodeCandidate(node.allocate(owner, membership, requestedResources, at), freeParentCapacity, parent, violatesSpares, isSurplus, isNew, isResizable); } /** Called when the node described by this candidate must be created */ @Override @Override public Node toNode() { return node; } @Override public boolean isValid() { return true; } @Override public int compareTo(NodeCandidate other) { int comparison = super.compareTo(other); if (comparison != 0) return comparison; if ( ! (other instanceof ConcreteNodeCandidate)) return -1; return this.node.hostname().compareTo(((ConcreteNodeCandidate)other).node.hostname()); } @Override public String toString() { return node.id(); } }
It's the product of countless cases of DNS/IP misconfiguration upstream from us. It will continue to happen. 😞
public NodeCandidate withNode() { Optional<IP.Allocation> allocation; try { allocation = parent.get().ipConfig().pool().findAllocation(allNodes, nodeRepository.nameResolver()); if (allocation.isEmpty()) throw new IllegalStateException("No free ip addresses on " + parent.get() + ": Cannot allocate node"); } catch (Exception e) { throw new IllegalStateException("Failed allocating IP address on " + parent.get() +": ", e); } Node node = Node.createDockerNode(allocation.get().addresses(), allocation.get().hostname(), parentHostname().get(), resources.with(parent.get().resources().diskSpeed()) .with(parent.get().resources().storageType()), NodeType.tenant); return new ConcreteNodeCandidate(node, freeParentCapacity, parent, violatesSpares, isSurplus, isNew, isResizable); }
throw new IllegalStateException("Failed allocating IP address on " + parent.get() +": ", e);
public NodeCandidate withNode() { return this; }
class ConcreteNodeCandidate extends NodeCandidate { private final Node node; ConcreteNodeCandidate(Node node, NodeResources freeParentCapacity, Optional<Node> parent, boolean violatesSpares, boolean isSurplus, boolean isNew, boolean isResizeable) { super(freeParentCapacity, parent, violatesSpares, isSurplus, isNew, isResizeable); this.node = Objects.requireNonNull(node, "Node cannot be null"); } @Override public NodeResources resources() { return node.resources(); } @Override public Optional<String> parentHostname() { return node.parentHostname(); } @Override public NodeType type() { return node.type(); } public Optional<Allocation> allocation() { return node.allocation(); } public Node.State state() { return node.state(); } public boolean wantToRetire() { return node.status().wantToRetire(); } public Flavor flavor() { return node.flavor(); } public NodeCandidate allocate(ApplicationId owner, ClusterMembership membership, NodeResources requestedResources, Instant at) { return new ConcreteNodeCandidate(node.allocate(owner, membership, requestedResources, at), freeParentCapacity, parent, violatesSpares, isSurplus, isNew, isResizable); } /** Called when the node described by this candidate must be created */ public Node toNode() { return node; } @Override public int compareTo(NodeCandidate other) { int comparison = super.compareTo(other); if (comparison != 0) return comparison; if ( ! (other instanceof ConcreteNodeCandidate)) return -1; return this.node.hostname().compareTo(((ConcreteNodeCandidate)other).node.hostname()); } @Override public String toString() { return node.id(); } }
class ConcreteNodeCandidate extends NodeCandidate { private final Node node; ConcreteNodeCandidate(Node node, NodeResources freeParentCapacity, Optional<Node> parent, boolean violatesSpares, boolean isSurplus, boolean isNew, boolean isResizeable) { super(freeParentCapacity, parent, violatesSpares, isSurplus, isNew, isResizeable); this.node = Objects.requireNonNull(node, "Node cannot be null"); } @Override public NodeResources resources() { return node.resources(); } @Override public Optional<String> parentHostname() { return node.parentHostname(); } @Override public NodeType type() { return node.type(); } @Override public Optional<Allocation> allocation() { return node.allocation(); } @Override public Node.State state() { return node.state(); } @Override public boolean wantToRetire() { return node.status().wantToRetire(); } @Override public Flavor flavor() { return node.flavor(); } @Override public NodeCandidate allocate(ApplicationId owner, ClusterMembership membership, NodeResources requestedResources, Instant at) { return new ConcreteNodeCandidate(node.allocate(owner, membership, requestedResources, at), freeParentCapacity, parent, violatesSpares, isSurplus, isNew, isResizable); } /** Called when the node described by this candidate must be created */ @Override @Override public Node toNode() { return node; } @Override public boolean isValid() { return true; } @Override public int compareTo(NodeCandidate other) { int comparison = super.compareTo(other); if (comparison != 0) return comparison; if ( ! (other instanceof ConcreteNodeCandidate)) return -1; return this.node.hostname().compareTo(((ConcreteNodeCandidate)other).node.hostname()); } @Override public String toString() { return node.id(); } }
I would like to stop using DNS but not sure if that would avoid all of that?
public NodeCandidate withNode() { Optional<IP.Allocation> allocation; try { allocation = parent.get().ipConfig().pool().findAllocation(allNodes, nodeRepository.nameResolver()); if (allocation.isEmpty()) throw new IllegalStateException("No free ip addresses on " + parent.get() + ": Cannot allocate node"); } catch (Exception e) { throw new IllegalStateException("Failed allocating IP address on " + parent.get() +": ", e); } Node node = Node.createDockerNode(allocation.get().addresses(), allocation.get().hostname(), parentHostname().get(), resources.with(parent.get().resources().diskSpeed()) .with(parent.get().resources().storageType()), NodeType.tenant); return new ConcreteNodeCandidate(node, freeParentCapacity, parent, violatesSpares, isSurplus, isNew, isResizable); }
throw new IllegalStateException("Failed allocating IP address on " + parent.get() +": ", e);
public NodeCandidate withNode() { return this; }
class ConcreteNodeCandidate extends NodeCandidate { private final Node node; ConcreteNodeCandidate(Node node, NodeResources freeParentCapacity, Optional<Node> parent, boolean violatesSpares, boolean isSurplus, boolean isNew, boolean isResizeable) { super(freeParentCapacity, parent, violatesSpares, isSurplus, isNew, isResizeable); this.node = Objects.requireNonNull(node, "Node cannot be null"); } @Override public NodeResources resources() { return node.resources(); } @Override public Optional<String> parentHostname() { return node.parentHostname(); } @Override public NodeType type() { return node.type(); } public Optional<Allocation> allocation() { return node.allocation(); } public Node.State state() { return node.state(); } public boolean wantToRetire() { return node.status().wantToRetire(); } public Flavor flavor() { return node.flavor(); } public NodeCandidate allocate(ApplicationId owner, ClusterMembership membership, NodeResources requestedResources, Instant at) { return new ConcreteNodeCandidate(node.allocate(owner, membership, requestedResources, at), freeParentCapacity, parent, violatesSpares, isSurplus, isNew, isResizable); } /** Called when the node described by this candidate must be created */ public Node toNode() { return node; } @Override public int compareTo(NodeCandidate other) { int comparison = super.compareTo(other); if (comparison != 0) return comparison; if ( ! (other instanceof ConcreteNodeCandidate)) return -1; return this.node.hostname().compareTo(((ConcreteNodeCandidate)other).node.hostname()); } @Override public String toString() { return node.id(); } }
class ConcreteNodeCandidate extends NodeCandidate { private final Node node; ConcreteNodeCandidate(Node node, NodeResources freeParentCapacity, Optional<Node> parent, boolean violatesSpares, boolean isSurplus, boolean isNew, boolean isResizeable) { super(freeParentCapacity, parent, violatesSpares, isSurplus, isNew, isResizeable); this.node = Objects.requireNonNull(node, "Node cannot be null"); } @Override public NodeResources resources() { return node.resources(); } @Override public Optional<String> parentHostname() { return node.parentHostname(); } @Override public NodeType type() { return node.type(); } @Override public Optional<Allocation> allocation() { return node.allocation(); } @Override public Node.State state() { return node.state(); } @Override public boolean wantToRetire() { return node.status().wantToRetire(); } @Override public Flavor flavor() { return node.flavor(); } @Override public NodeCandidate allocate(ApplicationId owner, ClusterMembership membership, NodeResources requestedResources, Instant at) { return new ConcreteNodeCandidate(node.allocate(owner, membership, requestedResources, at), freeParentCapacity, parent, violatesSpares, isSurplus, isNew, isResizable); } /** Called when the node described by this candidate must be created */ @Override @Override public Node toNode() { return node; } @Override public boolean isValid() { return true; } @Override public int compareTo(NodeCandidate other) { int comparison = super.compareTo(other); if (comparison != 0) return comparison; if ( ! (other instanceof ConcreteNodeCandidate)) return -1; return this.node.hostname().compareTo(((ConcreteNodeCandidate)other).node.hostname()); } @Override public String toString() { return node.id(); } }
IP assignment and DNS issues typically correlate, at least in our internal infrastructure: If foo.example.com resolves to 1.1.1.1, but 1.1.1.1 resolves to bar.example.com, there's likely an IP conflict where two distinct hosts have been assigned the same IP. Vespa itself also has strict hostname validation on startup, where hostname and IP are resolved and compared. That's another reason we do this check during allocation as the node would refuse to start in case of misconfiguration anyway.
public NodeCandidate withNode() { Optional<IP.Allocation> allocation; try { allocation = parent.get().ipConfig().pool().findAllocation(allNodes, nodeRepository.nameResolver()); if (allocation.isEmpty()) throw new IllegalStateException("No free ip addresses on " + parent.get() + ": Cannot allocate node"); } catch (Exception e) { throw new IllegalStateException("Failed allocating IP address on " + parent.get() +": ", e); } Node node = Node.createDockerNode(allocation.get().addresses(), allocation.get().hostname(), parentHostname().get(), resources.with(parent.get().resources().diskSpeed()) .with(parent.get().resources().storageType()), NodeType.tenant); return new ConcreteNodeCandidate(node, freeParentCapacity, parent, violatesSpares, isSurplus, isNew, isResizable); }
throw new IllegalStateException("Failed allocating IP address on " + parent.get() +": ", e);
public NodeCandidate withNode() { return this; }
class ConcreteNodeCandidate extends NodeCandidate { private final Node node; ConcreteNodeCandidate(Node node, NodeResources freeParentCapacity, Optional<Node> parent, boolean violatesSpares, boolean isSurplus, boolean isNew, boolean isResizeable) { super(freeParentCapacity, parent, violatesSpares, isSurplus, isNew, isResizeable); this.node = Objects.requireNonNull(node, "Node cannot be null"); } @Override public NodeResources resources() { return node.resources(); } @Override public Optional<String> parentHostname() { return node.parentHostname(); } @Override public NodeType type() { return node.type(); } public Optional<Allocation> allocation() { return node.allocation(); } public Node.State state() { return node.state(); } public boolean wantToRetire() { return node.status().wantToRetire(); } public Flavor flavor() { return node.flavor(); } public NodeCandidate allocate(ApplicationId owner, ClusterMembership membership, NodeResources requestedResources, Instant at) { return new ConcreteNodeCandidate(node.allocate(owner, membership, requestedResources, at), freeParentCapacity, parent, violatesSpares, isSurplus, isNew, isResizable); } /** Called when the node described by this candidate must be created */ public Node toNode() { return node; } @Override public int compareTo(NodeCandidate other) { int comparison = super.compareTo(other); if (comparison != 0) return comparison; if ( ! (other instanceof ConcreteNodeCandidate)) return -1; return this.node.hostname().compareTo(((ConcreteNodeCandidate)other).node.hostname()); } @Override public String toString() { return node.id(); } }
class ConcreteNodeCandidate extends NodeCandidate { private final Node node; ConcreteNodeCandidate(Node node, NodeResources freeParentCapacity, Optional<Node> parent, boolean violatesSpares, boolean isSurplus, boolean isNew, boolean isResizeable) { super(freeParentCapacity, parent, violatesSpares, isSurplus, isNew, isResizeable); this.node = Objects.requireNonNull(node, "Node cannot be null"); } @Override public NodeResources resources() { return node.resources(); } @Override public Optional<String> parentHostname() { return node.parentHostname(); } @Override public NodeType type() { return node.type(); } @Override public Optional<Allocation> allocation() { return node.allocation(); } @Override public Node.State state() { return node.state(); } @Override public boolean wantToRetire() { return node.status().wantToRetire(); } @Override public Flavor flavor() { return node.flavor(); } @Override public NodeCandidate allocate(ApplicationId owner, ClusterMembership membership, NodeResources requestedResources, Instant at) { return new ConcreteNodeCandidate(node.allocate(owner, membership, requestedResources, at), freeParentCapacity, parent, violatesSpares, isSurplus, isNew, isResizable); } /** Called when the node described by this candidate must be created */ @Override @Override public Node toNode() { return node; } @Override public boolean isValid() { return true; } @Override public int compareTo(NodeCandidate other) { int comparison = super.compareTo(other); if (comparison != 0) return comparison; if ( ! (other instanceof ConcreteNodeCandidate)) return -1; return this.node.hostname().compareTo(((ConcreteNodeCandidate)other).node.hostname()); } @Override public String toString() { return node.id(); } }
```suggestion * request body, so this responsibility falls on the handler consuming the body instead. For the * deployment cases, the request body is validated in {@link ApplicationApiHandler.parseDataParts}. ```
private boolean keyVerifies(PublicKey key, DiscFilterRequest request) { /* This method only checks that the content hash has been signed by the provided public key, but * does not verify the content of the request. jDisc request filters do not allow inspecting the * request body, so this responsibility falls on the handler consuming the body instead. For this * specific case the request body is validated in {@link ApplicationApiHandler.parseDataParts}. */ return new RequestVerifier(key, controller.clock()).verify(Method.valueOf(request.getMethod()), request.getUri(), request.getHeader("X-Timestamp"), request.getHeader("X-Content-Hash"), request.getHeader("X-Authorization")); }
* specific case the request body is validated in {@link ApplicationApiHandler.parseDataParts}.
private boolean keyVerifies(PublicKey key, DiscFilterRequest request) { /* This method only checks that the content hash has been signed by the provided public key, but * does not verify the content of the request. jDisc request filters do not allow inspecting the * request body, so this responsibility falls on the handler consuming the body instead. For the * deployment cases, the request body is validated in {@link ApplicationApiHandler.parseDataParts}. */ return new RequestVerifier(key, controller.clock()).verify(Method.valueOf(request.getMethod()), request.getUri(), request.getHeader("X-Timestamp"), request.getHeader("X-Content-Hash"), request.getHeader("X-Authorization")); }
class SignatureFilter extends JsonSecurityRequestFilterBase { private static final Logger logger = Logger.getLogger(SignatureFilter.class.getName()); private final Controller controller; @Inject public SignatureFilter(Controller controller) { this.controller = controller; } @Override protected Optional<ErrorResponse> filter(DiscFilterRequest request) { if ( request.getAttribute(SecurityContext.ATTRIBUTE_NAME) == null && request.getHeader("X-Authorization") != null) try { getSecurityContext(request).ifPresent(securityContext -> { request.setUserPrincipal(securityContext.principal()); request.setRemoteUser(securityContext.principal().getName()); request.setAttribute(SecurityContext.ATTRIBUTE_NAME, securityContext); }); } catch (Exception e) { logger.log(Level.FINE, () -> "Exception verifying signed request: " + Exceptions.toMessageString(e)); } return Optional.empty(); } private Optional<SecurityContext> getSecurityContext(DiscFilterRequest request) { PublicKey key = KeyUtils.fromPemEncodedPublicKey(new String(Base64.getDecoder().decode(request.getHeader("X-Key")), UTF_8)); if (keyVerifies(key, request)) { ApplicationId id = ApplicationId.fromSerializedForm(request.getHeader("X-Key-Id")); Optional<CloudTenant> tenant = controller.tenants().get(id.tenant()) .filter(CloudTenant.class::isInstance) .map(CloudTenant.class::cast); if (tenant.isPresent() && tenant.get().developerKeys().containsKey(key)) return Optional.of(new SecurityContext(tenant.get().developerKeys().get(key), Set.of(Role.reader(id.tenant()), Role.developer(id.tenant())))); Optional <Application> application = controller.applications().getApplication(TenantAndApplicationId.from(id)); if (application.isPresent() && application.get().deployKeys().contains(key)) return Optional.of(new SecurityContext(new SimplePrincipal("headless@" + id.tenant() + "." + id.application()), Set.of(Role.reader(id.tenant()), Role.headless(id.tenant(), id.application())))); } return Optional.empty(); } }
class SignatureFilter extends JsonSecurityRequestFilterBase { private static final Logger logger = Logger.getLogger(SignatureFilter.class.getName()); private final Controller controller; @Inject public SignatureFilter(Controller controller) { this.controller = controller; } @Override protected Optional<ErrorResponse> filter(DiscFilterRequest request) { if ( request.getAttribute(SecurityContext.ATTRIBUTE_NAME) == null && request.getHeader("X-Authorization") != null) try { getSecurityContext(request).ifPresent(securityContext -> { request.setUserPrincipal(securityContext.principal()); request.setRemoteUser(securityContext.principal().getName()); request.setAttribute(SecurityContext.ATTRIBUTE_NAME, securityContext); }); } catch (Exception e) { logger.log(Level.FINE, () -> "Exception verifying signed request: " + Exceptions.toMessageString(e)); } return Optional.empty(); } private Optional<SecurityContext> getSecurityContext(DiscFilterRequest request) { PublicKey key = KeyUtils.fromPemEncodedPublicKey(new String(Base64.getDecoder().decode(request.getHeader("X-Key")), UTF_8)); if (keyVerifies(key, request)) { ApplicationId id = ApplicationId.fromSerializedForm(request.getHeader("X-Key-Id")); Optional<CloudTenant> tenant = controller.tenants().get(id.tenant()) .filter(CloudTenant.class::isInstance) .map(CloudTenant.class::cast); if (tenant.isPresent() && tenant.get().developerKeys().containsKey(key)) return Optional.of(new SecurityContext(tenant.get().developerKeys().get(key), Set.of(Role.reader(id.tenant()), Role.developer(id.tenant())))); Optional <Application> application = controller.applications().getApplication(TenantAndApplicationId.from(id)); if (application.isPresent() && application.get().deployKeys().contains(key)) return Optional.of(new SecurityContext(new SimplePrincipal("headless@" + id.tenant() + "." + id.application()), Set.of(Role.reader(id.tenant()), Role.headless(id.tenant(), id.application())))); } return Optional.empty(); } }